summaryrefslogtreecommitdiff
path: root/include
diff options
context:
space:
mode:
authorRussell King (Oracle) <rmk+kernel@armlinux.org.uk>2024-03-26 12:08:35 +0000
committerRussell King (Oracle) <rmk+kernel@armlinux.org.uk>2024-03-26 12:08:35 +0000
commitf843bc9bc952bdeb21586ee921d29f5c2ae5a863 (patch)
tree05daf639c7add0e0fcb94f65143f10d7d87bd0b5 /include
parent3c688a55831db128cc709396992886396d4f1654 (diff)
parent3d5a640ecf92e4c8d6e157849217b68984a819bc (diff)
Merge branches 'sa1100-for-next' and 'unstable/sa11x0-asoc' into sa1100
Diffstat (limited to 'include')
-rw-r--r--include/acpi/acnames.h1
-rw-r--r--include/acpi/acpi_bus.h197
-rw-r--r--include/acpi/acpi_drivers.h2
-rw-r--r--include/acpi/acpixf.h4
-rw-r--r--include/acpi/actbl.h3
-rw-r--r--include/acpi/actbl1.h5
-rw-r--r--include/acpi/actbl2.h88
-rw-r--r--include/acpi/actbl3.h6
-rw-r--r--include/acpi/cppc_acpi.h2
-rw-r--r--include/acpi/ghes.h4
-rw-r--r--include/acpi/pcc.h13
-rw-r--r--include/acpi/pdc_intel.h36
-rw-r--r--include/acpi/platform/aclinux.h1
-rw-r--r--include/acpi/platform/aczephyr.h3
-rw-r--r--include/acpi/proc_cap_intel.h40
-rw-r--r--include/acpi/processor.h5
-rw-r--r--include/acpi/video.h9
-rw-r--r--include/asm-generic/Kbuild3
-rw-r--r--include/asm-generic/atomic.h3
-rw-r--r--include/asm-generic/bitops/atomic.h12
-rw-r--r--include/asm-generic/bitops/instrumented-lock.h28
-rw-r--r--include/asm-generic/bitops/lock.h26
-rw-r--r--include/asm-generic/bug.h5
-rw-r--r--include/asm-generic/bugs.h11
-rw-r--r--include/asm-generic/cacheflush.h13
-rw-r--r--include/asm-generic/cfi.h5
-rw-r--r--include/asm-generic/checksum.h6
-rw-r--r--include/asm-generic/cmpxchg-local.h2
-rw-r--r--include/asm-generic/current.h2
-rw-r--r--include/asm-generic/export.h83
-rw-r--r--include/asm-generic/fb.h126
-rw-r--r--include/asm-generic/hugetlb.h4
-rw-r--r--include/asm-generic/hyperv-tlfs.h1
-rw-r--r--include/asm-generic/ide_iops.h39
-rw-r--r--include/asm-generic/io.h31
-rw-r--r--include/asm-generic/iomap.h6
-rw-r--r--include/asm-generic/mshyperv.h19
-rw-r--r--include/asm-generic/numa.h2
-rw-r--r--include/asm-generic/page.h12
-rw-r--r--include/asm-generic/percpu.h201
-rw-r--r--include/asm-generic/pgalloc.h93
-rw-r--r--include/asm-generic/preempt.h14
-rw-r--r--include/asm-generic/qspinlock.h2
-rw-r--r--include/asm-generic/spinlock.h16
-rw-r--r--include/asm-generic/tlb.h12
-rw-r--r--include/asm-generic/unaligned.h24
-rw-r--r--include/asm-generic/vmlinux.lds.h28
-rw-r--r--include/asm-generic/word-at-a-time.h2
-rw-r--r--include/clocksource/hyperv_timer.h24
-rw-r--r--include/crypto/aead.h12
-rw-r--r--include/crypto/akcipher.h36
-rw-r--r--include/crypto/algapi.h9
-rw-r--r--include/crypto/b128ops.h14
-rw-r--r--include/crypto/engine.h120
-rw-r--r--include/crypto/hash.h102
-rw-r--r--include/crypto/hash_info.h1
-rw-r--r--include/crypto/if_alg.h14
-rw-r--r--include/crypto/internal/cipher.h2
-rw-r--r--include/crypto/internal/engine.h74
-rw-r--r--include/crypto/internal/hash.h21
-rw-r--r--include/crypto/internal/sig.h17
-rw-r--r--include/crypto/internal/skcipher.h130
-rw-r--r--include/crypto/public_key.h14
-rw-r--r--include/crypto/sha2.h2
-rw-r--r--include/crypto/sha256_base.h50
-rw-r--r--include/crypto/sig.h140
-rw-r--r--include/crypto/skcipher.h395
-rw-r--r--include/crypto/sm2.h21
-rw-r--r--include/drm/amd_asic_type.h5
-rw-r--r--include/drm/bridge/aux-bridge.h52
-rw-r--r--include/drm/bridge/dw_hdmi.h2
-rw-r--r--include/drm/bridge/dw_mipi_dsi.h16
-rw-r--r--include/drm/bridge/samsung-dsim.h12
-rw-r--r--include/drm/display/drm_dp.h35
-rw-r--r--include/drm/display/drm_dp_helper.h50
-rw-r--r--include/drm/display/drm_dp_mst_helper.h60
-rw-r--r--include/drm/display/drm_dsc_helper.h13
-rw-r--r--include/drm/drm_accel.h9
-rw-r--r--include/drm/drm_aperture.h7
-rw-r--r--include/drm/drm_atomic.h2
-rw-r--r--include/drm/drm_atomic_helper.h7
-rw-r--r--include/drm/drm_auth.h22
-rw-r--r--include/drm/drm_bridge.h7
-rw-r--r--include/drm/drm_buddy.h6
-rw-r--r--include/drm/drm_client.h2
-rw-r--r--include/drm/drm_color_mgmt.h20
-rw-r--r--include/drm/drm_connector.h157
-rw-r--r--include/drm/drm_crtc.h5
-rw-r--r--include/drm/drm_debugfs.h32
-rw-r--r--include/drm/drm_device.h85
-rw-r--r--include/drm/drm_drv.h75
-rw-r--r--include/drm/drm_edid.h168
-rw-r--r--include/drm/drm_eld.h164
-rw-r--r--include/drm/drm_encoder.h16
-rw-r--r--include/drm/drm_exec.h150
-rw-r--r--include/drm/drm_fb_helper.h88
-rw-r--r--include/drm/drm_file.h74
-rw-r--r--include/drm/drm_fixed.h6
-rw-r--r--include/drm/drm_flip_work.h20
-rw-r--r--include/drm/drm_format_helper.h81
-rw-r--r--include/drm/drm_fourcc.h5
-rw-r--r--include/drm/drm_framebuffer.h17
-rw-r--r--include/drm/drm_gem.h123
-rw-r--r--include/drm/drm_gem_atomic_helper.h10
-rw-r--r--include/drm/drm_gem_dma_helper.h14
-rw-r--r--include/drm/drm_gem_shmem_helper.h19
-rw-r--r--include/drm/drm_gem_vram_helper.h9
-rw-r--r--include/drm/drm_gpuvm.h1247
-rw-r--r--include/drm/drm_ioctl.h11
-rw-r--r--include/drm/drm_kunit_helpers.h11
-rw-r--r--include/drm/drm_legacy.h331
-rw-r--r--include/drm/drm_mipi_dbi.h4
-rw-r--r--include/drm/drm_mipi_dsi.h2
-rw-r--r--include/drm/drm_mode_object.h2
-rw-r--r--include/drm/drm_modeset_helper_vtables.h64
-rw-r--r--include/drm/drm_panel.h96
-rw-r--r--include/drm/drm_plane.h33
-rw-r--r--include/drm/drm_plane_helper.h2
-rw-r--r--include/drm/drm_print.h2
-rw-r--r--include/drm/drm_probe_helper.h1
-rw-r--r--include/drm/drm_property.h6
-rw-r--r--include/drm/drm_syncobj.h6
-rw-r--r--include/drm/drm_sysfs.h4
-rw-r--r--include/drm/gpu_scheduler.h76
-rw-r--r--include/drm/i915_component.h3
-rw-r--r--include/drm/i915_drm.h2
-rw-r--r--include/drm/i915_gsc_proxy_mei_interface.h53
-rw-r--r--include/drm/i915_hdcp_interface.h4
-rw-r--r--include/drm/i915_pciids.h29
-rw-r--r--include/drm/i915_pxp_tee_interface.h6
-rw-r--r--include/drm/task_barrier.h4
-rw-r--r--include/drm/ttm/ttm_bo.h2
-rw-r--r--include/drm/ttm/ttm_device.h4
-rw-r--r--include/drm/ttm/ttm_pool.h6
-rw-r--r--include/drm/ttm/ttm_tt.h2
-rw-r--r--include/drm/xe_pciids.h190
-rw-r--r--include/dt-bindings/arm/qcom,ids.h18
-rw-r--r--include/dt-bindings/ata/ahci.h2
-rw-r--r--include/dt-bindings/clock/amlogic,a1-peripherals-clkc.h168
-rw-r--r--include/dt-bindings/clock/amlogic,a1-pll-clkc.h25
-rw-r--r--include/dt-bindings/clock/amlogic,s4-peripherals-clkc.h236
-rw-r--r--include/dt-bindings/clock/amlogic,s4-pll-clkc.h43
-rw-r--r--include/dt-bindings/clock/ast2600-clock.h12
-rw-r--r--include/dt-bindings/clock/axg-audio-clkc.h65
-rw-r--r--include/dt-bindings/clock/axg-clkc.h48
-rw-r--r--include/dt-bindings/clock/exynos3250.h18
-rw-r--r--include/dt-bindings/clock/exynos4.h5
-rw-r--r--include/dt-bindings/clock/exynos5250.h3
-rw-r--r--include/dt-bindings/clock/exynos5260-clk.h25
-rw-r--r--include/dt-bindings/clock/exynos5410.h2
-rw-r--r--include/dt-bindings/clock/exynos5420.h3
-rw-r--r--include/dt-bindings/clock/exynos5433.h42
-rw-r--r--include/dt-bindings/clock/exynos7885.h4
-rw-r--r--include/dt-bindings/clock/exynos850.h10
-rw-r--r--include/dt-bindings/clock/g12a-aoclkc.h7
-rw-r--r--include/dt-bindings/clock/g12a-clkc.h138
-rw-r--r--include/dt-bindings/clock/google,gs101.h392
-rw-r--r--include/dt-bindings/clock/gxbb-clkc.h65
-rw-r--r--include/dt-bindings/clock/hi3559av100-clock.h2
-rw-r--r--include/dt-bindings/clock/imx8-clock.h28
-rw-r--r--include/dt-bindings/clock/imx8mp-clock.h3
-rw-r--r--include/dt-bindings/clock/imx93-clock.h3
-rw-r--r--include/dt-bindings/clock/intel,agilex5-clkmgr.h100
-rw-r--r--include/dt-bindings/clock/marvell,mmp2-audio.h1
-rw-r--r--include/dt-bindings/clock/marvell,mmp2.h1
-rw-r--r--include/dt-bindings/clock/marvell,pxa168.h1
-rw-r--r--include/dt-bindings/clock/marvell,pxa1928.h3
-rw-r--r--include/dt-bindings/clock/marvell,pxa910.h1
-rw-r--r--include/dt-bindings/clock/mediatek,mt7988-clk.h280
-rw-r--r--include/dt-bindings/clock/meson8b-clkc.h97
-rw-r--r--include/dt-bindings/clock/nuvoton,ma35d1-clk.h253
-rw-r--r--include/dt-bindings/clock/qcom,gcc-ipq4019.h6
-rw-r--r--include/dt-bindings/clock/qcom,gcc-ipq5018.h183
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8917.h1
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8939.h6
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8998.h3
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sc8280xp.h10
-rw-r--r--include/dt-bindings/clock/qcom,ipq9574-gcc.h6
-rw-r--r--include/dt-bindings/clock/qcom,lcc-mdm9615.h44
-rw-r--r--include/dt-bindings/clock/qcom,mmcc-msm8974.h1
-rw-r--r--include/dt-bindings/clock/qcom,qdu1000-ecpricc.h147
-rw-r--r--include/dt-bindings/clock/qcom,qdu1000-gcc.h4
-rw-r--r--include/dt-bindings/clock/qcom,sc8280xp-camcc.h179
-rw-r--r--include/dt-bindings/clock/qcom,sc8280xp-lpasscc.h17
-rw-r--r--include/dt-bindings/clock/qcom,sdx75-gcc.h193
-rw-r--r--include/dt-bindings/clock/qcom,sm4450-gcc.h197
-rw-r--r--include/dt-bindings/clock/qcom,sm8350-videocc.h35
-rw-r--r--include/dt-bindings/clock/qcom,sm8450-gpucc.h48
-rw-r--r--include/dt-bindings/clock/qcom,sm8450-videocc.h38
-rw-r--r--include/dt-bindings/clock/qcom,sm8550-camcc.h187
-rw-r--r--include/dt-bindings/clock/qcom,sm8550-gpucc.h48
-rw-r--r--include/dt-bindings/clock/qcom,sm8650-dispcc.h102
-rw-r--r--include/dt-bindings/clock/qcom,sm8650-gcc.h254
-rw-r--r--include/dt-bindings/clock/qcom,sm8650-gpucc.h43
-rw-r--r--include/dt-bindings/clock/qcom,sm8650-tcsr.h18
-rw-r--r--include/dt-bindings/clock/qcom,videocc-sm8150.h4
-rw-r--r--include/dt-bindings/clock/qcom,x1e80100-gcc.h485
-rw-r--r--include/dt-bindings/clock/r8a779f0-cpg-mssr.h2
-rw-r--r--include/dt-bindings/clock/r9a08g045-cpg.h242
-rw-r--r--include/dt-bindings/clock/rockchip,rk3588-cru.h2
-rw-r--r--include/dt-bindings/clock/samsung,exynosautov9.h18
-rw-r--r--include/dt-bindings/clock/sophgo,cv1800.h176
-rw-r--r--include/dt-bindings/clock/st,stm32mp25-rcc.h492
-rw-r--r--include/dt-bindings/clock/starfive,jh7110-crg.h80
-rw-r--r--include/dt-bindings/clock/stm32mp1-clks.h2
-rw-r--r--include/dt-bindings/clock/stm32mp13-clks.h6
-rw-r--r--include/dt-bindings/clock/sun20i-d1-ccu.h2
-rw-r--r--include/dt-bindings/clock/sun20i-d1-r-ccu.h2
-rw-r--r--include/dt-bindings/clock/sun50i-a100-ccu.h2
-rw-r--r--include/dt-bindings/clock/sun50i-h6-ccu.h2
-rw-r--r--include/dt-bindings/clock/sun50i-h616-ccu.h2
-rw-r--r--include/dt-bindings/clock/sun6i-rtc.h2
-rw-r--r--include/dt-bindings/display/sdtv-standards.h2
-rw-r--r--include/dt-bindings/dma/fsl-edma.h21
-rw-r--r--include/dt-bindings/firmware/qcom,scm.h21
-rw-r--r--include/dt-bindings/gpio/amlogic,t7-periphs-pinctrl.h179
-rw-r--r--include/dt-bindings/gpio/amlogic-c3-gpio.h72
-rw-r--r--include/dt-bindings/gpio/meson-g12a-gpio.h2
-rw-r--r--include/dt-bindings/gpio/tegra234-gpio.h20
-rw-r--r--include/dt-bindings/iio/qcom,spmi-adc7-pm7325.h69
-rw-r--r--include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h88
-rw-r--r--include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h124
-rw-r--r--include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h50
-rw-r--r--include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h22
-rw-r--r--include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h22
-rw-r--r--include/dt-bindings/iio/qcom,spmi-adc7-smb139x.h19
-rw-r--r--include/dt-bindings/iio/qcom,spmi-vadc.h3
-rw-r--r--include/dt-bindings/interconnect/qcom,msm8996-cbf.h12
-rw-r--r--include/dt-bindings/interconnect/qcom,rpm-icc.h13
-rw-r--r--include/dt-bindings/interconnect/qcom,sdx75.h102
-rw-r--r--include/dt-bindings/interconnect/qcom,sm6115.h111
-rw-r--r--include/dt-bindings/interconnect/qcom,sm8250.h7
-rw-r--r--include/dt-bindings/interconnect/qcom,sm8650-rpmh.h154
-rw-r--r--include/dt-bindings/interconnect/qcom,x1e80100-rpmh.h207
-rw-r--r--include/dt-bindings/interrupt-controller/amlogic,meson-g12a-gpio-intc.h126
-rw-r--r--include/dt-bindings/leds/leds-lp55xx.h10
-rw-r--r--include/dt-bindings/memory/mediatek,mt8188-memory-port.h489
-rw-r--r--include/dt-bindings/memory/tegra234-mc.h5
-rw-r--r--include/dt-bindings/mfd/stm32f7-rcc.h1
-rw-r--r--include/dt-bindings/mux/ti-serdes.h70
-rw-r--r--include/dt-bindings/pinctrl/k3.h60
-rw-r--r--include/dt-bindings/pinctrl/stm32-pinfunc.h3
-rw-r--r--include/dt-bindings/power/amlogic,c3-pwrc.h25
-rw-r--r--include/dt-bindings/power/amlogic,t7-pwrc.h63
-rw-r--r--include/dt-bindings/power/mediatek,mt8365-power.h19
-rw-r--r--include/dt-bindings/power/meson-a1-power.h2
-rw-r--r--include/dt-bindings/power/meson-axg-power.h2
-rw-r--r--include/dt-bindings/power/meson-g12a-power.h3
-rw-r--r--include/dt-bindings/power/meson-gxbb-power.h2
-rw-r--r--include/dt-bindings/power/meson-s4-power.h2
-rw-r--r--include/dt-bindings/power/meson-sm1-power.h2
-rw-r--r--include/dt-bindings/power/meson8-power.h2
-rw-r--r--include/dt-bindings/power/qcom,rpmhpd.h32
-rw-r--r--include/dt-bindings/power/qcom-rpmpd.h59
-rw-r--r--include/dt-bindings/power/r8a779f0-sysc.h2
-rw-r--r--include/dt-bindings/power/rk3588-power.h2
-rw-r--r--include/dt-bindings/power/starfive,jh7110-pmu.h6
-rw-r--r--include/dt-bindings/power/summit,smb347-charger.h2
-rw-r--r--include/dt-bindings/regulator/st,stm32mp13-regulator.h42
-rw-r--r--include/dt-bindings/reset/altr,rst-mgr-s10.h5
-rw-r--r--include/dt-bindings/reset/amlogic,c3-reset.h119
-rw-r--r--include/dt-bindings/reset/mediatek,mt7988-resets.h13
-rw-r--r--include/dt-bindings/reset/mt8188-resets.h80
-rw-r--r--include/dt-bindings/reset/nuvoton,ma35d1-reset.h108
-rw-r--r--include/dt-bindings/reset/qcom,gcc-ipq5018.h122
-rw-r--r--include/dt-bindings/reset/qcom,ipq9574-gcc.h1
-rw-r--r--include/dt-bindings/reset/qcom,sm8350-videocc.h18
-rw-r--r--include/dt-bindings/reset/qcom,sm8450-gpucc.h20
-rw-r--r--include/dt-bindings/reset/qcom,sm8650-gpucc.h20
-rw-r--r--include/dt-bindings/reset/rockchip,rk3588-cru.h2
-rw-r--r--include/dt-bindings/reset/st,stm32mp25-rcc.h167
-rw-r--r--include/dt-bindings/reset/starfive,jh7110-crg.h60
-rw-r--r--include/dt-bindings/reset/stm32mp1-resets.h2
-rw-r--r--include/dt-bindings/reset/stm32mp13-resets.h4
-rw-r--r--include/dt-bindings/reset/sun20i-d1-ccu.h2
-rw-r--r--include/dt-bindings/reset/sun20i-d1-r-ccu.h2
-rw-r--r--include/dt-bindings/reset/sun50i-a100-ccu.h2
-rw-r--r--include/dt-bindings/reset/sun50i-a100-r-ccu.h2
-rw-r--r--include/dt-bindings/reset/sun50i-h6-ccu.h2
-rw-r--r--include/dt-bindings/reset/sun50i-h6-r-ccu.h2
-rw-r--r--include/dt-bindings/reset/sun50i-h616-ccu.h2
-rw-r--r--include/dt-bindings/soc/rockchip,vop2.h4
-rw-r--r--include/dt-bindings/sound/qcom,q6dsp-lpass-ports.h8
-rw-r--r--include/dt-bindings/thermal/mediatek,lvts-thermal.h28
-rw-r--r--include/dt-bindings/thermal/tegra234-bpmp-thermal.h19
-rw-r--r--include/dt-bindings/watchdog/aspeed-wdt.h92
-rw-r--r--include/keys/asymmetric-parser.h2
-rw-r--r--include/keys/system_keyring.h14
-rw-r--r--include/kunit/attributes.h50
-rw-r--r--include/kunit/device.h80
-rw-r--r--include/kunit/resource.h113
-rw-r--r--include/kunit/skbuff.h56
-rw-r--r--include/kunit/static_stub.h8
-rw-r--r--include/kunit/test-bug.h2
-rw-r--r--include/kunit/test.h192
-rw-r--r--include/kvm/arm_arch_timer.h9
-rw-r--r--include/kvm/arm_pmu.h40
-rw-r--r--include/kvm/arm_psci.h2
-rw-r--r--include/kvm/arm_vgic.h6
-rw-r--r--include/kvm/iodev.h6
-rw-r--r--include/linux/acpi.h142
-rw-r--r--include/linux/acpi_agdi.h13
-rw-r--r--include/linux/acpi_amd_wbrf.h91
-rw-r--r--include/linux/acpi_apmt.h19
-rw-r--r--include/linux/acpi_iort.h6
-rw-r--r--include/linux/aer.h27
-rw-r--r--include/linux/amba/bus.h2
-rw-r--r--include/linux/amba/clcd-regs.h87
-rw-r--r--include/linux/amba/clcd.h290
-rw-r--r--include/linux/amba/serial.h261
-rw-r--r--include/linux/amd-iommu.h121
-rw-r--r--include/linux/amd-pmf-io.h50
-rw-r--r--include/linux/amd-pstate.h8
-rw-r--r--include/linux/anon_inodes.h4
-rw-r--r--include/linux/aperture.h16
-rw-r--r--include/linux/apple-mailbox.h19
-rw-r--r--include/linux/arch_topology.h8
-rw-r--r--include/linux/args.h28
-rw-r--r--include/linux/arm-cci.h2
-rw-r--r--include/linux/arm-smccc.h71
-rw-r--r--include/linux/arm_ffa.h89
-rw-r--r--include/linux/arm_sdei.h2
-rw-r--r--include/linux/array_size.h13
-rw-r--r--include/linux/async.h2
-rw-r--r--include/linux/ata.h11
-rw-r--r--include/linux/atmel-mci.h46
-rw-r--r--include/linux/atomic/atomic-arch-fallback.h5154
-rw-r--r--include/linux/atomic/atomic-instrumented.h3557
-rw-r--r--include/linux/atomic/atomic-long.h2122
-rw-r--r--include/linux/audit.h5
-rw-r--r--include/linux/audit_arch.h2
-rw-r--r--include/linux/avf/virtchnl.h178
-rw-r--r--include/linux/backing-dev-defs.h7
-rw-r--r--include/linux/backing-dev.h1
-rw-r--r--include/linux/backing-file.h42
-rw-r--r--include/linux/badblocks.h30
-rw-r--r--include/linux/binfmts.h10
-rw-r--r--include/linux/bio.h44
-rw-r--r--include/linux/bitmap-str.h16
-rw-r--r--include/linux/bitmap.h95
-rw-r--r--include/linux/blk-crypto-profile.h1
-rw-r--r--include/linux/blk-mq.h96
-rw-r--r--include/linux/blk-pm.h1
-rw-r--r--include/linux/blk_types.h35
-rw-r--r--include/linux/blkdev.h302
-rw-r--r--include/linux/blktrace_api.h6
-rw-r--r--include/linux/bootmem_info.h2
-rw-r--r--include/linux/bpf-cgroup-defs.h5
-rw-r--r--include/linux/bpf-cgroup.h103
-rw-r--r--include/linux/bpf.h266
-rw-r--r--include/linux/bpf_mem_alloc.h11
-rw-r--r--include/linux/bpf_mprog.h343
-rw-r--r--include/linux/bpf_types.h4
-rw-r--r--include/linux/bpf_verifier.h280
-rw-r--r--include/linux/bpfilter.h25
-rw-r--r--include/linux/brcmphy.h77
-rw-r--r--include/linux/bsg.h2
-rw-r--r--include/linux/btf.h35
-rw-r--r--include/linux/btf_ids.h3
-rw-r--r--include/linux/buffer_head.h151
-rw-r--r--include/linux/buildid.h3
-rw-r--r--include/linux/bvec.h2
-rw-r--r--include/linux/cache.h31
-rw-r--r--include/linux/cacheflush.h13
-rw-r--r--include/linux/cacheinfo.h1
-rw-r--r--include/linux/can/dev.h4
-rw-r--r--include/linux/can/length.h302
-rw-r--r--include/linux/can/rx-offload.h11
-rw-r--r--include/linux/cdrom.h12
-rw-r--r--include/linux/cdx/cdx_bus.h72
-rw-r--r--include/linux/ceph/ceph_debug.h38
-rw-r--r--include/linux/ceph/ceph_fs.h52
-rw-r--r--include/linux/ceph/mdsmap.h72
-rw-r--r--include/linux/ceph/messenger.h40
-rw-r--r--include/linux/ceph/mon_client.h2
-rw-r--r--include/linux/ceph/osd_client.h99
-rw-r--r--include/linux/ceph/rados.h4
-rw-r--r--include/linux/cfi.h16
-rw-r--r--include/linux/cgroup-defs.h44
-rw-r--r--include/linux/cgroup.h18
-rw-r--r--include/linux/cleanup.h250
-rw-r--r--include/linux/clk-provider.h25
-rw-r--r--include/linux/clk.h80
-rw-r--r--include/linux/clk/mmp.h18
-rw-r--r--include/linux/closure.h415
-rw-r--r--include/linux/comedi/comedi_8254.h51
-rw-r--r--include/linux/comedi/comedi_8255.h24
-rw-r--r--include/linux/comedi/comedidev.h2
-rw-r--r--include/linux/compaction.h104
-rw-r--r--include/linux/compat.h83
-rw-r--r--include/linux/compiler-clang.h10
-rw-r--r--include/linux/compiler-gcc.h24
-rw-r--r--include/linux/compiler.h13
-rw-r--r--include/linux/compiler_attributes.h31
-rw-r--r--include/linux/compiler_types.h63
-rw-r--r--include/linux/completion.h1
-rw-r--r--include/linux/connector.h7
-rw-r--r--include/linux/console.h141
-rw-r--r--include/linux/const.h8
-rw-r--r--include/linux/container.h2
-rw-r--r--include/linux/context_tracking.h4
-rw-r--r--include/linux/context_tracking_state.h2
-rw-r--r--include/linux/coresight.h187
-rw-r--r--include/linux/counter.h2
-rw-r--r--include/linux/cper.h23
-rw-r--r--include/linux/cpu.h49
-rw-r--r--include/linux/cpu_smt.h33
-rw-r--r--include/linux/cpufreq.h33
-rw-r--r--include/linux/cpuhotplug.h42
-rw-r--r--include/linux/cpumask.h123
-rw-r--r--include/linux/cpuset.h18
-rw-r--r--include/linux/crash_core.h70
-rw-r--r--include/linux/crash_dump.h8
-rw-r--r--include/linux/crc-ccitt.h7
-rw-r--r--include/linux/cred.h121
-rw-r--r--include/linux/crypto.h33
-rw-r--r--include/linux/cxl-event.h143
-rw-r--r--include/linux/damon.h126
-rw-r--r--include/linux/dax.h27
-rw-r--r--include/linux/dcache.h164
-rw-r--r--include/linux/debugfs.h19
-rw-r--r--include/linux/decompress/mm.h2
-rw-r--r--include/linux/delay.h1
-rw-r--r--include/linux/dev_printk.h2
-rw-r--r--include/linux/devfreq.h3
-rw-r--r--include/linux/device-mapper.h11
-rw-r--r--include/linux/device.h186
-rw-r--r--include/linux/device/bus.h7
-rw-r--r--include/linux/device/class.h2
-rw-r--r--include/linux/device/driver.h2
-rw-r--r--include/linux/dlm_plock.h2
-rw-r--r--include/linux/dm-bufio.h2
-rw-r--r--include/linux/dm-verity-loadpin.h2
-rw-r--r--include/linux/dma-buf.h11
-rw-r--r--include/linux/dma-direct.h19
-rw-r--r--include/linux/dma-fence.h37
-rw-r--r--include/linux/dma-map-ops.h82
-rw-r--r--include/linux/dma-mapping.h26
-rw-r--r--include/linux/dma/edma.h7
-rw-r--r--include/linux/dmaengine.h5
-rw-r--r--include/linux/dmar.h127
-rw-r--r--include/linux/dnotify.h4
-rw-r--r--include/linux/dpll.h181
-rw-r--r--include/linux/dsa/sja1105.h6
-rw-r--r--include/linux/dynamic_debug.h4
-rw-r--r--include/linux/edac.h7
-rw-r--r--include/linux/efi.h102
-rw-r--r--include/linux/elf-fdpic.h14
-rw-r--r--include/linux/energy_model.h7
-rw-r--r--include/linux/entry-common.h95
-rw-r--r--include/linux/err.h48
-rw-r--r--include/linux/ethtool.h112
-rw-r--r--include/linux/eventfd.h23
-rw-r--r--include/linux/evm.h20
-rw-r--r--include/linux/export-internal.h53
-rw-r--r--include/linux/export.h150
-rw-r--r--include/linux/exportfs.h88
-rw-r--r--include/linux/extcon.h12
-rw-r--r--include/linux/f2fs_fs.h73
-rw-r--r--include/linux/fault-inject.h9
-rw-r--r--include/linux/fb.h214
-rw-r--r--include/linux/fdtable.h36
-rw-r--r--include/linux/file.h18
-rw-r--r--include/linux/filelock.h12
-rw-r--r--include/linux/filter.h109
-rw-r--r--include/linux/firewire.h84
-rw-r--r--include/linux/firmware.h2
-rw-r--r--include/linux/firmware/imx/dsp.h6
-rw-r--r--include/linux/firmware/imx/sci.h16
-rw-r--r--include/linux/firmware/intel/stratix10-smc.h25
-rw-r--r--include/linux/firmware/intel/stratix10-svc-client.h5
-rw-r--r--include/linux/firmware/mediatek/mtk-adsp-ipc.h6
-rw-r--r--include/linux/firmware/meson/meson_sm.h2
-rw-r--r--include/linux/firmware/qcom/qcom_qseecom.h52
-rw-r--r--include/linux/firmware/qcom/qcom_scm.h121
-rw-r--r--include/linux/firmware/xlnx-zynqmp.h86
-rw-r--r--include/linux/fortify-string.h214
-rw-r--r--include/linux/fprobe.h11
-rw-r--r--include/linux/framer/framer-provider.h194
-rw-r--r--include/linux/framer/framer.h205
-rw-r--r--include/linux/framer/pef2256.h31
-rw-r--r--include/linux/freelist.h129
-rw-r--r--include/linux/frontswap.h91
-rw-r--r--include/linux/fs.h500
-rw-r--r--include/linux/fs_context.h8
-rw-r--r--include/linux/fs_enet_pd.h165
-rw-r--r--include/linux/fs_stack.h8
-rw-r--r--include/linux/fs_uart_pd.h71
-rw-r--r--include/linux/fscache-cache.h3
-rw-r--r--include/linux/fscache.h45
-rw-r--r--include/linux/fscrypt.h82
-rw-r--r--include/linux/fsl/mc.h2
-rw-r--r--include/linux/fsnotify.h72
-rw-r--r--include/linux/fsnotify_backend.h17
-rw-r--r--include/linux/fsverity.h14
-rw-r--r--include/linux/ftrace.h22
-rw-r--r--include/linux/fw_table.h59
-rw-r--r--include/linux/fwnode.h2
-rw-r--r--include/linux/gameport.h13
-rw-r--r--include/linux/generic-radix-tree.h68
-rw-r--r--include/linux/genl_magic_func.h27
-rw-r--r--include/linux/genl_magic_struct.h8
-rw-r--r--include/linux/gfp.h34
-rw-r--r--include/linux/gfp_types.h17
-rw-r--r--include/linux/gpio.h23
-rw-r--r--include/linux/gpio/consumer.h16
-rw-r--r--include/linux/gpio/driver.h110
-rw-r--r--include/linux/gpio_keys.h2
-rw-r--r--include/linux/greybus/svc.h3
-rw-r--r--include/linux/habanalabs/cpucp_if.h1423
-rw-r--r--include/linux/habanalabs/hl_boot_if.h792
-rw-r--r--include/linux/hdmi.h4
-rw-r--r--include/linux/hid-roccat.h2
-rw-r--r--include/linux/hid.h36
-rw-r--r--include/linux/hid_bpf.h13
-rw-r--r--include/linux/highmem.h138
-rw-r--r--include/linux/hisi_acc_qm.h69
-rw-r--r--include/linux/hrtimer.h54
-rw-r--r--include/linux/hrtimer_types.h50
-rw-r--r--include/linux/hsi/ssi_protocol.h1
-rw-r--r--include/linux/huge_mm.h193
-rw-r--r--include/linux/hugetlb.h122
-rw-r--r--include/linux/hugetlb_cgroup.h11
-rw-r--r--include/linux/hw_breakpoint.h3
-rw-r--r--include/linux/hw_random.h1
-rw-r--r--include/linux/hwmon.h10
-rw-r--r--include/linux/hyperv.h31
-rw-r--r--include/linux/i2c-atr.h116
-rw-r--r--include/linux/i2c.h18
-rw-r--r--include/linux/i3c/device.h4
-rw-r--r--include/linux/i3c/master.h26
-rw-r--r--include/linux/i8042.h1
-rw-r--r--include/linux/i8254.h21
-rw-r--r--include/linux/icmpv6.h10
-rw-r--r--include/linux/idr.h6
-rw-r--r--include/linux/ieee80211.h663
-rw-r--r--include/linux/if_arp.h4
-rw-r--r--include/linux/if_team.h6
-rw-r--r--include/linux/if_vlan.h10
-rw-r--r--include/linux/igmp.h2
-rw-r--r--include/linux/iio/adc/ad_sigma_delta.h4
-rw-r--r--include/linux/iio/adc/adi-axi-adc.h4
-rw-r--r--include/linux/iio/buffer-dma.h7
-rw-r--r--include/linux/iio/common/inv_sensors_timestamp.h95
-rw-r--r--include/linux/iio/common/st_sensors.h5
-rw-r--r--include/linux/iio/consumer.h37
-rw-r--r--include/linux/iio/iio.h23
-rw-r--r--include/linux/iio/imu/adis.h3
-rw-r--r--include/linux/iio/sw_device.h3
-rw-r--r--include/linux/iio/sw_trigger.h3
-rw-r--r--include/linux/iio/trigger.h1
-rw-r--r--include/linux/iio/types.h3
-rw-r--r--include/linux/indirect_call_wrapper.h2
-rw-r--r--include/linux/init.h29
-rw-r--r--include/linux/init_task.h7
-rw-r--r--include/linux/input.h2
-rw-r--r--include/linux/input/as5011.h1
-rw-r--r--include/linux/input/mt.h2
-rw-r--r--include/linux/input/navpoint.h1
-rw-r--r--include/linux/instruction_pointer.h5
-rw-r--r--include/linux/int_log.h (renamed from include/media/dvb_math.h)18
-rw-r--r--include/linux/intel-ish-client-if.h3
-rw-r--r--include/linux/intel_rapl.h54
-rw-r--r--include/linux/intel_tpmi.h22
-rw-r--r--include/linux/interconnect-clk.h22
-rw-r--r--include/linux/interconnect-provider.h2
-rw-r--r--include/linux/interconnect.h8
-rw-r--r--include/linux/interrupt.h8
-rw-r--r--include/linux/io-pgtable.h38
-rw-r--r--include/linux/io.h5
-rw-r--r--include/linux/io_uring.h70
-rw-r--r--include/linux/io_uring/cmd.h77
-rw-r--r--include/linux/io_uring_types.h172
-rw-r--r--include/linux/iomap.h7
-rw-r--r--include/linux/iommu.h347
-rw-r--r--include/linux/iommufd.h9
-rw-r--r--include/linux/iopoll.h24
-rw-r--r--include/linux/ioport.h15
-rw-r--r--include/linux/ioprio.h25
-rw-r--r--include/linux/ioremap.h30
-rw-r--r--include/linux/iosys-map.h48
-rw-r--r--include/linux/iov_iter.h274
-rw-r--r--include/linux/iova_bitmap.h26
-rw-r--r--include/linux/ipc.h2
-rw-r--r--include/linux/ipv6.h67
-rw-r--r--include/linux/irq.h71
-rw-r--r--include/linux/irq_work.h3
-rw-r--r--include/linux/irqchip/mmp.h10
-rw-r--r--include/linux/irqchip/mxs.h11
-rw-r--r--include/linux/irqdesc.h3
-rw-r--r--include/linux/irqdomain.h2
-rw-r--r--include/linux/irqflags.h21
-rw-r--r--include/linux/irqflags_types.h22
-rw-r--r--include/linux/iscsi_ibft.h10
-rw-r--r--include/linux/ism.h8
-rw-r--r--include/linux/iversion.h2
-rw-r--r--include/linux/jbd2.h96
-rw-r--r--include/linux/jiffies.h197
-rw-r--r--include/linux/jump_label.h2
-rw-r--r--include/linux/kallsyms.h17
-rw-r--r--include/linux/kasan.h174
-rw-r--r--include/linux/kcov.h17
-rw-r--r--include/linux/kdb.h2
-rw-r--r--include/linux/kernel.h44
-rw-r--r--include/linux/kernfs.h5
-rw-r--r--include/linux/kexec.h61
-rw-r--r--include/linux/key-type.h1
-rw-r--r--include/linux/key.h4
-rw-r--r--include/linux/kfence.h13
-rw-r--r--include/linux/kgdb.h1
-rw-r--r--include/linux/kmsan_types.h2
-rw-r--r--include/linux/kobject.h8
-rw-r--r--include/linux/kprobes.h28
-rw-r--r--include/linux/ksm.h30
-rw-r--r--include/linux/kstrtox.h5
-rw-r--r--include/linux/kthread.h3
-rw-r--r--include/linux/ktime.h8
-rw-r--r--include/linux/kvm_host.h271
-rw-r--r--include/linux/kvm_types.h1
-rw-r--r--include/linux/leds.h124
-rw-r--r--include/linux/libata.h127
-rw-r--r--include/linux/libps2.h62
-rw-r--r--include/linux/limits.h2
-rw-r--r--include/linux/linkmode.h23
-rw-r--r--include/linux/list.h117
-rw-r--r--include/linux/list_lru.h88
-rw-r--r--include/linux/llist.h46
-rw-r--r--include/linux/lockd/bind.h2
-rw-r--r--include/linux/lockd/lockd.h4
-rw-r--r--include/linux/lockdep.h72
-rw-r--r--include/linux/lockdep_types.h67
-rw-r--r--include/linux/logic_pio.h3
-rw-r--r--include/linux/lsm_hook_defs.h38
-rw-r--r--include/linux/lsm_hooks.h37
-rw-r--r--include/linux/lwq.h124
-rw-r--r--include/linux/maple.h1
-rw-r--r--include/linux/maple_tree.h302
-rw-r--r--include/linux/marvell_phy.h1
-rw-r--r--include/linux/math.h41
-rw-r--r--include/linux/math64.h2
-rw-r--r--include/linux/mc146818rtc.h3
-rw-r--r--include/linux/mcb.h1
-rw-r--r--include/linux/mdio.h78
-rw-r--r--include/linux/mdio/mdio-regmap.h26
-rw-r--r--include/linux/mei_cl_bus.h12
-rw-r--r--include/linux/memblock.h27
-rw-r--r--include/linux/memcontrol.h185
-rw-r--r--include/linux/memory-tiers.h45
-rw-r--r--include/linux/memory.h8
-rw-r--r--include/linux/memory_hotplug.h11
-rw-r--r--include/linux/mempolicy.h58
-rw-r--r--include/linux/mempool.h1
-rw-r--r--include/linux/memremap.h12
-rw-r--r--include/linux/mfd/88pm860x.h6
-rw-r--r--include/linux/mfd/abx500/ab8500.h10
-rw-r--r--include/linux/mfd/axp20x.h116
-rw-r--r--include/linux/mfd/core.h2
-rw-r--r--include/linux/mfd/cs42l43-regs.h1184
-rw-r--r--include/linux/mfd/cs42l43.h102
-rw-r--r--include/linux/mfd/dbx500-prcmu.h21
-rw-r--r--include/linux/mfd/hi655x-pmic.h1
-rw-r--r--include/linux/mfd/intel-m10-bmc.h43
-rw-r--r--include/linux/mfd/lpc_ich.h7
-rw-r--r--include/linux/mfd/max5970.h (renamed from include/linux/mfd/max597x.h)16
-rw-r--r--include/linux/mfd/max77541.h91
-rw-r--r--include/linux/mfd/max77686-private.h4
-rw-r--r--include/linux/mfd/max77693-private.h2
-rw-r--r--include/linux/mfd/max77843-private.h2
-rw-r--r--include/linux/mfd/mt6358/registers.h23
-rw-r--r--include/linux/mfd/rk808.h417
-rw-r--r--include/linux/mfd/rt5033-private.h64
-rw-r--r--include/linux/mfd/rt5033.h24
-rw-r--r--include/linux/mfd/rz-mtu3.h66
-rw-r--r--include/linux/mfd/si476x-platform.h2
-rw-r--r--include/linux/mfd/stm32-timers.h11
-rw-r--r--include/linux/mfd/stpmic1.h12
-rw-r--r--include/linux/mfd/tps65010.h11
-rw-r--r--include/linux/mfd/tps65086.h23
-rw-r--r--include/linux/mfd/tps65910.h2
-rw-r--r--include/linux/mfd/tps6594.h1020
-rw-r--r--include/linux/mfd/twl.h3
-rw-r--r--include/linux/mhi.h10
-rw-r--r--include/linux/mhi_ep.h36
-rw-r--r--include/linux/micrel_phy.h11
-rw-r--r--include/linux/migrate.h24
-rw-r--r--include/linux/mii_timestamper.h4
-rw-r--r--include/linux/minmax.h174
-rw-r--r--include/linux/misc_cgroup.h28
-rw-r--r--include/linux/mlx4/device.h20
-rw-r--r--include/linux/mlx4/driver.h42
-rw-r--r--include/linux/mlx5/device.h77
-rw-r--r--include/linux/mlx5/driver.h143
-rw-r--r--include/linux/mlx5/eswitch.h11
-rw-r--r--include/linux/mlx5/fs.h6
-rw-r--r--include/linux/mlx5/macsec.h32
-rw-r--r--include/linux/mlx5/mlx5_ifc.h326
-rw-r--r--include/linux/mlx5/mlx5_ifc_vdpa.h11
-rw-r--r--include/linux/mlx5/port.h3
-rw-r--r--include/linux/mlx5/qp.h5
-rw-r--r--include/linux/mlx5/vport.h3
-rw-r--r--include/linux/mm.h1008
-rw-r--r--include/linux/mm_inline.h78
-rw-r--r--include/linux/mm_types.h315
-rw-r--r--include/linux/mm_types_task.h9
-rw-r--r--include/linux/mman.h5
-rw-r--r--include/linux/mmap_lock.h28
-rw-r--r--include/linux/mmc/card.h8
-rw-r--r--include/linux/mmc/core.h1
-rw-r--r--include/linux/mmc/host.h9
-rw-r--r--include/linux/mmc/mmc.h10
-rw-r--r--include/linux/mmc/sdio_ids.h3
-rw-r--r--include/linux/mmdebug.h14
-rw-r--r--include/linux/mmu_notifier.h113
-rw-r--r--include/linux/mmzone.h187
-rw-r--r--include/linux/mnt_idmapping.h6
-rw-r--r--include/linux/mod_devicetable.h33
-rw-r--r--include/linux/module.h13
-rw-r--r--include/linux/module_symbol.h6
-rw-r--r--include/linux/moduleloader.h5
-rw-r--r--include/linux/moduleparam.h14
-rw-r--r--include/linux/mount.h12
-rw-r--r--include/linux/moxtet.h2
-rw-r--r--include/linux/mroute.h11
-rw-r--r--include/linux/mroute6.h31
-rw-r--r--include/linux/msi.h6
-rw-r--r--include/linux/mtd/blktrans.h2
-rw-r--r--include/linux/mtd/cfi.h2
-rw-r--r--include/linux/mtd/jedec.h3
-rw-r--r--include/linux/mtd/mtd.h2
-rw-r--r--include/linux/mtd/onfi.h1
-rw-r--r--include/linux/mtd/qinfo.h2
-rw-r--r--include/linux/mtd/rawnand.h18
-rw-r--r--include/linux/mtd/spinand.h1
-rw-r--r--include/linux/mutex.h57
-rw-r--r--include/linux/mutex_types.h71
-rw-r--r--include/linux/namei.h27
-rw-r--r--include/linux/net.h12
-rw-r--r--include/linux/net/intel/i40e_client.h2
-rw-r--r--include/linux/net_tstamp.h30
-rw-r--r--include/linux/netdevice.h435
-rw-r--r--include/linux/netfilter.h25
-rw-r--r--include/linux/netfilter/ipset/ip_set.h4
-rw-r--r--include/linux/netfilter/nf_conntrack_h323.h4
-rw-r--r--include/linux/netfilter/nf_conntrack_proto_gre.h1
-rw-r--r--include/linux/netfilter/nf_conntrack_sctp.h1
-rw-r--r--include/linux/netfilter_bridge.h6
-rw-r--r--include/linux/netfilter_ipv6.h8
-rw-r--r--include/linux/netfs.h185
-rw-r--r--include/linux/netlink.h10
-rw-r--r--include/linux/nfs4.h284
-rw-r--r--include/linux/nfs_fs.h1
-rw-r--r--include/linux/nfs_fs_sb.h10
-rw-r--r--include/linux/nfs_page.h4
-rw-r--r--include/linux/nfs_xdr.h5
-rw-r--r--include/linux/nls.h2
-rw-r--r--include/linux/nmi.h97
-rw-r--r--include/linux/node.h8
-rw-r--r--include/linux/nodemask.h4
-rw-r--r--include/linux/nodemask_types.h10
-rw-r--r--include/linux/notifier.h11
-rw-r--r--include/linux/nsproxy.h8
-rw-r--r--include/linux/nubus.h3
-rw-r--r--include/linux/numa.h29
-rw-r--r--include/linux/nvme-auth.h7
-rw-r--r--include/linux/nvme-fc-driver.h10
-rw-r--r--include/linux/nvme-keyring.h28
-rw-r--r--include/linux/nvme-tcp.h6
-rw-r--r--include/linux/nvme.h73
-rw-r--r--include/linux/nvmem-consumer.h16
-rw-r--r--include/linux/nvmem-provider.h74
-rw-r--r--include/linux/objpool.h181
-rw-r--r--include/linux/objtool.h13
-rw-r--r--include/linux/of.h36
-rw-r--r--include/linux/of_device.h11
-rw-r--r--include/linux/of_iommu.h13
-rw-r--r--include/linux/of_platform.h10
-rw-r--r--include/linux/oid_registry.h24
-rw-r--r--include/linux/olpc-ec.h2
-rw-r--r--include/linux/osq_lock.h5
-rw-r--r--include/linux/overflow.h53
-rw-r--r--include/linux/page-flags.h118
-rw-r--r--include/linux/page-isolation.h23
-rw-r--r--include/linux/page_ext.h9
-rw-r--r--include/linux/page_idle.h5
-rw-r--r--include/linux/page_table_check.h71
-rw-r--r--include/linux/pageblock-flags.h4
-rw-r--r--include/linux/pagemap.h215
-rw-r--r--include/linux/pagevec.h67
-rw-r--r--include/linux/pagewalk.h11
-rw-r--r--include/linux/panic.h3
-rw-r--r--include/linux/parport.h2
-rw-r--r--include/linux/pci-ecam.h2
-rw-r--r--include/linux/pci-epc.h15
-rw-r--r--include/linux/pci-epf.h15
-rw-r--r--include/linux/pci.h144
-rw-r--r--include/linux/pci_ids.h130
-rw-r--r--include/linux/pcs-altera-tse.h17
-rw-r--r--include/linux/pcs-lynx.h5
-rw-r--r--include/linux/pcs/pcs-xpcs.h17
-rw-r--r--include/linux/pds/pds_adminq.h622
-rw-r--r--include/linux/pds/pds_common.h28
-rw-r--r--include/linux/pds/pds_core_if.h1
-rw-r--r--include/linux/peci.h4
-rw-r--r--include/linux/percpu-defs.h45
-rw-r--r--include/linux/percpu.h19
-rw-r--r--include/linux/percpu_counter.h71
-rw-r--r--include/linux/perf/arm_pmu.h33
-rw-r--r--include/linux/perf/arm_pmuv3.h43
-rw-r--r--include/linux/perf/riscv_pmu.h12
-rw-r--r--include/linux/perf_event.h89
-rw-r--r--include/linux/pgtable.h361
-rw-r--r--include/linux/phy.h196
-rw-r--r--include/linux/phy/phy.h2
-rw-r--r--include/linux/phylib_stubs.h68
-rw-r--r--include/linux/phylink.h200
-rw-r--r--include/linux/pid.h142
-rw-r--r--include/linux/pid_namespace.h39
-rw-r--r--include/linux/pid_types.h16
-rw-r--r--include/linux/pinctrl/consumer.h61
-rw-r--r--include/linux/pinctrl/machine.h8
-rw-r--r--include/linux/pinctrl/pinconf-generic.h10
-rw-r--r--include/linux/pinctrl/pinconf.h16
-rw-r--r--include/linux/pinctrl/pinctrl.h24
-rw-r--r--include/linux/pinctrl/pinmux.h22
-rw-r--r--include/linux/pipe_fs_i.h30
-rw-r--r--include/linux/pktcdvd.h5
-rw-r--r--include/linux/platform_data/asoc-pxa.h1
-rw-r--r--include/linux/platform_data/bd6107.h2
-rw-r--r--include/linux/platform_data/cros_ec_commands.h66
-rw-r--r--include/linux/platform_data/cros_ec_proto.h2
-rw-r--r--include/linux/platform_data/davinci-cpufreq.h6
-rw-r--r--include/linux/platform_data/dma-ste-dma40.h209
-rw-r--r--include/linux/platform_data/gpio-omap.h3
-rw-r--r--include/linux/platform_data/gpio_backlight.h2
-rw-r--r--include/linux/platform_data/gsc_hwmon.h2
-rw-r--r--include/linux/platform_data/hirschmann-hellcreek.h2
-rw-r--r--include/linux/platform_data/i2c-mux-reg.h2
-rw-r--r--include/linux/platform_data/keypad-omap.h3
-rw-r--r--include/linux/platform_data/lcd-mipid.h2
-rw-r--r--include/linux/platform_data/leds-lp55xx.h3
-rw-r--r--include/linux/platform_data/lv5207lp.h2
-rw-r--r--include/linux/platform_data/microchip-ksz.h23
-rw-r--r--include/linux/platform_data/mmc-omap.h2
-rw-r--r--include/linux/platform_data/omap-twl4030.h3
-rw-r--r--include/linux/platform_data/pca953x.h13
-rw-r--r--include/linux/platform_data/pxa2xx_udc.h6
-rw-r--r--include/linux/platform_data/rtc-ds2404.h20
-rw-r--r--include/linux/platform_data/shmob_drm.h57
-rw-r--r--include/linux/platform_data/sht3x.h15
-rw-r--r--include/linux/platform_data/si5351.h2
-rw-r--r--include/linux/platform_data/spi-s3c64xx.h1
-rw-r--r--include/linux/platform_data/st_sensors_pdata.h2
-rw-r--r--include/linux/platform_data/video-mx3fb.h50
-rw-r--r--include/linux/platform_data/x86/asus-wmi.h26
-rw-r--r--include/linux/platform_data/x86/clk-lpss.h2
-rw-r--r--include/linux/platform_data/x86/simatic-ipc-base.h5
-rw-r--r--include/linux/platform_data/x86/simatic-ipc.h6
-rw-r--r--include/linux/platform_device.h28
-rw-r--r--include/linux/plist.h12
-rw-r--r--include/linux/plist_types.h17
-rw-r--r--include/linux/pm.h57
-rw-r--r--include/linux/pm_clock.h4
-rw-r--r--include/linux/pm_domain.h23
-rw-r--r--include/linux/pm_opp.h122
-rw-r--r--include/linux/pm_runtime.h2
-rw-r--r--include/linux/pm_wakeirq.h10
-rw-r--r--include/linux/pm_wakeup.h10
-rw-r--r--include/linux/pnp.h10
-rw-r--r--include/linux/poison.h5
-rw-r--r--include/linux/posix-clock.h35
-rw-r--r--include/linux/posix-timers.h69
-rw-r--r--include/linux/posix-timers_types.h80
-rw-r--r--include/linux/power/bq27xxx_battery.h1
-rw-r--r--include/linux/power/power_on_reason.h19
-rw-r--r--include/linux/power_supply.h1
-rw-r--r--include/linux/pr.h25
-rw-r--r--include/linux/prandom.h1
-rw-r--r--include/linux/preempt.h26
-rw-r--r--include/linux/prefetch.h7
-rw-r--r--include/linux/proc_fs.h4
-rw-r--r--include/linux/property.h61
-rw-r--r--include/linux/pruss_driver.h123
-rw-r--r--include/linux/pseudo_fs.h2
-rw-r--r--include/linux/psi.h5
-rw-r--r--include/linux/psi_types.h3
-rw-r--r--include/linux/psp-platform-access.h4
-rw-r--r--include/linux/ptp_clock_kernel.h11
-rw-r--r--include/linux/ptp_mock.h38
-rw-r--r--include/linux/ptrace.h4
-rw-r--r--include/linux/pwm.h118
-rw-r--r--include/linux/qed/qed_fcoe_if.h3
-rw-r--r--include/linux/quota.h4
-rw-r--r--include/linux/quotaops.h17
-rw-r--r--include/linux/raid/pq.h6
-rw-r--r--include/linux/raid_class.h4
-rw-r--r--include/linux/ramfs.h1
-rw-r--r--include/linux/range.h8
-rw-r--r--include/linux/rbtree_augmented.h26
-rw-r--r--include/linux/rbtree_latch.h2
-rw-r--r--include/linux/rcu_notifier.h32
-rw-r--r--include/linux/rculist.h2
-rw-r--r--include/linux/rculist_nulls.h4
-rw-r--r--include/linux/rcupdate.h68
-rw-r--r--include/linux/rcupdate_trace.h1
-rw-r--r--include/linux/rcupdate_wait.h15
-rw-r--r--include/linux/rcutiny.h4
-rw-r--r--include/linux/rcutree.h18
-rw-r--r--include/linux/rcuwait.h23
-rw-r--r--include/linux/reboot.h15
-rw-r--r--include/linux/ref_tracker.h25
-rw-r--r--include/linux/refcount.h13
-rw-r--r--include/linux/refcount_types.h19
-rw-r--r--include/linux/regmap.h34
-rw-r--r--include/linux/regulator/consumer.h47
-rw-r--r--include/linux/regulator/db8500-prcmu.h6
-rw-r--r--include/linux/regulator/driver.h20
-rw-r--r--include/linux/regulator/machine.h18
-rw-r--r--include/linux/regulator/mt6358-regulator.h13
-rw-r--r--include/linux/remoteproc.h4
-rw-r--r--include/linux/resctrl.h4
-rw-r--r--include/linux/restart_block.h2
-rw-r--r--include/linux/resume_user_mode.h3
-rw-r--r--include/linux/rethook.h26
-rw-r--r--include/linux/rhashtable-types.h2
-rw-r--r--include/linux/ring_buffer.h18
-rw-r--r--include/linux/rmap.h412
-rw-r--r--include/linux/root_dev.h9
-rw-r--r--include/linux/rpmsg.h15
-rw-r--r--include/linux/rseq.h131
-rw-r--r--include/linux/rslib.h1
-rw-r--r--include/linux/rtc.h18
-rw-r--r--include/linux/rtnetlink.h41
-rw-r--r--include/linux/rtsx_pci.h8
-rw-r--r--include/linux/rwsem.h8
-rw-r--r--include/linux/scatterlist.h84
-rw-r--r--include/linux/sched.h469
-rw-r--r--include/linux/sched/clock.h17
-rw-r--r--include/linux/sched/coredump.h19
-rw-r--r--include/linux/sched/deadline.h4
-rw-r--r--include/linux/sched/isolation.h4
-rw-r--r--include/linux/sched/mm.h4
-rw-r--r--include/linux/sched/numa_balancing.h16
-rw-r--r--include/linux/sched/rt.h4
-rw-r--r--include/linux/sched/sd_flags.h12
-rw-r--r--include/linux/sched/signal.h53
-rw-r--r--include/linux/sched/smt.h2
-rw-r--r--include/linux/sched/task.h44
-rw-r--r--include/linux/sched/task_stack.h1
-rw-r--r--include/linux/sched/topology.h20
-rw-r--r--include/linux/sched/types.h2
-rw-r--r--include/linux/sched/vhost_task.h7
-rw-r--r--include/linux/scmi_protocol.h61
-rw-r--r--include/linux/seccomp.h24
-rw-r--r--include/linux/seccomp_types.h35
-rw-r--r--include/linux/secretmem.h15
-rw-r--r--include/linux/security.h104
-rw-r--r--include/linux/sed-opal-key.h26
-rw-r--r--include/linux/sed-opal.h5
-rw-r--r--include/linux/sem.h10
-rw-r--r--include/linux/sem_types.h13
-rw-r--r--include/linux/seq_buf.h40
-rw-r--r--include/linux/seq_file.h22
-rw-r--r--include/linux/seqlock.h148
-rw-r--r--include/linux/seqlock_types.h93
-rw-r--r--include/linux/serdev.h29
-rw-r--r--include/linux/serial_8250.h45
-rw-r--r--include/linux/serial_core.h153
-rw-r--r--include/linux/sfp.h14
-rw-r--r--include/linux/sh_intc.h6
-rw-r--r--include/linux/shm.h4
-rw-r--r--include/linux/shmem_fs.h45
-rw-r--r--include/linux/shrinker.h87
-rw-r--r--include/linux/signal.h1
-rw-r--r--include/linux/signal_types.h2
-rw-r--r--include/linux/sizes.h9
-rw-r--r--include/linux/skbuff.h212
-rw-r--r--include/linux/skmsg.h13
-rw-r--r--include/linux/slab.h93
-rw-r--r--include/linux/slab_def.h124
-rw-r--r--include/linux/slub_def.h198
-rw-r--r--include/linux/smp.h2
-rw-r--r--include/linux/smscphy.h34
-rw-r--r--include/linux/soc/apple/rtkit.h18
-rw-r--r--include/linux/soc/mediatek/infracfg.h41
-rw-r--r--include/linux/soc/mediatek/mtk-mmsys.h11
-rw-r--r--include/linux/soc/mediatek/mtk_wed.h76
-rw-r--r--include/linux/soc/pxa/smemc.h16
-rw-r--r--include/linux/soc/qcom/geni-se.h13
-rw-r--r--include/linux/soc/qcom/llcc-qcom.h2
-rw-r--r--include/linux/soc/qcom/qcom_aoss.h4
-rw-r--r--include/linux/soc/qcom/smd-rpm.h20
-rw-r--r--include/linux/soc/qcom/smem.h3
-rw-r--r--include/linux/soc/qcom/socinfo.h77
-rw-r--r--include/linux/socket.h7
-rw-r--r--include/linux/sockptr.h23
-rw-r--r--include/linux/soundwire/sdw.h40
-rw-r--r--include/linux/soundwire/sdw_intel.h106
-rw-r--r--include/linux/spi/ads7846.h2
-rw-r--r--include/linux/spi/corgi_lcd.h2
-rw-r--r--include/linux/spi/pxa2xx_spi.h4
-rw-r--r--include/linux/spi/sh_msiof.h4
-rw-r--r--include/linux/spi/spi-mem.h6
-rw-r--r--include/linux/spi/spi.h266
-rw-r--r--include/linux/spinlock.h103
-rw-r--r--include/linux/splice.h47
-rw-r--r--include/linux/spmi.h5
-rw-r--r--include/linux/sprintf.h27
-rw-r--r--include/linux/srcu.h13
-rw-r--r--include/linux/srcutiny.h4
-rw-r--r--include/linux/stackdepot.h61
-rw-r--r--include/linux/stackleak.h6
-rw-r--r--include/linux/stmmac.h52
-rw-r--r--include/linux/string.h53
-rw-r--r--include/linux/string_choices.h45
-rw-r--r--include/linux/string_helpers.h32
-rw-r--r--include/linux/stringify.h2
-rw-r--r--include/linux/sunrpc/auth.h2
-rw-r--r--include/linux/sunrpc/bc_xprt.h3
-rw-r--r--include/linux/sunrpc/cache.h12
-rw-r--r--include/linux/sunrpc/clnt.h17
-rw-r--r--include/linux/sunrpc/sched.h14
-rw-r--r--include/linux/sunrpc/stats.h23
-rw-r--r--include/linux/sunrpc/svc.h158
-rw-r--r--include/linux/sunrpc/svc_rdma.h72
-rw-r--r--include/linux/sunrpc/svc_xprt.h40
-rw-r--r--include/linux/sunrpc/svcauth.h60
-rw-r--r--include/linux/sunrpc/svcsock.h9
-rw-r--r--include/linux/sunrpc/xdr.h9
-rw-r--r--include/linux/sunrpc/xprt.h34
-rw-r--r--include/linux/sunrpc/xprtsock.h3
-rw-r--r--include/linux/superhyway.h107
-rw-r--r--include/linux/surface_aggregator/controller.h2
-rw-r--r--include/linux/surface_aggregator/device.h1
-rw-r--r--include/linux/surface_aggregator/serial_hub.h4
-rw-r--r--include/linux/suspend.h27
-rw-r--r--include/linux/swait.h2
-rw-r--r--include/linux/swap.h64
-rw-r--r--include/linux/swapfile.h5
-rw-r--r--include/linux/swapops.h32
-rw-r--r--include/linux/swiotlb.h142
-rw-r--r--include/linux/switchtec.h1
-rw-r--r--include/linux/syscall_user_dispatch.h9
-rw-r--r--include/linux/syscall_user_dispatch_types.h22
-rw-r--r--include/linux/syscalls.h191
-rw-r--r--include/linux/sysctl.h85
-rw-r--r--include/linux/sysfb.h3
-rw-r--r--include/linux/sysfs.h2
-rw-r--r--include/linux/sysrq.h18
-rw-r--r--include/linux/tca6416_keypad.h1
-rw-r--r--include/linux/tcp.h311
-rw-r--r--include/linux/tee_drv.h16
-rw-r--r--include/linux/tegra-icc.h65
-rw-r--r--include/linux/thermal.h139
-rw-r--r--include/linux/thread_info.h5
-rw-r--r--include/linux/thunderbolt.h20
-rw-r--r--include/linux/ti_wilink_st.h2
-rw-r--r--include/linux/tick.h10
-rw-r--r--include/linux/time_namespace.h6
-rw-r--r--include/linux/timekeeping.h1
-rw-r--r--include/linux/timer.h19
-rw-r--r--include/linux/timer_types.h23
-rw-r--r--include/linux/timerqueue.h13
-rw-r--r--include/linux/timerqueue_types.h17
-rw-r--r--include/linux/tnum.h4
-rw-r--r--include/linux/topology.h2
-rw-r--r--include/linux/torture.h16
-rw-r--r--include/linux/tpm.h1
-rw-r--r--include/linux/trace.h4
-rw-r--r--include/linux/trace_events.h36
-rw-r--r--include/linux/trace_seq.h17
-rw-r--r--include/linux/tracefs.h66
-rw-r--r--include/linux/tracepoint-defs.h1
-rw-r--r--include/linux/tracepoint.h5
-rw-r--r--include/linux/tsm.h69
-rw-r--r--include/linux/tty.h51
-rw-r--r--include/linux/tty_buffer.h20
-rw-r--r--include/linux/tty_driver.h14
-rw-r--r--include/linux/tty_flip.h70
-rw-r--r--include/linux/tty_ldisc.h67
-rw-r--r--include/linux/tty_port.h15
-rw-r--r--include/linux/types.h9
-rw-r--r--include/linux/uacce.h4
-rw-r--r--include/linux/ucs2_string.h1
-rw-r--r--include/linux/udp.h66
-rw-r--r--include/linux/uidgid.h24
-rw-r--r--include/linux/uidgid_types.h15
-rw-r--r--include/linux/uio.h86
-rw-r--r--include/linux/ulpi/driver.h2
-rw-r--r--include/linux/umh.h2
-rw-r--r--include/linux/units.h5
-rw-r--r--include/linux/usb.h59
-rw-r--r--include/linux/usb/ch9.h5
-rw-r--r--include/linux/usb/chipidea.h2
-rw-r--r--include/linux/usb/composite.h31
-rw-r--r--include/linux/usb/gadget.h9
-rw-r--r--include/linux/usb/hcd.h26
-rw-r--r--include/linux/usb/ljca.h145
-rw-r--r--include/linux/usb/midi-v2.h94
-rw-r--r--include/linux/usb/musb.h13
-rw-r--r--include/linux/usb/pd.h1
-rw-r--r--include/linux/usb/pd_vdo.h1
-rw-r--r--include/linux/usb/quirks.h3
-rw-r--r--include/linux/usb/r8152.h2
-rw-r--r--include/linux/usb/renesas_usbhs.h10
-rw-r--r--include/linux/usb/serial.h2
-rw-r--r--include/linux/usb/tcpci.h6
-rw-r--r--include/linux/usb/tcpm.h1
-rw-r--r--include/linux/usb/typec.h37
-rw-r--r--include/linux/usb/typec_altmode.h2
-rw-r--r--include/linux/usb/typec_dp.h28
-rw-r--r--include/linux/usb/typec_mux.h11
-rw-r--r--include/linux/usb/typec_tbt.h1
-rw-r--r--include/linux/user_namespace.h8
-rw-r--r--include/linux/userfaultfd_k.h49
-rw-r--r--include/linux/vdpa.h45
-rw-r--r--include/linux/verification.h1
-rw-r--r--include/linux/vfio.h72
-rw-r--r--include/linux/vfio_pci_core.h46
-rw-r--r--include/linux/vgaarb.h27
-rw-r--r--include/linux/via-gpio.h14
-rw-r--r--include/linux/virtio.h37
-rw-r--r--include/linux/virtio_config.h4
-rw-r--r--include/linux/virtio_console.h38
-rw-r--r--include/linux/virtio_net.h32
-rw-r--r--include/linux/virtio_pci_admin.h23
-rw-r--r--include/linux/virtio_pci_modern.h44
-rw-r--r--include/linux/virtio_vsock.h11
-rw-r--r--include/linux/vlynq.h149
-rw-r--r--include/linux/vm_event_item.h4
-rw-r--r--include/linux/vmstat.h60
-rw-r--r--include/linux/w1-gpio.h22
-rw-r--r--include/linux/wait.h13
-rw-r--r--include/linux/watch_queue.h5
-rw-r--r--include/linux/wmi.h53
-rw-r--r--include/linux/workqueue.h179
-rw-r--r--include/linux/workqueue_types.h25
-rw-r--r--include/linux/writeback.h8
-rw-r--r--include/linux/xarray.h18
-rw-r--r--include/linux/xattr.h10
-rw-r--r--include/linux/zpool.h20
-rw-r--r--include/linux/zswap.h67
-rw-r--r--include/media/cec.h35
-rw-r--r--include/media/davinci/vpif_types.h2
-rw-r--r--include/media/dvbdev.h6
-rw-r--r--include/media/i2c/ds90ub9xx.h22
-rw-r--r--include/media/ipu-bridge.h181
-rw-r--r--include/media/jpeg.h20
-rw-r--r--include/media/media-entity.h6
-rw-r--r--include/media/mipi-csi2.h1
-rw-r--r--include/media/ov_16bit_addr_reg_helpers.h92
-rw-r--r--include/media/v4l2-async.h238
-rw-r--r--include/media/v4l2-cci.h141
-rw-r--r--include/media/v4l2-common.h6
-rw-r--r--include/media/v4l2-ctrls.h8
-rw-r--r--include/media/v4l2-dev.h2
-rw-r--r--include/media/v4l2-device.h4
-rw-r--r--include/media/v4l2-event.h2
-rw-r--r--include/media/v4l2-fwnode.h70
-rw-r--r--include/media/v4l2-mc.h6
-rw-r--r--include/media/v4l2-mem2mem.h27
-rw-r--r--include/media/v4l2-subdev.h453
-rw-r--r--include/media/videobuf-core.h233
-rw-r--r--include/media/videobuf-dma-contig.h30
-rw-r--r--include/media/videobuf-dma-sg.h102
-rw-r--r--include/media/videobuf-vmalloc.h43
-rw-r--r--include/media/videobuf2-core.h48
-rw-r--r--include/net/Space.h8
-rw-r--r--include/net/act_api.h6
-rw-r--r--include/net/addrconf.h12
-rw-r--r--include/net/af_rxrpc.h15
-rw-r--r--include/net/af_unix.h21
-rw-r--r--include/net/af_vsock.h11
-rw-r--r--include/net/bluetooth/bluetooth.h15
-rw-r--r--include/net/bluetooth/hci.h35
-rw-r--r--include/net/bluetooth/hci_core.h207
-rw-r--r--include/net/bluetooth/hci_mon.h2
-rw-r--r--include/net/bluetooth/hci_sync.h7
-rw-r--r--include/net/bluetooth/mgmt.h3
-rw-r--r--include/net/bluetooth/sco.h2
-rw-r--r--include/net/bond_3ad.h1
-rw-r--r--include/net/bonding.h17
-rw-r--r--include/net/busy_poll.h1
-rw-r--r--include/net/caif/cfsrvl.h3
-rw-r--r--include/net/cfg80211.h660
-rw-r--r--include/net/cfg802154.h78
-rw-r--r--include/net/codel.h4
-rw-r--r--include/net/datalink.h2
-rw-r--r--include/net/devlink.h370
-rw-r--r--include/net/dropreason-core.h57
-rw-r--r--include/net/dropreason.h6
-rw-r--r--include/net/dsa.h90
-rw-r--r--include/net/dsa_stubs.h22
-rw-r--r--include/net/dst.h11
-rw-r--r--include/net/dst_ops.h2
-rw-r--r--include/net/fib_rules.h3
-rw-r--r--include/net/flow.h5
-rw-r--r--include/net/flow_dissector.h37
-rw-r--r--include/net/flow_offload.h8
-rw-r--r--include/net/fq.h5
-rw-r--r--include/net/genetlink.h129
-rw-r--r--include/net/gro.h72
-rw-r--r--include/net/gso.h109
-rw-r--r--include/net/handshake.h5
-rw-r--r--include/net/ieee80211_radiotap.h11
-rw-r--r--include/net/ieee802154_netdev.h80
-rw-r--r--include/net/if_inet6.h6
-rw-r--r--include/net/ila.h16
-rw-r--r--include/net/inet6_hashtables.h81
-rw-r--r--include/net/inet_common.h7
-rw-r--r--include/net/inet_connection_sock.h37
-rw-r--r--include/net/inet_frag.h2
-rw-r--r--include/net/inet_hashtables.h97
-rw-r--r--include/net/inet_sock.h114
-rw-r--r--include/net/inet_timewait_sock.h7
-rw-r--r--include/net/ip.h65
-rw-r--r--include/net/ip6_fib.h5
-rw-r--r--include/net/ip6_route.h21
-rw-r--r--include/net/ip_fib.h8
-rw-r--r--include/net/ip_tunnels.h27
-rw-r--r--include/net/ipv6.h56
-rw-r--r--include/net/ipv6_stubs.h8
-rw-r--r--include/net/iucv/iucv.h4
-rw-r--r--include/net/iw_handler.h11
-rw-r--r--include/net/kcm.h2
-rw-r--r--include/net/llc_c_ac.h1
-rw-r--r--include/net/llc_c_ev.h1
-rw-r--r--include/net/llc_conn.h2
-rw-r--r--include/net/llc_pdu.h12
-rw-r--r--include/net/lwtunnel.h5
-rw-r--r--include/net/mac80211.h283
-rw-r--r--include/net/macsec.h67
-rw-r--r--include/net/mana/gdma.h32
-rw-r--r--include/net/mana/hw_channel.h7
-rw-r--r--include/net/mana/mana.h123
-rw-r--r--include/net/mctp.h5
-rw-r--r--include/net/mptcp.h21
-rw-r--r--include/net/ndisc.h3
-rw-r--r--include/net/neighbour.h6
-rw-r--r--include/net/net_namespace.h29
-rw-r--r--include/net/netdev_queues.h2
-rw-r--r--include/net/netdev_rx_queue.h57
-rw-r--r--include/net/netfilter/nf_conntrack.h18
-rw-r--r--include/net/netfilter/nf_conntrack_acct.h2
-rw-r--r--include/net/netfilter/nf_conntrack_act_ct.h30
-rw-r--r--include/net/netfilter/nf_conntrack_expect.h20
-rw-r--r--include/net/netfilter/nf_conntrack_helper.h3
-rw-r--r--include/net/netfilter/nf_conntrack_labels.h3
-rw-r--r--include/net/netfilter/nf_conntrack_tuple.h3
-rw-r--r--include/net/netfilter/nf_flow_table.h24
-rw-r--r--include/net/netfilter/nf_tables.h296
-rw-r--r--include/net/netfilter/nf_tables_ipv4.h2
-rw-r--r--include/net/netkit.h44
-rw-r--r--include/net/netlink.h128
-rw-r--r--include/net/netns/conntrack.h2
-rw-r--r--include/net/netns/core.h1
-rw-r--r--include/net/netns/ipv4.h57
-rw-r--r--include/net/netns/nftables.h2
-rw-r--r--include/net/netns/smc.h2
-rw-r--r--include/net/netns/xfrm.h1
-rw-r--r--include/net/nexthop.h8
-rw-r--r--include/net/nl802154.h22
-rw-r--r--include/net/nsh.h2
-rw-r--r--include/net/p8022.h3
-rw-r--r--include/net/page_pool.h402
-rw-r--r--include/net/page_pool/helpers.h409
-rw-r--r--include/net/page_pool/types.h253
-rw-r--r--include/net/phonet/phonet.h21
-rw-r--r--include/net/pie.h2
-rw-r--r--include/net/pkt_cls.h15
-rw-r--r--include/net/pkt_sched.h84
-rw-r--r--include/net/regulatory.h14
-rw-r--r--include/net/route.h28
-rw-r--r--include/net/rpl.h3
-rw-r--r--include/net/rsi_91x.h2
-rw-r--r--include/net/rtnetlink.h4
-rw-r--r--include/net/sch_generic.h91
-rw-r--r--include/net/scm.h85
-rw-r--r--include/net/sctp/sctp.h2
-rw-r--r--include/net/sctp/sm.h3
-rw-r--r--include/net/sctp/structs.h2
-rw-r--r--include/net/smc.h16
-rw-r--r--include/net/sock.h171
-rw-r--r--include/net/switchdev.h15
-rw-r--r--include/net/tc_act/tc_ct.h10
-rw-r--r--include/net/tc_act/tc_ipt.h17
-rw-r--r--include/net/tc_act/tc_mirred.h1
-rw-r--r--include/net/tc_wrapper.h4
-rw-r--r--include/net/tcp.h491
-rw-r--r--include/net/tcp_ao.h387
-rw-r--r--include/net/tcp_states.h2
-rw-r--r--include/net/tcx.h201
-rw-r--r--include/net/tls.h50
-rw-r--r--include/net/tls_prot.h68
-rw-r--r--include/net/transp_v6.h2
-rw-r--r--include/net/udp.h9
-rw-r--r--include/net/udp_tunnel.h30
-rw-r--r--include/net/udplite.h16
-rw-r--r--include/net/vxlan.h54
-rw-r--r--include/net/xdp.h70
-rw-r--r--include/net/xdp_sock.h136
-rw-r--r--include/net/xdp_sock_drv.h130
-rw-r--r--include/net/xfrm.h30
-rw-r--r--include/net/xsk_buff_pool.h19
-rw-r--r--include/rdma/ib_mad.h2
-rw-r--r--include/rdma/ib_umem.h9
-rw-r--r--include/rdma/ib_verbs.h11
-rw-r--r--include/rdma/iw_cm.h21
-rw-r--r--include/rdma/uverbs_ioctl.h6
-rw-r--r--include/rv/da_monitor.h2
-rw-r--r--include/scsi/libsas.h49
-rw-r--r--include/scsi/scsi.h3
-rw-r--r--include/scsi/scsi_cmnd.h5
-rw-r--r--include/scsi/scsi_common.h13
-rw-r--r--include/scsi/scsi_device.h52
-rw-r--r--include/scsi/scsi_host.h13
-rw-r--r--include/scsi/scsi_ioctl.h4
-rw-r--r--include/scsi/scsi_proto.h5
-rw-r--r--include/scsi/scsi_transport_iscsi.h1
-rw-r--r--include/soc/at91/atmel_tcb.h3
-rw-r--r--include/soc/fsl/qe/qe.h2
-rw-r--r--include/soc/fsl/qe/qmc.h27
-rw-r--r--include/soc/imx/revision.h1
-rw-r--r--include/soc/imx/timer.h16
-rw-r--r--include/soc/mediatek/smi.h1
-rw-r--r--include/soc/microchip/mpfs.h2
-rw-r--r--include/soc/mscc/ocelot.h20
-rw-r--r--include/soc/qcom/qcom-spmi-pmic.h1
-rw-r--r--include/soc/rockchip/rk3399_grf.h9
-rw-r--r--include/soc/rockchip/rk3568_grf.h13
-rw-r--r--include/soc/rockchip/rk3588_grf.h18
-rw-r--r--include/soc/rockchip/rockchip_grf.h18
-rw-r--r--include/soc/tegra/bpmp-abi.h2
-rw-r--r--include/soc/tegra/bpmp.h6
-rw-r--r--include/soc/tegra/fuse.h3
-rw-r--r--include/soc/tegra/mc.h36
-rw-r--r--include/sound/ac97_codec.h2
-rw-r--r--include/sound/asequencer.h4
-rw-r--r--include/sound/compress_driver.h2
-rw-r--r--include/sound/control.h31
-rw-r--r--include/sound/core.h8
-rw-r--r--include/sound/cs35l41.h10
-rw-r--r--include/sound/cs35l56.h40
-rw-r--r--include/sound/cs4271.h1
-rw-r--r--include/sound/cs42l43.h17
-rw-r--r--include/sound/da7219-aad.h6
-rw-r--r--include/sound/designware_i2s.h3
-rw-r--r--include/sound/dmaengine_pcm.h2
-rw-r--r--include/sound/emu10k1.h309
-rw-r--r--include/sound/emux_synth.h2
-rw-r--r--include/sound/graph_card.h16
-rw-r--r--include/sound/hda-mlink.h4
-rw-r--r--include/sound/hda_codec.h8
-rw-r--r--include/sound/hda_register.h2
-rw-r--r--include/sound/hdaudio.h48
-rw-r--r--include/sound/hdaudio_ext.h3
-rw-r--r--include/sound/hwdep.h2
-rw-r--r--include/sound/info.h2
-rw-r--r--include/sound/l3.h28
-rw-r--r--include/sound/max9768.h4
-rw-r--r--include/sound/opl3.h2
-rw-r--r--include/sound/pcm.h22
-rw-r--r--include/sound/pcm_params.h2
-rw-r--r--include/sound/rawmidi.h17
-rw-r--r--include/sound/rt5665.h2
-rw-r--r--include/sound/rt5668.h3
-rw-r--r--include/sound/rt5682.h3
-rw-r--r--include/sound/rt5682s.h11
-rw-r--r--include/sound/seq_device.h1
-rw-r--r--include/sound/seq_kernel.h10
-rw-r--r--include/sound/simple_card.h6
-rw-r--r--include/sound/simple_card_utils.h113
-rw-r--r--include/sound/soc-acpi-intel-match.h4
-rw-r--r--include/sound/soc-acpi.h14
-rw-r--r--include/sound/soc-card.h43
-rw-r--r--include/sound/soc-component.h29
-rw-r--r--include/sound/soc-dai.h30
-rw-r--r--include/sound/soc-dapm.h142
-rw-r--r--include/sound/soc-dmaengine.h26
-rw-r--r--include/sound/soc.h146
-rw-r--r--include/sound/sof.h27
-rw-r--r--include/sound/sof/dai-imx.h7
-rw-r--r--include/sound/sof/dai.h2
-rw-r--r--include/sound/sof/ipc4/header.h67
-rw-r--r--include/sound/sof/topology.h65
-rw-r--r--include/sound/tas2781-dsp.h188
-rw-r--r--include/sound/tas2781-tlv.h21
-rw-r--r--include/sound/tas2781.h174
-rw-r--r--include/sound/uda134x.h24
-rw-r--r--include/sound/ump.h269
-rw-r--r--include/sound/ump_convert.h46
-rw-r--r--include/sound/ump_msg.h765
-rw-r--r--include/sound/wavefront.h53
-rw-r--r--include/sound/wm0010.h6
-rw-r--r--include/sound/wm1250-ev1.h24
-rw-r--r--include/sound/wm2200.h2
-rw-r--r--include/sound/wm5100.h4
-rw-r--r--include/sound/wm8996.h3
-rw-r--r--include/target/iscsi/iscsi_target_core.h4
-rw-r--r--include/target/target_core_backend.h8
-rw-r--r--include/target/target_core_base.h13
-rw-r--r--include/target/target_core_fabric.h19
-rw-r--r--include/trace/bpf_probe.h2
-rw-r--r--include/trace/events/9p.h11
-rw-r--r--include/trace/events/afs.h825
-rw-r--r--include/trace/events/block.h28
-rw-r--r--include/trace/events/btrfs.h210
-rw-r--r--include/trace/events/compaction.h11
-rw-r--r--include/trace/events/csd.h72
-rw-r--r--include/trace/events/damon.h45
-rw-r--r--include/trace/events/dlm.h51
-rw-r--r--include/trace/events/erofs.h16
-rw-r--r--include/trace/events/ext4.h55
-rw-r--r--include/trace/events/f2fs.h151
-rw-r--r--include/trace/events/fib.h2
-rw-r--r--include/trace/events/fib6.h2
-rw-r--r--include/trace/events/fsi.h31
-rw-r--r--include/trace/events/fsi_master_i2cr.h107
-rw-r--r--include/trace/events/habanalabs.h45
-rw-r--r--include/trace/events/handshake.h160
-rw-r--r--include/trace/events/intel_ifs.h16
-rw-r--r--include/trace/events/jbd2.h12
-rw-r--r--include/trace/events/ksm.h33
-rw-r--r--include/trace/events/kvm.h8
-rw-r--r--include/trace/events/kyber.h8
-rw-r--r--include/trace/events/migrate.h24
-rw-r--r--include/trace/events/mmflags.h6
-rw-r--r--include/trace/events/mptcp.h2
-rw-r--r--include/trace/events/neigh.h4
-rw-r--r--include/trace/events/net.h3
-rw-r--r--include/trace/events/netfs.h155
-rw-r--r--include/trace/events/page_pool.h2
-rw-r--r--include/trace/events/qdisc.h20
-rw-r--r--include/trace/events/rpcrdma.h256
-rw-r--r--include/trace/events/rxrpc.h13
-rw-r--r--include/trace/events/sched.h72
-rw-r--r--include/trace/events/scsi.h21
-rw-r--r--include/trace/events/spi.h2
-rw-r--r--include/trace/events/sunrpc.h217
-rw-r--r--include/trace/events/task.h2
-rw-r--r--include/trace/events/tcp.h5
-rw-r--r--include/trace/events/thp.h33
-rw-r--r--include/trace/events/timer.h46
-rw-r--r--include/trace/events/ufs.h15
-rw-r--r--include/trace/events/vmscan.h8
-rw-r--r--include/trace/events/vsock_virtio_transport_common.h12
-rw-r--r--include/trace/events/wbt.h8
-rw-r--r--include/trace/events/xdp.h18
-rw-r--r--include/trace/events/xen.h12
-rw-r--r--include/uapi/asm-generic/bitsperlong.h13
-rw-r--r--include/uapi/asm-generic/siginfo.h8
-rw-r--r--include/uapi/asm-generic/socket.h3
-rw-r--r--include/uapi/asm-generic/unistd.h158
-rw-r--r--include/uapi/drm/amdgpu_drm.h65
-rw-r--r--include/uapi/drm/drm.h176
-rw-r--r--include/uapi/drm/drm_fourcc.h55
-rw-r--r--include/uapi/drm/drm_mode.h73
-rw-r--r--include/uapi/drm/habanalabs_accel.h106
-rw-r--r--include/uapi/drm/i915_drm.h115
-rw-r--r--include/uapi/drm/ivpu_accel.h51
-rw-r--r--include/uapi/drm/msm_drm.h3
-rw-r--r--include/uapi/drm/nouveau_drm.h295
-rw-r--r--include/uapi/drm/pvr_drm.h1295
-rw-r--r--include/uapi/drm/qaic_accel.h29
-rw-r--r--include/uapi/drm/v3d_drm.h245
-rw-r--r--include/uapi/drm/virtgpu_drm.h18
-rw-r--r--include/uapi/drm/xe_drm.h1327
-rw-r--r--include/uapi/linux/affs_hardblocks.h68
-rw-r--r--include/uapi/linux/android/binder.h30
-rw-r--r--include/uapi/linux/auto_dev-ioctl.h2
-rw-r--r--include/uapi/linux/batadv_packet.h45
-rw-r--r--include/uapi/linux/blkzoned.h10
-rw-r--r--include/uapi/linux/bpf.h275
-rw-r--r--include/uapi/linux/bpfilter.h21
-rw-r--r--include/uapi/linux/btrfs.h6
-rw-r--r--include/uapi/linux/btrfs_tree.h74
-rw-r--r--include/uapi/linux/can.h1
-rw-r--r--include/uapi/linux/can/raw.h2
-rw-r--r--include/uapi/linux/capability.h5
-rw-r--r--include/uapi/linux/cgroupstats.h2
-rw-r--r--include/uapi/linux/cn_proc.h62
-rw-r--r--include/uapi/linux/counter.h8
-rw-r--r--include/uapi/linux/cxl_mem.h1
-rw-r--r--include/uapi/linux/devlink.h9
-rw-r--r--include/uapi/linux/dlm_plock.h1
-rw-r--r--include/uapi/linux/dpll.h208
-rw-r--r--include/uapi/linux/dvb/frontend.h8
-rw-r--r--include/uapi/linux/dvb/version.h2
-rw-r--r--include/uapi/linux/elf-fdpic.h15
-rw-r--r--include/uapi/linux/elf.h11
-rw-r--r--include/uapi/linux/ethtool.h41
-rw-r--r--include/uapi/linux/ethtool_netlink.h1
-rw-r--r--include/uapi/linux/eventfd.h11
-rw-r--r--include/uapi/linux/fcntl.h8
-rw-r--r--include/uapi/linux/firewire-cdev.h178
-rw-r--r--include/uapi/linux/fs.h60
-rw-r--r--include/uapi/linux/fscrypt.h3
-rw-r--r--include/uapi/linux/fsi.h10
-rw-r--r--include/uapi/linux/fuse.h65
-rw-r--r--include/uapi/linux/futex.h31
-rw-r--r--include/uapi/linux/gsmmux.h119
-rw-r--r--include/uapi/linux/gtp.h2
-rw-r--r--include/uapi/linux/hash_info.h3
-rw-r--r--include/uapi/linux/idxd.h1
-rw-r--r--include/uapi/linux/if_bridge.h19
-rw-r--r--include/uapi/linux/if_link.h563
-rw-r--r--include/uapi/linux/if_xdp.h60
-rw-r--r--include/uapi/linux/iio/types.h6
-rw-r--r--include/uapi/linux/in6.h2
-rw-r--r--include/uapi/linux/io_uring.h74
-rw-r--r--include/uapi/linux/iommufd.h348
-rw-r--r--include/uapi/linux/ioprio.h101
-rw-r--r--include/uapi/linux/ipv6.h1
-rw-r--r--include/uapi/linux/kexec.h2
-rw-r--r--include/uapi/linux/kfd_ioctl.h685
-rw-r--r--include/uapi/linux/kfd_sysfs.h15
-rw-r--r--include/uapi/linux/kvm.h170
-rw-r--r--include/uapi/linux/landlock.h55
-rw-r--r--include/uapi/linux/lsm.h90
-rw-r--r--include/uapi/linux/mdio.h42
-rw-r--r--include/uapi/linux/media-bus-format.h3
-rw-r--r--include/uapi/linux/media.h28
-rw-r--r--include/uapi/linux/mei.h18
-rw-r--r--include/uapi/linux/mempolicy.h2
-rw-r--r--include/uapi/linux/mman.h14
-rw-r--r--include/uapi/linux/mount.h76
-rw-r--r--include/uapi/linux/mptcp.h202
-rw-r--r--include/uapi/linux/mptcp_pm.h150
-rw-r--r--include/uapi/linux/netdev.h96
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h7
-rw-r--r--include/uapi/linux/netfilter_bridge/ebtables.h22
-rw-r--r--include/uapi/linux/netlink.h5
-rw-r--r--include/uapi/linux/nfsd_netlink.h39
-rw-r--r--include/uapi/linux/nl80211.h258
-rw-r--r--include/uapi/linux/npcm-video.h41
-rw-r--r--include/uapi/linux/nsm.h31
-rw-r--r--include/uapi/linux/openvswitch.h3
-rw-r--r--include/uapi/linux/pci_regs.h25
-rw-r--r--include/uapi/linux/pcitest.h3
-rw-r--r--include/uapi/linux/perf_event.h16
-rw-r--r--include/uapi/linux/pkt_cls.h67
-rw-r--r--include/uapi/linux/pkt_sched.h135
-rw-r--r--include/uapi/linux/pktcdvd.h1
-rw-r--r--include/uapi/linux/prctl.h14
-rw-r--r--include/uapi/linux/psp-dbc.h147
-rw-r--r--include/uapi/linux/psp-sev.h1
-rw-r--r--include/uapi/linux/ptp_clock.h5
-rw-r--r--include/uapi/linux/quota.h1
-rw-r--r--include/uapi/linux/raid/md_p.h8
-rw-r--r--include/uapi/linux/raid/md_u.h11
-rw-r--r--include/uapi/linux/resource.h2
-rw-r--r--include/uapi/linux/rpmsg.h10
-rw-r--r--include/uapi/linux/rtnetlink.h18
-rw-r--r--include/uapi/linux/sched/types.h4
-rw-r--r--include/uapi/linux/seccomp.h4
-rw-r--r--include/uapi/linux/sed-opal.h25
-rw-r--r--include/uapi/linux/serial.h20
-rw-r--r--include/uapi/linux/serial_core.h65
-rw-r--r--include/uapi/linux/serial_reg.h1
-rw-r--r--include/uapi/linux/sev-guest.h4
-rw-r--r--include/uapi/linux/smc.h4
-rw-r--r--include/uapi/linux/smc_diag.h2
-rw-r--r--include/uapi/linux/snmp.h8
-rw-r--r--include/uapi/linux/spi/spi.h3
-rw-r--r--include/uapi/linux/stat.h1
-rw-r--r--include/uapi/linux/stddef.h13
-rw-r--r--include/uapi/linux/sync_file.h24
-rw-r--r--include/uapi/linux/tc_act/tc_ipt.h20
-rw-r--r--include/uapi/linux/tc_act/tc_mirred.h1
-rw-r--r--include/uapi/linux/tcp.h118
-rw-r--r--include/uapi/linux/thp7312.h19
-rw-r--r--include/uapi/linux/tps6594_pfsm.h37
-rw-r--r--include/uapi/linux/types.h4
-rw-r--r--include/uapi/linux/ublk_cmd.h97
-rw-r--r--include/uapi/linux/usb/ch11.h6
-rw-r--r--include/uapi/linux/usb/ch9.h16
-rw-r--r--include/uapi/linux/usb/functionfs.h6
-rw-r--r--include/uapi/linux/usb/raw_gadget.h14
-rw-r--r--include/uapi/linux/user_events.h11
-rw-r--r--include/uapi/linux/userfaultfd.h59
-rw-r--r--include/uapi/linux/v4l2-controls.h739
-rw-r--r--include/uapi/linux/v4l2-subdev.h15
-rw-r--r--include/uapi/linux/vfio.h225
-rw-r--r--include/uapi/linux/vhost.h39
-rw-r--r--include/uapi/linux/vhost_types.h27
-rw-r--r--include/uapi/linux/videodev2.h31
-rw-r--r--include/uapi/linux/virtio_config.h13
-rw-r--r--include/uapi/linux/virtio_net.h14
-rw-r--r--include/uapi/linux/virtio_pci.h79
-rw-r--r--include/uapi/linux/virtio_pmem.h7
-rw-r--r--include/uapi/linux/vm_sockets.h17
-rw-r--r--include/uapi/linux/wireless.h2
-rw-r--r--include/uapi/linux/xfrm.h3
-rw-r--r--include/uapi/mtd/ubi-user.h4
-rw-r--r--include/uapi/rdma/bnxt_re-abi.h75
-rw-r--r--include/uapi/rdma/efa-abi.h21
-rw-r--r--include/uapi/rdma/hns-abi.h17
-rw-r--r--include/uapi/rdma/ib_user_ioctl_verbs.h3
-rw-r--r--include/uapi/rdma/irdma-abi.h9
-rw-r--r--include/uapi/rdma/mlx5-abi.h2
-rw-r--r--include/uapi/rdma/mlx5_user_ioctl_verbs.h1
-rw-r--r--include/uapi/rdma/rdma_netlink.h4
-rw-r--r--include/uapi/rdma/siw-abi.h2
-rw-r--r--include/uapi/regulator/regulator.h90
-rw-r--r--include/uapi/scsi/scsi_bsg_mpi3mr.h2
-rw-r--r--include/uapi/scsi/scsi_bsg_ufs.h77
-rw-r--r--include/uapi/sound/asequencer.h92
-rw-r--r--include/uapi/sound/asound.h90
-rw-r--r--include/uapi/sound/emu10k1.h8
-rw-r--r--include/uapi/sound/scarlett2.h54
-rw-r--r--include/uapi/sound/sof/tokens.h11
-rw-r--r--include/uapi/xen/evtchn.h9
-rw-r--r--include/uapi/xen/gntalloc.h5
-rw-r--r--include/uapi/xen/privcmd.h32
-rw-r--r--include/ufs/ufs.h108
-rw-r--r--include/ufs/ufs_quirks.h6
-rw-r--r--include/ufs/ufshcd.h108
-rw-r--r--include/ufs/ufshci.h74
-rw-r--r--include/ufs/unipro.h10
-rw-r--r--include/vdso/gettime.h23
-rw-r--r--include/video/kyro.h12
-rw-r--r--include/video/mmp_disp.h2
-rw-r--r--include/video/sticore.h406
-rw-r--r--include/video/uvesafb.h2
-rw-r--r--include/xen/arm/hypervisor.h12
-rw-r--r--include/xen/events.h26
-rw-r--r--include/xen/interface/hvm/ioreq.h51
-rw-r--r--include/xen/interface/io/displif.h2
-rw-r--r--include/xen/interface/io/ring.h2
-rw-r--r--include/xen/interface/io/sndif.h2
-rw-r--r--include/xen/xen.h9
1630 files changed, 73311 insertions, 22263 deletions
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h
index d71291f25a80..76aa6aa346ba 100644
--- a/include/acpi/acnames.h
+++ b/include/acpi/acnames.h
@@ -22,6 +22,7 @@
#define METHOD_NAME__DDN "_DDN"
#define METHOD_NAME__DIS "_DIS"
#define METHOD_NAME__DMA "_DMA"
+#define METHOD_NAME__EVT "_EVT"
#define METHOD_NAME__HID "_HID"
#define METHOD_NAME__INI "_INI"
#define METHOD_NAME__PLD "_PLD"
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index a6affc0550b0..e4d24d3f9abb 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -12,11 +12,9 @@
#include <linux/device.h>
#include <linux/property.h>
-/* TBD: Make dynamic */
-#define ACPI_MAX_HANDLES 10
struct acpi_handle_list {
u32 count;
- acpi_handle handles[ACPI_MAX_HANDLES];
+ acpi_handle *handles;
};
/* acpi_utils.h */
@@ -27,11 +25,15 @@ acpi_status
acpi_evaluate_integer(acpi_handle handle,
acpi_string pathname,
struct acpi_object_list *arguments, unsigned long long *data);
-acpi_status
-acpi_evaluate_reference(acpi_handle handle,
- acpi_string pathname,
- struct acpi_object_list *arguments,
- struct acpi_handle_list *list);
+bool acpi_evaluate_reference(acpi_handle handle, acpi_string pathname,
+ struct acpi_object_list *arguments,
+ struct acpi_handle_list *list);
+bool acpi_handle_list_equal(struct acpi_handle_list *list1,
+ struct acpi_handle_list *list2);
+void acpi_handle_list_replace(struct acpi_handle_list *dst,
+ struct acpi_handle_list *src);
+void acpi_handle_list_free(struct acpi_handle_list *list);
+bool acpi_device_dep(acpi_handle target, acpi_handle match);
acpi_status
acpi_evaluate_ost(acpi_handle handle, u32 source_event, u32 status_code,
struct acpi_buffer *status_buf);
@@ -289,6 +291,8 @@ struct acpi_dep_data {
acpi_handle supplier;
acpi_handle consumer;
bool honor_dep;
+ bool met;
+ bool free_when_met;
};
/* Performance Management */
@@ -361,6 +365,98 @@ struct acpi_device_data {
struct acpi_gpio_mapping;
+#define ACPI_DEVICE_SWNODE_ROOT 0
+
+/*
+ * The maximum expected number of CSI-2 data lanes.
+ *
+ * This number is not expected to ever reach the number of bits in an
+ * unsigned long, but if it must be increased above that limit, the code
+ * will need to be adjusted accordingly.
+ */
+#define ACPI_DEVICE_CSI2_DATA_LANES 8
+
+#define ACPI_DEVICE_SWNODE_PORT_NAME_LENGTH 8
+
+enum acpi_device_swnode_dev_props {
+ ACPI_DEVICE_SWNODE_DEV_ROTATION,
+ ACPI_DEVICE_SWNODE_DEV_CLOCK_FREQUENCY,
+ ACPI_DEVICE_SWNODE_DEV_LED_MAX_MICROAMP,
+ ACPI_DEVICE_SWNODE_DEV_FLASH_MAX_MICROAMP,
+ ACPI_DEVICE_SWNODE_DEV_FLASH_MAX_TIMEOUT_US,
+ ACPI_DEVICE_SWNODE_DEV_NUM_OF,
+ ACPI_DEVICE_SWNODE_DEV_NUM_ENTRIES
+};
+
+enum acpi_device_swnode_port_props {
+ ACPI_DEVICE_SWNODE_PORT_REG,
+ ACPI_DEVICE_SWNODE_PORT_NUM_OF,
+ ACPI_DEVICE_SWNODE_PORT_NUM_ENTRIES
+};
+
+enum acpi_device_swnode_ep_props {
+ ACPI_DEVICE_SWNODE_EP_REMOTE_EP,
+ ACPI_DEVICE_SWNODE_EP_BUS_TYPE,
+ ACPI_DEVICE_SWNODE_EP_REG,
+ ACPI_DEVICE_SWNODE_EP_CLOCK_LANES,
+ ACPI_DEVICE_SWNODE_EP_DATA_LANES,
+ ACPI_DEVICE_SWNODE_EP_LANE_POLARITIES,
+ /* TX only */
+ ACPI_DEVICE_SWNODE_EP_LINK_FREQUENCIES,
+ ACPI_DEVICE_SWNODE_EP_NUM_OF,
+ ACPI_DEVICE_SWNODE_EP_NUM_ENTRIES
+};
+
+/*
+ * Each device has a root software node plus twice as many nodes as the
+ * number of CSI-2 ports: one port node and one endpoint node per port.
+ */
+#define ACPI_DEVICE_SWNODE_PORT(port) (2 * (port) + 1)
+#define ACPI_DEVICE_SWNODE_EP(endpoint) \
+ (ACPI_DEVICE_SWNODE_PORT(endpoint) + 1)
+
+/**
+ * struct acpi_device_software_node_port - MIPI DisCo for Imaging CSI-2 port
+ * @port_name: Port name.
+ * @data_lanes: "data-lanes" property values.
+ * @lane_polarities: "lane-polarities" property values.
+ * @link_frequencies: "link-frequencies" property values.
+ * @port_nr: Port number.
+ * @crs_csi2_local: _CRS CSI2 record present (i.e. this port is a transmitter).
+ * @port_props: Port properties.
+ * @ep_props: Endpoint properties.
+ * @remote_ep: Reference to the remote endpoint.
+ */
+struct acpi_device_software_node_port {
+ char port_name[ACPI_DEVICE_SWNODE_PORT_NAME_LENGTH + 1];
+ u32 data_lanes[ACPI_DEVICE_CSI2_DATA_LANES];
+ u32 lane_polarities[ACPI_DEVICE_CSI2_DATA_LANES + 1 /* clock lane */];
+ u64 link_frequencies[ACPI_DEVICE_CSI2_DATA_LANES];
+ unsigned int port_nr;
+ bool crs_csi2_local;
+
+ struct property_entry port_props[ACPI_DEVICE_SWNODE_PORT_NUM_ENTRIES];
+ struct property_entry ep_props[ACPI_DEVICE_SWNODE_EP_NUM_ENTRIES];
+
+ struct software_node_ref_args remote_ep[1];
+};
+
+/**
+ * struct acpi_device_software_nodes - Software nodes for an ACPI device
+ * @dev_props: Device properties.
+ * @nodes: Software nodes for root as well as ports and endpoints.
+ * @nodeptrs: Array of software node pointers, for (un)registering them.
+ * @ports: Information related to each port and endpoint within a port.
+ * @num_ports: The number of ports.
+ */
+struct acpi_device_software_nodes {
+ struct property_entry dev_props[ACPI_DEVICE_SWNODE_DEV_NUM_ENTRIES];
+ struct software_node *nodes;
+ const struct software_node **nodeptrs;
+ struct acpi_device_software_node_port *ports;
+ unsigned int num_ports;
+};
+
/* Device */
struct acpi_device {
u32 pld_crc;
@@ -379,6 +475,7 @@ struct acpi_device {
struct acpi_device_data data;
struct acpi_scan_handler *handler;
struct acpi_hotplug_context *hp;
+ struct acpi_device_software_nodes *swnodes;
const struct acpi_gpio_mapping *driver_gpios;
void *driver_data;
struct device dev;
@@ -513,6 +610,12 @@ void acpi_bus_private_data_handler(acpi_handle, void *);
int acpi_bus_get_private_data(acpi_handle, void **);
int acpi_bus_attach_private_data(acpi_handle, void *);
void acpi_bus_detach_private_data(acpi_handle);
+int acpi_dev_install_notify_handler(struct acpi_device *adev,
+ u32 handler_type,
+ acpi_notify_handler handler, void *context);
+void acpi_dev_remove_notify_handler(struct acpi_device *adev,
+ u32 handler_type,
+ acpi_notify_handler handler);
extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32);
extern int register_acpi_notifier(struct notifier_block *);
extern int unregister_acpi_notifier(struct notifier_block *);
@@ -531,6 +634,7 @@ int acpi_device_set_power(struct acpi_device *device, int state);
int acpi_bus_init_power(struct acpi_device *device);
int acpi_device_fix_up_power(struct acpi_device *device);
void acpi_device_fix_up_power_extended(struct acpi_device *adev);
+void acpi_device_fix_up_power_children(struct acpi_device *adev);
int acpi_bus_update_power(acpi_handle handle, int *state_p);
int acpi_device_update_power(struct acpi_device *device, int *state_p);
bool acpi_bus_power_manageable(acpi_handle handle);
@@ -561,8 +665,6 @@ int acpi_match_device_ids(struct acpi_device *device,
const struct acpi_device_id *ids);
void acpi_set_modalias(struct acpi_device *adev, const char *default_id,
char *modalias, size_t len);
-int acpi_create_dir(struct acpi_device *);
-void acpi_remove_dir(struct acpi_device *);
static inline bool acpi_device_enumerated(struct acpi_device *adev)
{
@@ -617,6 +719,8 @@ struct acpi_pci_root {
/* helper */
+struct iommu_ops;
+
bool acpi_dma_supported(const struct acpi_device *adev);
enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev);
int acpi_iommu_fwspec_init(struct device *dev, u32 id,
@@ -643,6 +747,8 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev);
#ifdef CONFIG_X86
bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *status);
bool acpi_quirk_skip_acpi_ac_and_battery(void);
+int acpi_install_cmos_rtc_space_handler(acpi_handle handle);
+void acpi_remove_cmos_rtc_space_handler(acpi_handle handle);
#else
static inline bool acpi_device_override_status(struct acpi_device *adev,
unsigned long long *status)
@@ -653,6 +759,13 @@ static inline bool acpi_quirk_skip_acpi_ac_and_battery(void)
{
return false;
}
+static inline int acpi_install_cmos_rtc_space_handler(acpi_handle handle)
+{
+ return 1;
+}
+static inline void acpi_remove_cmos_rtc_space_handler(acpi_handle handle)
+{
+}
#endif
#if IS_ENABLED(CONFIG_X86_ANDROID_TABLETS)
@@ -745,9 +858,71 @@ static inline bool acpi_device_can_poweroff(struct acpi_device *adev)
adev->power.states[ACPI_STATE_D3_HOT].flags.explicit_set);
}
-bool acpi_dev_hid_uid_match(struct acpi_device *adev, const char *hid2, const char *uid2);
int acpi_dev_uid_to_integer(struct acpi_device *adev, u64 *integer);
+static inline bool acpi_dev_hid_match(struct acpi_device *adev, const char *hid2)
+{
+ const char *hid1 = acpi_device_hid(adev);
+
+ return hid1 && hid2 && !strcmp(hid1, hid2);
+}
+
+static inline bool acpi_str_uid_match(struct acpi_device *adev, const char *uid2)
+{
+ const char *uid1 = acpi_device_uid(adev);
+
+ return uid1 && uid2 && !strcmp(uid1, uid2);
+}
+
+static inline bool acpi_int_uid_match(struct acpi_device *adev, u64 uid2)
+{
+ u64 uid1;
+
+ return !acpi_dev_uid_to_integer(adev, &uid1) && uid1 == uid2;
+}
+
+#define TYPE_ENTRY(type, x) \
+ const type: x, \
+ type: x
+
+#define ACPI_STR_TYPES(match) \
+ TYPE_ENTRY(unsigned char *, match), \
+ TYPE_ENTRY(signed char *, match), \
+ TYPE_ENTRY(char *, match), \
+ TYPE_ENTRY(void *, match)
+
+/**
+ * acpi_dev_uid_match - Match device by supplied UID
+ * @adev: ACPI device to match.
+ * @uid2: Unique ID of the device.
+ *
+ * Matches UID in @adev with given @uid2.
+ *
+ * Returns: %true if matches, %false otherwise.
+ */
+#define acpi_dev_uid_match(adev, uid2) \
+ _Generic(uid2, \
+ /* Treat @uid2 as a string for acpi string types */ \
+ ACPI_STR_TYPES(acpi_str_uid_match), \
+ /* Treat as an integer otherwise */ \
+ default: acpi_int_uid_match)(adev, uid2)
+
+/**
+ * acpi_dev_hid_uid_match - Match device by supplied HID and UID
+ * @adev: ACPI device to match.
+ * @hid2: Hardware ID of the device.
+ * @uid2: Unique ID of the device, pass 0 or NULL to not check _UID.
+ *
+ * Matches HID and UID in @adev with given @hid2 and @uid2. Absence of @uid2
+ * will be treated as a match. If the caller wants to validate @uid2, it
+ * should be done before calling this function.
+ *
+ * Returns: %true if matches or @uid2 is 0 or NULL, %false otherwise.
+ */
+#define acpi_dev_hid_uid_match(adev, hid2, uid2) \
+ (acpi_dev_hid_match(adev, hid2) && \
+ (!(uid2) || acpi_dev_uid_match(adev, uid2)))
+
void acpi_dev_clear_dependencies(struct acpi_device *supplier);
bool acpi_dev_ready_for_enumeration(const struct acpi_device *device);
struct acpi_device *acpi_dev_get_next_consumer_dev(struct acpi_device *supplier,
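The _Generic()-based matchers above dispatch on the type of the UID argument: string pointer types go through acpi_str_uid_match(), everything else through acpi_int_uid_match(). A minimal usage sketch in hypothetical driver code (the HID "ABCD0001" and the UID values are made-up examples):

	static int my_probe(struct acpi_device *adev)
	{
		/* String UID: dispatches to acpi_str_uid_match(). */
		if (acpi_dev_hid_uid_match(adev, "ABCD0001", "1"))
			return 0;

		/* Integer UID: dispatches to acpi_int_uid_match(). */
		if (acpi_dev_uid_match(adev, 2))
			return 0;

		/* NULL UID: the HID alone decides the match. */
		if (acpi_dev_hid_uid_match(adev, "ABCD0001", NULL))
			return 0;

		return -ENODEV;
	}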
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index 8372b0e7fd15..b14d165632e7 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -27,6 +27,8 @@
#define ACPI_BAY_HID "LNXIOBAY"
#define ACPI_DOCK_HID "LNXDOCK"
#define ACPI_ECDT_HID "LNXEC"
+/* SMBUS HID definition as supported by Microsoft Windows */
+#define ACPI_SMBUS_MS_HID "SMB0001"
/* Quirk for broken IBM BIOSes */
#define ACPI_SMBUS_IBM_HID "SMBUSIBM"
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 9ffdc0425bc2..3d90716f9522 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -12,7 +12,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20230331
+#define ACPI_CA_VERSION 0x20230628
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
@@ -970,8 +970,6 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
void **data,
void (*callback)(void *)))
-void acpi_run_debugger(char *batch_buffer);
-
void acpi_set_debugger_thread_id(acpi_thread_id thread_id);
#endif /* __ACXFACE_H__ */
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index e5dfb6f4de52..451f6276da49 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -307,7 +307,8 @@ enum acpi_preferred_pm_profiles {
PM_SOHO_SERVER = 5,
PM_APPLIANCE_PC = 6,
PM_PERFORMANCE_SERVER = 7,
- PM_TABLET = 8
+ PM_TABLET = 8,
+ NR_PM_PROFILES = 9
};
/* Values for sleep_status and sleep_control registers (V5+ FADT) */
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 58b0490a2ad1..a33375e055ad 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -402,7 +402,7 @@ struct acpi_cdat_dsmas {
/* Flags for subtable above */
-#define ACPI_CEDT_DSMAS_NON_VOLATILE (1 << 2)
+#define ACPI_CDAT_DSMAS_NON_VOLATILE (1 << 2)
/* Subtable 1: Device scoped Latency and Bandwidth Information Structure (DSLBIS) */
@@ -465,6 +465,9 @@ struct acpi_cdat_sslbe {
u16 reserved;
};
+#define ACPI_CDAT_SSLBIS_US_PORT 0x0100
+#define ACPI_CDAT_SSLBIS_ANY_PORT 0xffff
+
/*******************************************************************************
*
* CEDT - CXL Early Discovery Table
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index 0029336775a9..9775384d61c6 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -893,7 +893,10 @@ enum acpi_madt_type {
ACPI_MADT_TYPE_BIO_PIC = 22,
ACPI_MADT_TYPE_LPC_PIC = 23,
ACPI_MADT_TYPE_RINTC = 24,
- ACPI_MADT_TYPE_RESERVED = 25, /* 25 to 0x7F are reserved */
+ ACPI_MADT_TYPE_IMSIC = 25,
+ ACPI_MADT_TYPE_APLIC = 26,
+ ACPI_MADT_TYPE_PLIC = 27,
+ ACPI_MADT_TYPE_RESERVED = 28, /* 28 to 0x7F are reserved */
ACPI_MADT_TYPE_OEM_RESERVED = 0x80 /* 0x80 to 0xFF are reserved for OEM use */
};
@@ -1043,6 +1046,8 @@ struct acpi_madt_generic_interrupt {
/* ACPI_MADT_ENABLED (1) Processor is usable if set */
#define ACPI_MADT_PERFORMANCE_IRQ_MODE (1<<1) /* 01: Performance Interrupt Mode */
#define ACPI_MADT_VGIC_IRQ_MODE (1<<2) /* 02: VGIC Maintenance Interrupt mode */
+#define ACPI_MADT_GICC_ONLINE_CAPABLE (1<<3) /* 03: Processor is online capable */
+#define ACPI_MADT_GICC_NON_COHERENT (1<<4) /* 04: GIC redistributor is not coherent */
/* 12: Generic Distributor (ACPI 5.0 + ACPI 6.0 changes) */
@@ -1087,21 +1092,27 @@ struct acpi_madt_generic_msi_frame {
struct acpi_madt_generic_redistributor {
struct acpi_subtable_header header;
- u16 reserved; /* reserved - must be zero */
+ u8 flags;
+ u8 reserved; /* reserved - must be zero */
u64 base_address;
u32 length;
};
+#define ACPI_MADT_GICR_NON_COHERENT (1)
+
/* 15: Generic Translator (ACPI 6.0) */
struct acpi_madt_generic_translator {
struct acpi_subtable_header header;
- u16 reserved; /* reserved - must be zero */
+ u8 flags;
+ u8 reserved; /* reserved - must be zero */
u32 translation_id;
u64 base_address;
u32 reserved2;
};
+#define ACPI_MADT_ITS_NON_COHERENT (1)
+
/* 16: Multiprocessor wakeup (ACPI 6.4) */
struct acpi_madt_multiproc_wakeup {
@@ -1261,6 +1272,9 @@ struct acpi_madt_rintc {
u32 flags;
u64 hart_id;
u32 uid; /* ACPI processor UID */
+ u32 ext_intc_id; /* External INTC Id */
+ u64 imsic_addr; /* IMSIC base address */
+ u32 imsic_size; /* IMSIC size */
};
/* Values for RISC-V INTC Version field above */
@@ -1271,6 +1285,48 @@ enum acpi_madt_rintc_version {
ACPI_MADT_RINTC_VERSION_RESERVED = 2 /* 2 and greater are reserved */
};
+/* 25: RISC-V IMSIC */
+struct acpi_madt_imsic {
+ struct acpi_subtable_header header;
+ u8 version;
+ u8 reserved;
+ u32 flags;
+ u16 num_ids;
+ u16 num_guest_ids;
+ u8 guest_index_bits;
+ u8 hart_index_bits;
+ u8 group_index_bits;
+ u8 group_index_shift;
+};
+
+/* 26: RISC-V APLIC */
+struct acpi_madt_aplic {
+ struct acpi_subtable_header header;
+ u8 version;
+ u8 id;
+ u32 flags;
+ u8 hw_id[8];
+ u16 num_idcs;
+ u16 num_sources;
+ u32 gsi_base;
+ u64 base_addr;
+ u32 size;
+};
+
+/* 27: RISC-V PLIC */
+struct acpi_madt_plic {
+ struct acpi_subtable_header header;
+ u8 version;
+ u8 id;
+ u8 hw_id[8];
+ u16 num_irqs;
+ u16 max_prio;
+ u32 flags;
+ u32 size;
+ u64 base_addr;
+ u32 gsi_base;
+};
+
/* 80: OEM data */
struct acpi_madt_oem_data {
@@ -2730,12 +2786,15 @@ enum acpi_rgrt_image_type {
struct acpi_table_rhct {
struct acpi_table_header header; /* Common ACPI table header */
- u32 reserved;
+ u32 flags; /* RHCT flags */
u64 time_base_freq;
u32 node_count;
u32 node_offset;
};
+/* RHCT Flags */
+
+#define ACPI_RHCT_TIMER_CANNOT_WAKEUP_CPU (1)
/*
* RHCT subtables
*/
@@ -2749,6 +2808,9 @@ struct acpi_rhct_node_header {
enum acpi_rhct_node_type {
ACPI_RHCT_NODE_TYPE_ISA_STRING = 0x0000,
+ ACPI_RHCT_NODE_TYPE_CMO = 0x0001,
+ ACPI_RHCT_NODE_TYPE_MMU = 0x0002,
+ ACPI_RHCT_NODE_TYPE_RESERVED = 0x0003,
ACPI_RHCT_NODE_TYPE_HART_INFO = 0xFFFF,
};
@@ -2762,6 +2824,24 @@ struct acpi_rhct_isa_string {
char isa[];
};
+struct acpi_rhct_cmo_node {
+ u8 reserved; /* Must be zero */
+	u8 cbom_size;		/* CBOM size as a power of 2 */
+	u8 cbop_size;		/* CBOP size as a power of 2 */
+	u8 cboz_size;		/* CBOZ size as a power of 2 */
+};
+
+struct acpi_rhct_mmu_node {
+ u8 reserved; /* Must be zero */
+ u8 mmu_type; /* Virtual Address Scheme */
+};
+
+enum acpi_rhct_mmu_type {
+ ACPI_RHCT_MMU_TYPE_SV39 = 0,
+ ACPI_RHCT_MMU_TYPE_SV48 = 1,
+ ACPI_RHCT_MMU_TYPE_SV57 = 2
+};
+
/* Hart Info node structure */
struct acpi_rhct_hart_info {
u16 num_offsets;
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
index f51c46f4e3e4..c080d579a546 100644
--- a/include/acpi/actbl3.h
+++ b/include/acpi/actbl3.h
@@ -86,7 +86,7 @@ struct acpi_table_slic {
struct acpi_table_slit {
struct acpi_table_header header; /* Common ACPI table header */
u64 locality_count;
- u8 entry[1]; /* Real size = localities^2 */
+ u8 entry[]; /* Real size = localities^2 */
};
/*******************************************************************************
@@ -279,12 +279,14 @@ struct acpi_srat_gic_its_affinity {
* 6: ACPI_SRAT_TYPE_GENERIC_PORT_AFFINITY
*/
+#define ACPI_SRAT_DEVICE_HANDLE_SIZE 16
+
struct acpi_srat_generic_affinity {
struct acpi_subtable_header header;
u8 reserved;
u8 device_handle_type;
u32 proximity_domain;
- u8 device_handle[16];
+ u8 device_handle[ACPI_SRAT_DEVICE_HANDLE_SIZE];
u32 flags;
u32 reserved1;
};
diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h
index 6126c977ece0..3a0995f8bce8 100644
--- a/include/acpi/cppc_acpi.h
+++ b/include/acpi/cppc_acpi.h
@@ -144,6 +144,8 @@ extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls);
extern int cppc_set_enable(int cpu, bool enable);
extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps);
extern bool cppc_perf_ctrs_in_pcc(void);
+extern unsigned int cppc_perf_to_khz(struct cppc_perf_caps *caps, unsigned int perf);
+extern unsigned int cppc_khz_to_perf(struct cppc_perf_caps *caps, unsigned int freq);
extern bool acpi_cpc_valid(void);
extern bool cppc_allow_fast_switch(void);
extern int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data);
diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h
index 3c8bba9f1114..be1dd4c1a917 100644
--- a/include/acpi/ghes.h
+++ b/include/acpi/ghes.h
@@ -73,8 +73,12 @@ int ghes_register_vendor_record_notifier(struct notifier_block *nb);
void ghes_unregister_vendor_record_notifier(struct notifier_block *nb);
struct list_head *ghes_get_devices(void);
+
+void ghes_estatus_pool_region_free(unsigned long addr, u32 size);
#else
static inline struct list_head *ghes_get_devices(void) { return NULL; }
+
+static inline void ghes_estatus_pool_region_free(unsigned long addr, u32 size) { return; }
#endif
int ghes_estatus_pool_init(unsigned int num_ghes);
diff --git a/include/acpi/pcc.h b/include/acpi/pcc.h
index 73e806fe7ce7..9b373d172a77 100644
--- a/include/acpi/pcc.h
+++ b/include/acpi/pcc.h
@@ -18,7 +18,20 @@ struct pcc_mbox_chan {
u16 min_turnaround_time;
};
+/* Generic Communications Channel Shared Memory Region */
+#define PCC_SIGNATURE 0x50434300
+/* Generic Communications Channel Command Field */
+#define PCC_CMD_GENERATE_DB_INTR BIT(15)
+/* Generic Communications Channel Status Field */
+#define PCC_STATUS_CMD_COMPLETE BIT(0)
+#define PCC_STATUS_SCI_DOORBELL BIT(1)
+#define PCC_STATUS_ERROR BIT(2)
+#define PCC_STATUS_PLATFORM_NOTIFY BIT(3)
+/* Initiator Responder Communications Channel Flags */
+#define PCC_CMD_COMPLETION_NOTIFY BIT(0)
+
#define MAX_PCC_SUBSPACES 256
+
#ifdef CONFIG_PCC
extern struct pcc_mbox_chan *
pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id);
diff --git a/include/acpi/pdc_intel.h b/include/acpi/pdc_intel.h
deleted file mode 100644
index 967c552d1cd3..000000000000
--- a/include/acpi/pdc_intel.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-/* _PDC bit definition for Intel processors */
-
-#ifndef __PDC_INTEL_H__
-#define __PDC_INTEL_H__
-
-#define ACPI_PDC_P_FFH (0x0001)
-#define ACPI_PDC_C_C1_HALT (0x0002)
-#define ACPI_PDC_T_FFH (0x0004)
-#define ACPI_PDC_SMP_C1PT (0x0008)
-#define ACPI_PDC_SMP_C2C3 (0x0010)
-#define ACPI_PDC_SMP_P_SWCOORD (0x0020)
-#define ACPI_PDC_SMP_C_SWCOORD (0x0040)
-#define ACPI_PDC_SMP_T_SWCOORD (0x0080)
-#define ACPI_PDC_C_C1_FFH (0x0100)
-#define ACPI_PDC_C_C2C3_FFH (0x0200)
-#define ACPI_PDC_SMP_P_HWCOORD (0x0800)
-
-#define ACPI_PDC_EST_CAPABILITY_SMP (ACPI_PDC_SMP_C1PT | \
- ACPI_PDC_C_C1_HALT | \
- ACPI_PDC_P_FFH)
-
-#define ACPI_PDC_EST_CAPABILITY_SWSMP (ACPI_PDC_SMP_C1PT | \
- ACPI_PDC_C_C1_HALT | \
- ACPI_PDC_SMP_P_SWCOORD | \
- ACPI_PDC_SMP_P_HWCOORD | \
- ACPI_PDC_P_FFH)
-
-#define ACPI_PDC_C_CAPABILITY_SMP (ACPI_PDC_SMP_C2C3 | \
- ACPI_PDC_SMP_C1PT | \
- ACPI_PDC_C_C1_HALT | \
- ACPI_PDC_C_C1_FFH | \
- ACPI_PDC_C_C2C3_FFH)
-
-#endif /* __PDC_INTEL_H__ */
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 1ca450e35c0d..565341c826e3 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -182,6 +182,7 @@
#ifdef ACPI_USE_STANDARD_HEADERS
#include <stddef.h>
#include <unistd.h>
+#include <stdint.h>
#define ACPI_OFFSET(d, f) offsetof(d, f)
#endif
diff --git a/include/acpi/platform/aczephyr.h b/include/acpi/platform/aczephyr.h
index 2f0d30c3c5fd..703db4dc740d 100644
--- a/include/acpi/platform/aczephyr.h
+++ b/include/acpi/platform/aczephyr.h
@@ -10,9 +10,6 @@
#ifndef __ACZEPHYR_H__
#define __ACZEPHYR_H__
-#define SEEK_SET FS_SEEK_SET
-#define SEEK_END FS_SEEK_END
-
#define ACPI_MACHINE_WIDTH 64
#define ACPI_NO_ERROR_MESSAGES
diff --git a/include/acpi/proc_cap_intel.h b/include/acpi/proc_cap_intel.h
new file mode 100644
index 000000000000..ddcdc41d6c3e
--- /dev/null
+++ b/include/acpi/proc_cap_intel.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Vendor-specific processor capabilities bit definitions
+ * for Intel processors. These bits are used to convey OSPM
+ * power management capabilities to the platform.
+ */
+
+#ifndef __PROC_CAP_INTEL_H__
+#define __PROC_CAP_INTEL_H__
+
+#define ACPI_PROC_CAP_P_FFH (0x0001)
+#define ACPI_PROC_CAP_C_C1_HALT (0x0002)
+#define ACPI_PROC_CAP_T_FFH (0x0004)
+#define ACPI_PROC_CAP_SMP_C1PT (0x0008)
+#define ACPI_PROC_CAP_SMP_C2C3 (0x0010)
+#define ACPI_PROC_CAP_SMP_P_SWCOORD (0x0020)
+#define ACPI_PROC_CAP_SMP_C_SWCOORD (0x0040)
+#define ACPI_PROC_CAP_SMP_T_SWCOORD (0x0080)
+#define ACPI_PROC_CAP_C_C1_FFH (0x0100)
+#define ACPI_PROC_CAP_C_C2C3_FFH (0x0200)
+#define ACPI_PROC_CAP_SMP_P_HWCOORD (0x0800)
+#define ACPI_PROC_CAP_COLLAB_PROC_PERF (0x1000)
+
+#define ACPI_PROC_CAP_EST_CAPABILITY_SMP (ACPI_PROC_CAP_SMP_C1PT | \
+ ACPI_PROC_CAP_C_C1_HALT | \
+ ACPI_PROC_CAP_P_FFH)
+
+#define ACPI_PROC_CAP_EST_CAPABILITY_SWSMP (ACPI_PROC_CAP_SMP_C1PT | \
+ ACPI_PROC_CAP_C_C1_HALT | \
+ ACPI_PROC_CAP_SMP_P_SWCOORD | \
+ ACPI_PROC_CAP_SMP_P_HWCOORD | \
+ ACPI_PROC_CAP_P_FFH)
+
+#define ACPI_PROC_CAP_C_CAPABILITY_SMP (ACPI_PROC_CAP_SMP_C2C3 | \
+ ACPI_PROC_CAP_SMP_C1PT | \
+ ACPI_PROC_CAP_C_C1_HALT | \
+ ACPI_PROC_CAP_C_C1_FFH | \
+ ACPI_PROC_CAP_C_C2C3_FFH)
+
+#endif /* __PROC_CAP_INTEL_H__ */
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 94181fe9780a..3f34ebb27525 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -465,9 +465,4 @@ extern int acpi_processor_ffh_lpi_probe(unsigned int cpu);
extern int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi);
#endif
-#ifdef CONFIG_ACPI_HOTPLUG_CPU
-extern int arch_register_cpu(int cpu);
-extern void arch_unregister_cpu(int cpu);
-#endif
-
#endif
diff --git a/include/acpi/video.h b/include/acpi/video.h
index 4230392b5b0b..3d538d4178ab 100644
--- a/include/acpi/video.h
+++ b/include/acpi/video.h
@@ -75,6 +75,15 @@ static inline enum acpi_backlight_type acpi_video_get_backlight_type(void)
return __acpi_video_get_backlight_type(false, NULL);
}
+/*
+ * This function MUST only be called by GPU drivers to check if the driver
+ * should register a backlight class device. It not only checks whether a
+ * native GPU backlight device should be registered, it *also* tells the
+ * ACPI video-detect code that native GPU backlight control is available.
+ * Therefore calling this from any place other than the GPU driver is wrong!
+ * To check elsewhere whether GPU native backlight control is in use, use:
+ * if (acpi_video_get_backlight_type() == acpi_backlight_native) { ... }
+ */
static inline bool acpi_video_backlight_use_native(void)
{
return __acpi_video_get_backlight_type(true, NULL) == acpi_backlight_native;
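A sketch of the intended call sites for the two helpers, following the comment above; everything except the two ACPI functions is a hypothetical example:

	/* In a GPU driver only: the check also registers native control. */
	if (acpi_video_backlight_use_native())
		my_gpu_register_backlight(gpu);	/* assumed driver helper */

	/* Anywhere else: query the detected type without side effects. */
	if (acpi_video_get_backlight_type() == acpi_backlight_native)
		pr_debug("native GPU backlight control in use\n");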
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
index 941be574bbe0..d436bee4d129 100644
--- a/include/asm-generic/Kbuild
+++ b/include/asm-generic/Kbuild
@@ -2,7 +2,7 @@
#
# asm headers that all architectures except um should have
# (This file is not included when SRCARCH=um since UML borrows several
-# asm headers from the host architecutre.)
+# asm headers from the host architecture.)
mandatory-y += atomic.h
mandatory-y += archrandom.h
@@ -11,6 +11,7 @@ mandatory-y += bitops.h
mandatory-y += bug.h
mandatory-y += bugs.h
mandatory-y += cacheflush.h
+mandatory-y += cfi.h
mandatory-y += checksum.h
mandatory-y += compat.h
mandatory-y += current.h
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index e271d6708c87..22142c71d35a 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -130,7 +130,4 @@ ATOMIC_OP(xor, ^)
#define arch_atomic_read(v) READ_ONCE((v)->counter)
#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
-#define arch_atomic_xchg(ptr, v) (arch_xchg(&(ptr)->counter, (u32)(v)))
-#define arch_atomic_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), (u32)(old), (u32)(new)))
-
#endif /* __ASM_GENERIC_ATOMIC_H */
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
index 71ab4ba9c25d..e076e079f6b2 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -15,21 +15,21 @@ static __always_inline void
arch_set_bit(unsigned int nr, volatile unsigned long *p)
{
p += BIT_WORD(nr);
- arch_atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
+ raw_atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
}
static __always_inline void
arch_clear_bit(unsigned int nr, volatile unsigned long *p)
{
p += BIT_WORD(nr);
- arch_atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
+ raw_atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
}
static __always_inline void
arch_change_bit(unsigned int nr, volatile unsigned long *p)
{
p += BIT_WORD(nr);
- arch_atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);
+ raw_atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);
}
static __always_inline int
@@ -39,7 +39,7 @@ arch_test_and_set_bit(unsigned int nr, volatile unsigned long *p)
unsigned long mask = BIT_MASK(nr);
p += BIT_WORD(nr);
- old = arch_atomic_long_fetch_or(mask, (atomic_long_t *)p);
+ old = raw_atomic_long_fetch_or(mask, (atomic_long_t *)p);
return !!(old & mask);
}
@@ -50,7 +50,7 @@ arch_test_and_clear_bit(unsigned int nr, volatile unsigned long *p)
unsigned long mask = BIT_MASK(nr);
p += BIT_WORD(nr);
- old = arch_atomic_long_fetch_andnot(mask, (atomic_long_t *)p);
+ old = raw_atomic_long_fetch_andnot(mask, (atomic_long_t *)p);
return !!(old & mask);
}
@@ -61,7 +61,7 @@ arch_test_and_change_bit(unsigned int nr, volatile unsigned long *p)
unsigned long mask = BIT_MASK(nr);
p += BIT_WORD(nr);
- old = arch_atomic_long_fetch_xor(mask, (atomic_long_t *)p);
+ old = raw_atomic_long_fetch_xor(mask, (atomic_long_t *)p);
return !!(old & mask);
}
diff --git a/include/asm-generic/bitops/instrumented-lock.h b/include/asm-generic/bitops/instrumented-lock.h
index eb64bd4f11f3..542d3727ee4e 100644
--- a/include/asm-generic/bitops/instrumented-lock.h
+++ b/include/asm-generic/bitops/instrumented-lock.h
@@ -58,27 +58,25 @@ static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
return arch_test_and_set_bit_lock(nr, addr);
}
-#if defined(arch_clear_bit_unlock_is_negative_byte)
/**
- * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
- * byte is negative, for unlock.
- * @nr: the bit to clear
- * @addr: the address to start counting from
+ * xor_unlock_is_negative_byte - XOR a single byte in memory and test if
+ * it is negative, for unlock.
+ * @mask: Change the bits which are set in this mask.
+ * @addr: The address of the word containing the byte to change.
*
+ * Changes some of bits 0-6 in the word pointed to by @addr.
* This operation is atomic and provides release barrier semantics.
+ * Used to optimise some folio operations which are commonly paired
+ * with an unlock or end of writeback. Bit 7 is used as PG_waiters to
+ * indicate whether anybody is waiting for the unlock.
*
- * This is a bit of a one-trick-pony for the filemap code, which clears
- * PG_locked and tests PG_waiters,
+ * Return: Whether the top bit of the byte is set.
*/
-static inline bool
-clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
+static inline bool xor_unlock_is_negative_byte(unsigned long mask,
+ volatile unsigned long *addr)
{
kcsan_release();
- instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
- return arch_clear_bit_unlock_is_negative_byte(nr, addr);
+ instrument_atomic_write(addr, sizeof(long));
+ return arch_xor_unlock_is_negative_byte(mask, addr);
}
-/* Let everybody know we have it. */
-#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
-#endif
-
#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H */
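A hedged sketch of the unlock pattern the new helper serves; this is illustrative only, not the actual filemap code, and MY_LOCK_BIT plus the wake helper are assumptions:

	#define MY_LOCK_BIT	0	/* assumed lock bit inside the byte */

	static void my_unlock(unsigned long *word)
	{
		/*
		 * Atomically flip the lock bit with release semantics and,
		 * in the same operation, test bit 7 of the byte to see
		 * whether anybody is waiting.
		 */
		if (xor_unlock_is_negative_byte(BIT(MY_LOCK_BIT), word))
			my_wake_up_waiters(word);	/* assumed helper */
	}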
diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h
index 630f2f6b9595..14d4ec8c5152 100644
--- a/include/asm-generic/bitops/lock.h
+++ b/include/asm-generic/bitops/lock.h
@@ -25,7 +25,7 @@ arch_test_and_set_bit_lock(unsigned int nr, volatile unsigned long *p)
if (READ_ONCE(*p) & mask)
return 1;
- old = arch_atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
+ old = raw_atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
return !!(old & mask);
}
@@ -41,7 +41,7 @@ static __always_inline void
arch_clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
{
p += BIT_WORD(nr);
- arch_atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
+ raw_atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
}
/**
@@ -63,30 +63,18 @@ arch___clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
p += BIT_WORD(nr);
old = READ_ONCE(*p);
old &= ~BIT_MASK(nr);
- arch_atomic_long_set_release((atomic_long_t *)p, old);
+ raw_atomic_long_set_release((atomic_long_t *)p, old);
}
-/**
- * arch_clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
- * byte is negative, for unlock.
- * @nr: the bit to clear
- * @addr: the address to start counting from
- *
- * This is a bit of a one-trick-pony for the filemap code, which clears
- * PG_locked and tests PG_waiters,
- */
-#ifndef arch_clear_bit_unlock_is_negative_byte
-static inline bool arch_clear_bit_unlock_is_negative_byte(unsigned int nr,
- volatile unsigned long *p)
+#ifndef arch_xor_unlock_is_negative_byte
+static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
+ volatile unsigned long *p)
{
long old;
- unsigned long mask = BIT_MASK(nr);
- p += BIT_WORD(nr);
- old = arch_atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p);
+ old = raw_atomic_long_fetch_xor_release(mask, (atomic_long_t *)p);
return !!(old & BIT(7));
}
-#define arch_clear_bit_unlock_is_negative_byte arch_clear_bit_unlock_is_negative_byte
#endif
#include <asm-generic/bitops/instrumented-lock.h>
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 4050b191e1a9..6e794420bd39 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -87,10 +87,12 @@ struct bug_entry {
*
* Use the versions with printk format strings to provide better diagnostics.
*/
-#ifndef __WARN_FLAGS
extern __printf(4, 5)
void warn_slowpath_fmt(const char *file, const int line, unsigned taint,
const char *fmt, ...);
+extern __printf(1, 2) void __warn_printk(const char *fmt, ...);
+
+#ifndef __WARN_FLAGS
#define __WARN() __WARN_printf(TAINT_WARN, NULL)
#define __WARN_printf(taint, arg...) do { \
instrumentation_begin(); \
@@ -98,7 +100,6 @@ void warn_slowpath_fmt(const char *file, const int line, unsigned taint,
instrumentation_end(); \
} while (0)
#else
-extern __printf(1, 2) void __warn_printk(const char *fmt, ...);
#define __WARN() __WARN_FLAGS(BUGFLAG_TAINT(TAINT_WARN))
#define __WARN_printf(taint, arg...) do { \
instrumentation_begin(); \
diff --git a/include/asm-generic/bugs.h b/include/asm-generic/bugs.h
deleted file mode 100644
index 69021830f078..000000000000
--- a/include/asm-generic/bugs.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_GENERIC_BUGS_H
-#define __ASM_GENERIC_BUGS_H
-/*
- * This file is included by 'init/main.c' to check for
- * architecture-dependent bugs.
- */
-
-static inline void check_bugs(void) { }
-
-#endif /* __ASM_GENERIC_BUGS_H */
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
index f46258d1a080..7ee8a179d103 100644
--- a/include/asm-generic/cacheflush.h
+++ b/include/asm-generic/cacheflush.h
@@ -77,13 +77,6 @@ static inline void flush_icache_range(unsigned long start, unsigned long end)
#define flush_icache_user_range flush_icache_range
#endif
-#ifndef flush_icache_page
-static inline void flush_icache_page(struct vm_area_struct *vma,
- struct page *page)
-{
-}
-#endif
-
#ifndef flush_icache_user_page
static inline void flush_icache_user_page(struct vm_area_struct *vma,
struct page *page,
@@ -98,6 +91,12 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
}
#endif
+#ifndef flush_cache_vmap_early
+static inline void flush_cache_vmap_early(unsigned long start, unsigned long end)
+{
+}
+#endif
+
#ifndef flush_cache_vunmap
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
diff --git a/include/asm-generic/cfi.h b/include/asm-generic/cfi.h
new file mode 100644
index 000000000000..41fac3537bf9
--- /dev/null
+++ b/include/asm-generic/cfi.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_CFI_H
+#define __ASM_GENERIC_CFI_H
+
+#endif /* __ASM_GENERIC_CFI_H */
diff --git a/include/asm-generic/checksum.h b/include/asm-generic/checksum.h
index 43e18db89c14..ad928cce268b 100644
--- a/include/asm-generic/checksum.h
+++ b/include/asm-generic/checksum.h
@@ -2,6 +2,8 @@
#ifndef __ASM_GENERIC_CHECKSUM_H
#define __ASM_GENERIC_CHECKSUM_H
+#include <linux/bitops.h>
+
/*
* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
@@ -31,9 +33,7 @@ extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
static inline __sum16 csum_fold(__wsum csum)
{
u32 sum = (__force u32)csum;
- sum = (sum & 0xffff) + (sum >> 16);
- sum = (sum & 0xffff) + (sum >> 16);
- return (__force __sum16)~sum;
+ return (__force __sum16)((~sum - ror32(sum, 16)) >> 16);
}
#endif
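The new csum_fold() works because, for any 32-bit sum, the value ~sum - ror32(sum, 16) carries the one's-complement of the twice-folded sum in its upper 16 bits. A small standalone userspace check of the two formulations (a verification sketch, buildable with any C compiler):

	#include <assert.h>
	#include <stdint.h>

	static uint32_t ror32(uint32_t w, unsigned int s)
	{
		return (w >> s) | (w << (32 - s));
	}

	static uint16_t fold_old(uint32_t s)	/* the replaced two-step fold */
	{
		s = (s & 0xffff) + (s >> 16);
		s = (s & 0xffff) + (s >> 16);
		return (uint16_t)~s;
	}

	static uint16_t fold_new(uint32_t s)	/* the new one-liner */
	{
		return (uint16_t)((~s - ror32(s, 16)) >> 16);
	}

	int main(void)
	{
		const uint32_t v[] = { 0, 1, 0xffff, 0x10000, 0x12345678,
				       0xffff0001, 0xffffffff };

		for (unsigned int i = 0; i < sizeof(v) / sizeof(v[0]); i++)
			assert(fold_old(v[i]) == fold_new(v[i]));
		return 0;
	}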
diff --git a/include/asm-generic/cmpxchg-local.h b/include/asm-generic/cmpxchg-local.h
index 3df9f59a544e..f27d66fdc00a 100644
--- a/include/asm-generic/cmpxchg-local.h
+++ b/include/asm-generic/cmpxchg-local.h
@@ -34,7 +34,7 @@ static inline unsigned long __generic_cmpxchg_local(volatile void *ptr,
*(u16 *)ptr = (new & 0xffffu);
break;
case 4: prev = *(u32 *)ptr;
- if (prev == (old & 0xffffffffffu))
+ if (prev == (old & 0xffffffffu))
*(u32 *)ptr = (new & 0xffffffffu);
break;
case 8: prev = *(u64 *)ptr;
diff --git a/include/asm-generic/current.h b/include/asm-generic/current.h
index 3a2e224b9fa0..9c2aeecbd05a 100644
--- a/include/asm-generic/current.h
+++ b/include/asm-generic/current.h
@@ -2,9 +2,11 @@
#ifndef __ASM_GENERIC_CURRENT_H
#define __ASM_GENERIC_CURRENT_H
+#ifndef __ASSEMBLY__
#include <linux/thread_info.h>
#define get_current() (current_thread_info()->task)
#define current get_current()
+#endif
#endif /* __ASM_GENERIC_CURRENT_H */
diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h
index 5e4b1f2369d2..570cd4da7210 100644
--- a/include/asm-generic/export.h
+++ b/include/asm-generic/export.h
@@ -3,86 +3,9 @@
#define __ASM_GENERIC_EXPORT_H
/*
- * This comment block is used by fixdep. Please do not remove.
- *
- * When CONFIG_MODVERSIONS is changed from n to y, all source files having
- * EXPORT_SYMBOL variants must be re-compiled because genksyms is run as a
- * side effect of the *.o build rule.
+ * <asm/export.h> and <asm-generic/export.h> are deprecated.
+ * Please include <linux/export.h> directly.
*/
-
-#ifndef KSYM_FUNC
-#define KSYM_FUNC(x) x
-#endif
-#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
-#define KSYM_ALIGN 4
-#elif defined(CONFIG_64BIT)
-#define KSYM_ALIGN 8
-#else
-#define KSYM_ALIGN 4
-#endif
-
-.macro __put, val, name
-#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
- .long \val - ., \name - ., 0
-#elif defined(CONFIG_64BIT)
- .quad \val, \name, 0
-#else
- .long \val, \name, 0
-#endif
-.endm
-
-/*
- * note on .section use: we specify progbits since usage of the "M" (SHF_MERGE)
- * section flag requires it. Use '%progbits' instead of '@progbits' since the
- * former apparently works on all arches according to the binutils source.
- */
-
-.macro ___EXPORT_SYMBOL name,val,sec
-#if defined(CONFIG_MODULES) && !defined(__DISABLE_EXPORTS)
- .section ___ksymtab\sec+\name,"a"
- .balign KSYM_ALIGN
-__ksymtab_\name:
- __put \val, __kstrtab_\name
- .previous
- .section __ksymtab_strings,"aMS",%progbits,1
-__kstrtab_\name:
- .asciz "\name"
- .previous
-#endif
-.endm
-
-#if defined(CONFIG_TRIM_UNUSED_KSYMS)
-
-#include <linux/kconfig.h>
-#include <generated/autoksyms.h>
-
-.macro __ksym_marker sym
- .section ".discard.ksym","a"
-__ksym_marker_\sym:
- .previous
-.endm
-
-#define __EXPORT_SYMBOL(sym, val, sec) \
- __ksym_marker sym; \
- __cond_export_sym(sym, val, sec, __is_defined(__KSYM_##sym))
-#define __cond_export_sym(sym, val, sec, conf) \
- ___cond_export_sym(sym, val, sec, conf)
-#define ___cond_export_sym(sym, val, sec, enabled) \
- __cond_export_sym_##enabled(sym, val, sec)
-#define __cond_export_sym_1(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec
-#define __cond_export_sym_0(sym, val, sec) /* nothing */
-
-#else
-#define __EXPORT_SYMBOL(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec
-#endif
-
-#define EXPORT_SYMBOL(name) \
- __EXPORT_SYMBOL(name, KSYM_FUNC(name),)
-#define EXPORT_SYMBOL_GPL(name) \
- __EXPORT_SYMBOL(name, KSYM_FUNC(name), _gpl)
-#define EXPORT_DATA_SYMBOL(name) \
- __EXPORT_SYMBOL(name, name,)
-#define EXPORT_DATA_SYMBOL_GPL(name) \
- __EXPORT_SYMBOL(name, name,_gpl)
+#include <linux/export.h>
#endif
diff --git a/include/asm-generic/fb.h b/include/asm-generic/fb.h
index f9f18101ed36..6ccabb400aa6 100644
--- a/include/asm-generic/fb.h
+++ b/include/asm-generic/fb.h
@@ -1,13 +1,135 @@
/* SPDX-License-Identifier: GPL-2.0 */
+
#ifndef __ASM_GENERIC_FB_H_
#define __ASM_GENERIC_FB_H_
-#include <linux/fb.h>
-#define fb_pgprotect(...) do {} while (0)
+/*
+ * Only include this header file from your architecture's <asm/fb.h>.
+ */
+
+#include <linux/io.h>
+#include <linux/mm_types.h>
+#include <linux/pgtable.h>
+
+struct fb_info;
+
+#ifndef pgprot_framebuffer
+#define pgprot_framebuffer pgprot_framebuffer
+static inline pgprot_t pgprot_framebuffer(pgprot_t prot,
+ unsigned long vm_start, unsigned long vm_end,
+ unsigned long offset)
+{
+ return pgprot_writecombine(prot);
+}
+#endif
+#ifndef fb_is_primary_device
+#define fb_is_primary_device fb_is_primary_device
static inline int fb_is_primary_device(struct fb_info *info)
{
return 0;
}
+#endif
+
+/*
+ * I/O helpers for the framebuffer. Prefer these functions over their
+ * regular counterparts. The regular I/O functions provide in-order
+ * access and swap bytes to/from little-endian ordering. Neither is
+ * required for framebuffers. Instead, the helpers read and write
+ * raw framebuffer data. Independent operations can be reordered for
+ * improved performance.
+ */
+
+#ifndef fb_readb
+static inline u8 fb_readb(const volatile void __iomem *addr)
+{
+ return __raw_readb(addr);
+}
+#define fb_readb fb_readb
+#endif
+
+#ifndef fb_readw
+static inline u16 fb_readw(const volatile void __iomem *addr)
+{
+ return __raw_readw(addr);
+}
+#define fb_readw fb_readw
+#endif
+
+#ifndef fb_readl
+static inline u32 fb_readl(const volatile void __iomem *addr)
+{
+ return __raw_readl(addr);
+}
+#define fb_readl fb_readl
+#endif
+
+#ifndef fb_readq
+#if defined(__raw_readq)
+static inline u64 fb_readq(const volatile void __iomem *addr)
+{
+ return __raw_readq(addr);
+}
+#define fb_readq fb_readq
+#endif
+#endif
+
+#ifndef fb_writeb
+static inline void fb_writeb(u8 b, volatile void __iomem *addr)
+{
+ __raw_writeb(b, addr);
+}
+#define fb_writeb fb_writeb
+#endif
+
+#ifndef fb_writew
+static inline void fb_writew(u16 b, volatile void __iomem *addr)
+{
+ __raw_writew(b, addr);
+}
+#define fb_writew fb_writew
+#endif
+
+#ifndef fb_writel
+static inline void fb_writel(u32 b, volatile void __iomem *addr)
+{
+ __raw_writel(b, addr);
+}
+#define fb_writel fb_writel
+#endif
+
+#ifndef fb_writeq
+#if defined(__raw_writeq)
+static inline void fb_writeq(u64 b, volatile void __iomem *addr)
+{
+ __raw_writeq(b, addr);
+}
+#define fb_writeq fb_writeq
+#endif
+#endif
+
+#ifndef fb_memcpy_fromio
+static inline void fb_memcpy_fromio(void *to, const volatile void __iomem *from, size_t n)
+{
+ memcpy_fromio(to, from, n);
+}
+#define fb_memcpy_fromio fb_memcpy_fromio
+#endif
+
+#ifndef fb_memcpy_toio
+static inline void fb_memcpy_toio(volatile void __iomem *to, const void *from, size_t n)
+{
+ memcpy_toio(to, from, n);
+}
+#define fb_memcpy_toio fb_memcpy_toio
+#endif
+
+#ifndef fb_memset
+static inline void fb_memset_io(volatile void __iomem *addr, int c, size_t n)
+{
+ memset_io(addr, c, n);
+}
+#define fb_memset fb_memset_io
+#endif
#endif /* __ASM_GENERIC_FB_H_ */
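For illustration, a minimal sketch of a driver filling one 32bpp scanline through the raw helpers (hypothetical code; the stores may be reordered and are not byte-swapped, which is exactly what raw pixel data wants):

	static void fill_scanline(void __iomem *line, u32 pixel,
				  unsigned int width)
	{
		unsigned int x;

		for (x = 0; x < width; x++)
			fb_writel(pixel, line + x * 4);
	}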
diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h
index d7f6335d3999..6dcf4d576970 100644
--- a/include/asm-generic/hugetlb.h
+++ b/include/asm-generic/hugetlb.h
@@ -22,7 +22,7 @@ static inline unsigned long huge_pte_dirty(pte_t pte)
static inline pte_t huge_pte_mkwrite(pte_t pte)
{
- return pte_mkwrite(pte);
+ return pte_mkwrite_novma(pte);
}
#ifndef __HAVE_ARCH_HUGE_PTE_WRPROTECT
@@ -76,7 +76,7 @@ static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
#ifndef __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte)
+ pte_t *ptep, pte_t pte, unsigned long sz)
{
set_pte_at(mm, addr, ptep, pte);
}
diff --git a/include/asm-generic/hyperv-tlfs.h b/include/asm-generic/hyperv-tlfs.h
index f4e4cc4f965f..fdac4a1714ec 100644
--- a/include/asm-generic/hyperv-tlfs.h
+++ b/include/asm-generic/hyperv-tlfs.h
@@ -223,6 +223,7 @@ enum HV_GENERIC_SET_FORMAT {
#define HV_STATUS_INVALID_PORT_ID 17
#define HV_STATUS_INVALID_CONNECTION_ID 18
#define HV_STATUS_INSUFFICIENT_BUFFERS 19
+#define HV_STATUS_TIME_OUT 120
#define HV_STATUS_VTL_ALREADY_ENABLED 134
/*
diff --git a/include/asm-generic/ide_iops.h b/include/asm-generic/ide_iops.h
deleted file mode 100644
index 81dfa3ee5e06..000000000000
--- a/include/asm-generic/ide_iops.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Generic I/O and MEMIO string operations. */
-
-#define __ide_insw insw
-#define __ide_insl insl
-#define __ide_outsw outsw
-#define __ide_outsl outsl
-
-static __inline__ void __ide_mm_insw(void __iomem *port, void *addr, u32 count)
-{
- while (count--) {
- *(u16 *)addr = readw(port);
- addr += 2;
- }
-}
-
-static __inline__ void __ide_mm_insl(void __iomem *port, void *addr, u32 count)
-{
- while (count--) {
- *(u32 *)addr = readl(port);
- addr += 4;
- }
-}
-
-static __inline__ void __ide_mm_outsw(void __iomem *port, void *addr, u32 count)
-{
- while (count--) {
- writew(*(u16 *)addr, port);
- addr += 2;
- }
-}
-
-static __inline__ void __ide_mm_outsl(void __iomem * port, void *addr, u32 count)
-{
- while (count--) {
- writel(*(u32 *)addr, port);
- addr += 4;
- }
-}
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 587e7e9b9a37..bac63e874c7b 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -1047,41 +1047,22 @@ static inline void iounmap(volatile void __iomem *addr)
#elif defined(CONFIG_GENERIC_IOREMAP)
#include <linux/pgtable.h>
-/*
- * Arch code can implement the following two hooks when using GENERIC_IOREMAP
- * ioremap_allowed() return a bool,
- * - true means continue to remap
- * - false means skip remap and return directly
- * iounmap_allowed() return a bool,
- * - true means continue to vunmap
- * - false means skip vunmap and return directly
- */
-#ifndef ioremap_allowed
-#define ioremap_allowed ioremap_allowed
-static inline bool ioremap_allowed(phys_addr_t phys_addr, size_t size,
- unsigned long prot)
-{
- return true;
-}
-#endif
-
-#ifndef iounmap_allowed
-#define iounmap_allowed iounmap_allowed
-static inline bool iounmap_allowed(void *addr)
-{
- return true;
-}
-#endif
+void __iomem *generic_ioremap_prot(phys_addr_t phys_addr, size_t size,
+ pgprot_t prot);
void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
unsigned long prot);
void iounmap(volatile void __iomem *addr);
+void generic_iounmap(volatile void __iomem *addr);
+#ifndef ioremap
+#define ioremap ioremap
static inline void __iomem *ioremap(phys_addr_t addr, size_t size)
{
/* _PAGE_IOREMAP needs to be supplied by the architecture */
return ioremap_prot(addr, size, _PAGE_IOREMAP);
}
+#endif
#endif /* !CONFIG_MMU || CONFIG_GENERIC_IOREMAP */
#ifndef ioremap_wc
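With the ioremap_allowed()/iounmap_allowed() hooks gone, an architecture with extra requirements now provides its own ioremap_prot() and falls through to generic_ioremap_prot(). A hedged sketch of such a wrapper; the RAM check is an assumed example policy, not taken from any particular architecture:

	void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
				   unsigned long prot)
	{
		/* Assumed example policy: refuse to remap ordinary RAM. */
		if (pfn_valid(PHYS_PFN(phys_addr)))
			return NULL;

		return generic_ioremap_prot(phys_addr, size, __pgprot(prot));
	}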
diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h
index 08237ae8b840..196087a8126e 100644
--- a/include/asm-generic/iomap.h
+++ b/include/asm-generic/iomap.h
@@ -93,15 +93,15 @@ extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *);
#endif
-#ifndef ARCH_HAS_IOREMAP_WC
+#ifndef ioremap_wc
#define ioremap_wc ioremap
#endif
-#ifndef ARCH_HAS_IOREMAP_WT
+#ifndef ioremap_wt
#define ioremap_wt ioremap
#endif
-#ifndef ARCH_HAS_IOREMAP_NP
+#ifndef ioremap_np
/* See the comment in asm-generic/io.h about ioremap_np(). */
#define ioremap_np ioremap_np
static inline void __iomem *ioremap_np(phys_addr_t offset, size_t size)
diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h
index 402a8c1c202d..430f0ae0dde2 100644
--- a/include/asm-generic/mshyperv.h
+++ b/include/asm-generic/mshyperv.h
@@ -36,15 +36,22 @@ struct ms_hyperv_info {
u32 nested_features;
u32 max_vp_index;
u32 max_lp_index;
- u32 isolation_config_a;
+ u8 vtl;
+ union {
+ u32 isolation_config_a;
+ struct {
+ u32 paravisor_present : 1;
+ u32 reserved_a1 : 31;
+ };
+ };
union {
u32 isolation_config_b;
struct {
u32 cvm_type : 4;
- u32 reserved1 : 1;
+ u32 reserved_b1 : 1;
u32 shared_gpa_boundary_active : 1;
u32 shared_gpa_boundary_bits : 6;
- u32 reserved2 : 20;
+ u32 reserved_b2 : 20;
};
};
u64 shared_gpa_boundary;
@@ -57,7 +64,8 @@ extern void * __percpu *hyperv_pcpu_output_arg;
extern u64 hv_do_hypercall(u64 control, void *inputaddr, void *outputaddr);
extern u64 hv_do_fast_hypercall8(u16 control, u64 input8);
-extern bool hv_isolation_type_snp(void);
+bool hv_isolation_type_snp(void);
+bool hv_isolation_type_tdx(void);
/* Helper functions that provide a consistent pattern for checking Hyper-V hypercall status. */
static inline int hv_result(u64 status)
@@ -190,7 +198,7 @@ int hv_common_cpu_die(unsigned int cpu);
void *hv_alloc_hyperv_page(void);
void *hv_alloc_hyperv_zeroed_page(void);
-void hv_free_hyperv_page(unsigned long addr);
+void hv_free_hyperv_page(void *addr);
/**
* hv_cpu_number_to_vp_number() - Map CPU to VP.
@@ -274,6 +282,7 @@ enum hv_isolation_type hv_get_isolation_type(void);
bool hv_is_isolation_supported(void);
bool hv_isolation_type_snp(void);
u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size);
+u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2);
void hyperv_cleanup(void);
bool hv_query_ext_cap(u64 cap_query);
void hv_setup_dma_ops(struct device *dev, bool coherent);
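With isolation_config_a now an anonymous-bitfield union, callers test the paravisor bit by name instead of masking the raw register value. A minimal sketch against the global ms_hyperv descriptor:

static void report_paravisor(void)
{
	/* bit 0 of isolation_config_a, now directly addressable */
	if (ms_hyperv.paravisor_present)
		pr_info("Hyper-V: running below a paravisor, VTL %u\n",
			ms_hyperv.vtl);
}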
diff --git a/include/asm-generic/numa.h b/include/asm-generic/numa.h
index 1a3ad6d29833..c32e0cf23c90 100644
--- a/include/asm-generic/numa.h
+++ b/include/asm-generic/numa.h
@@ -35,6 +35,7 @@ int __init numa_add_memblk(int nodeid, u64 start, u64 end);
void __init numa_set_distance(int from, int to, int distance);
void __init numa_free_distance(void);
void __init early_map_cpu_to_node(unsigned int cpu, int nid);
+int __init early_cpu_to_node(int cpu);
void numa_store_cpu_info(unsigned int cpu);
void numa_add_cpu(unsigned int cpu);
void numa_remove_cpu(unsigned int cpu);
@@ -46,6 +47,7 @@ static inline void numa_add_cpu(unsigned int cpu) { }
static inline void numa_remove_cpu(unsigned int cpu) { }
static inline void arch_numa_init(void) { }
static inline void early_map_cpu_to_node(unsigned int cpu, int nid) { }
+static inline int early_cpu_to_node(int cpu) { return 0; }
#endif /* CONFIG_NUMA */
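early_cpu_to_node() gives early (pre-SMP) allocation paths a CPU-to-node lookup; the !CONFIG_NUMA stub pins everything to node 0 so callers stay ifdef-free. A minimal sketch, assuming memblock is still live:

static void __init alloc_early_buf(unsigned int cpu)
{
	int nid = early_cpu_to_node(cpu);	/* always 0 without NUMA */
	void *buf = memblock_alloc_node(SZ_4K, SZ_4K, nid);

	if (!buf)
		panic("no early buffer for CPU %u\n", cpu);
}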
diff --git a/include/asm-generic/page.h b/include/asm-generic/page.h
index c0be2edeb484..9773582fd96e 100644
--- a/include/asm-generic/page.h
+++ b/include/asm-generic/page.h
@@ -74,8 +74,16 @@ extern unsigned long memory_end;
#define __va(x) ((void *)((unsigned long) (x)))
#define __pa(x) ((unsigned long) (x))
-#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
-#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
+static inline unsigned long virt_to_pfn(const void *kaddr)
+{
+ return __pa(kaddr) >> PAGE_SHIFT;
+}
+#define virt_to_pfn virt_to_pfn
+static inline void *pfn_to_virt(unsigned long pfn)
+{
+	return __va(pfn << PAGE_SHIFT);
+}
+#define pfn_to_virt pfn_to_virt
#define virt_to_page(addr) pfn_to_page(virt_to_pfn(addr))
#define page_to_virt(page) pfn_to_virt(page_to_pfn(page))
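Converting the macros to typed inlines lets the compiler check arguments and results. A short illustration of what is now caught:

static unsigned long buf_pfn(void *kbuf)
{
	/* ok: virt_to_pfn() takes a const void *; passing an integer
	 * (which the old macro silently accepted) now warns */
	return virt_to_pfn(kbuf);
}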
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 6432a7fade91..94cbd50cc870 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -89,27 +89,35 @@ do { \
__ret; \
})
-#define raw_cpu_generic_cmpxchg(pcp, oval, nval) \
+#define __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, _cmpxchg) \
+({ \
+ typeof(pcp) __val, __old = *(ovalp); \
+ __val = _cmpxchg(pcp, __old, nval); \
+ if (__val != __old) \
+ *(ovalp) = __val; \
+ __val == __old; \
+})
+
+#define raw_cpu_generic_try_cmpxchg(pcp, ovalp, nval) \
({ \
typeof(pcp) *__p = raw_cpu_ptr(&(pcp)); \
- typeof(pcp) __ret; \
- __ret = *__p; \
- if (__ret == (oval)) \
+ typeof(pcp) __val = *__p, ___old = *(ovalp); \
+ bool __ret; \
+ if (__val == ___old) { \
*__p = nval; \
+ __ret = true; \
+ } else { \
+ *(ovalp) = __val; \
+ __ret = false; \
+ } \
__ret; \
})
-#define raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+#define raw_cpu_generic_cmpxchg(pcp, oval, nval) \
({ \
- typeof(pcp1) *__p1 = raw_cpu_ptr(&(pcp1)); \
- typeof(pcp2) *__p2 = raw_cpu_ptr(&(pcp2)); \
- int __ret = 0; \
- if (*__p1 == (oval1) && *__p2 == (oval2)) { \
- *__p1 = nval1; \
- *__p2 = nval2; \
- __ret = 1; \
- } \
- (__ret); \
+ typeof(pcp) __old = (oval); \
+ raw_cpu_generic_try_cmpxchg(pcp, &__old, nval); \
+ __old; \
})
#define __this_cpu_generic_read_nopreempt(pcp) \
@@ -170,23 +178,22 @@ do { \
__ret; \
})
-#define this_cpu_generic_cmpxchg(pcp, oval, nval) \
+#define this_cpu_generic_try_cmpxchg(pcp, ovalp, nval) \
({ \
- typeof(pcp) __ret; \
+ bool __ret; \
unsigned long __flags; \
raw_local_irq_save(__flags); \
- __ret = raw_cpu_generic_cmpxchg(pcp, oval, nval); \
+ __ret = raw_cpu_generic_try_cmpxchg(pcp, ovalp, nval); \
raw_local_irq_restore(__flags); \
__ret; \
})
-#define this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+#define this_cpu_generic_cmpxchg(pcp, oval, nval) \
({ \
- int __ret; \
+ typeof(pcp) __ret; \
unsigned long __flags; \
raw_local_irq_save(__flags); \
- __ret = raw_cpu_generic_cmpxchg_double(pcp1, pcp2, \
- oval1, oval2, nval1, nval2); \
+ __ret = raw_cpu_generic_cmpxchg(pcp, oval, nval); \
raw_local_irq_restore(__flags); \
__ret; \
})
@@ -282,6 +289,62 @@ do { \
#define raw_cpu_xchg_8(pcp, nval) raw_cpu_generic_xchg(pcp, nval)
#endif
+#ifndef raw_cpu_try_cmpxchg_1
+#ifdef raw_cpu_cmpxchg_1
+#define raw_cpu_try_cmpxchg_1(pcp, ovalp, nval) \
+ __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, raw_cpu_cmpxchg_1)
+#else
+#define raw_cpu_try_cmpxchg_1(pcp, ovalp, nval) \
+ raw_cpu_generic_try_cmpxchg(pcp, ovalp, nval)
+#endif
+#endif
+#ifndef raw_cpu_try_cmpxchg_2
+#ifdef raw_cpu_cmpxchg_2
+#define raw_cpu_try_cmpxchg_2(pcp, ovalp, nval) \
+ __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, raw_cpu_cmpxchg_2)
+#else
+#define raw_cpu_try_cmpxchg_2(pcp, ovalp, nval) \
+ raw_cpu_generic_try_cmpxchg(pcp, ovalp, nval)
+#endif
+#endif
+#ifndef raw_cpu_try_cmpxchg_4
+#ifdef raw_cpu_cmpxchg_4
+#define raw_cpu_try_cmpxchg_4(pcp, ovalp, nval) \
+ __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, raw_cpu_cmpxchg_4)
+#else
+#define raw_cpu_try_cmpxchg_4(pcp, ovalp, nval) \
+ raw_cpu_generic_try_cmpxchg(pcp, ovalp, nval)
+#endif
+#endif
+#ifndef raw_cpu_try_cmpxchg_8
+#ifdef raw_cpu_cmpxchg_8
+#define raw_cpu_try_cmpxchg_8(pcp, ovalp, nval) \
+ __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, raw_cpu_cmpxchg_8)
+#else
+#define raw_cpu_try_cmpxchg_8(pcp, ovalp, nval) \
+ raw_cpu_generic_try_cmpxchg(pcp, ovalp, nval)
+#endif
+#endif
+
+#ifndef raw_cpu_try_cmpxchg64
+#ifdef raw_cpu_cmpxchg64
+#define raw_cpu_try_cmpxchg64(pcp, ovalp, nval) \
+ __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, raw_cpu_cmpxchg64)
+#else
+#define raw_cpu_try_cmpxchg64(pcp, ovalp, nval) \
+ raw_cpu_generic_try_cmpxchg(pcp, ovalp, nval)
+#endif
+#endif
+#ifndef raw_cpu_try_cmpxchg128
+#ifdef raw_cpu_cmpxchg128
+#define raw_cpu_try_cmpxchg128(pcp, ovalp, nval) \
+ __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, raw_cpu_cmpxchg128)
+#else
+#define raw_cpu_try_cmpxchg128(pcp, ovalp, nval) \
+ raw_cpu_generic_try_cmpxchg(pcp, ovalp, nval)
+#endif
+#endif
+
#ifndef raw_cpu_cmpxchg_1
#define raw_cpu_cmpxchg_1(pcp, oval, nval) \
raw_cpu_generic_cmpxchg(pcp, oval, nval)
@@ -299,21 +362,13 @@ do { \
raw_cpu_generic_cmpxchg(pcp, oval, nval)
#endif
-#ifndef raw_cpu_cmpxchg_double_1
-#define raw_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
- raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-#endif
-#ifndef raw_cpu_cmpxchg_double_2
-#define raw_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
- raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-#endif
-#ifndef raw_cpu_cmpxchg_double_4
-#define raw_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
- raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#ifndef raw_cpu_cmpxchg64
+#define raw_cpu_cmpxchg64(pcp, oval, nval) \
+ raw_cpu_generic_cmpxchg(pcp, oval, nval)
#endif
-#ifndef raw_cpu_cmpxchg_double_8
-#define raw_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
- raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#ifndef raw_cpu_cmpxchg128
+#define raw_cpu_cmpxchg128(pcp, oval, nval) \
+ raw_cpu_generic_cmpxchg(pcp, oval, nval)
#endif
#ifndef this_cpu_read_1
@@ -407,6 +462,62 @@ do { \
#define this_cpu_xchg_8(pcp, nval) this_cpu_generic_xchg(pcp, nval)
#endif
+#ifndef this_cpu_try_cmpxchg_1
+#ifdef this_cpu_cmpxchg_1
+#define this_cpu_try_cmpxchg_1(pcp, ovalp, nval) \
+ __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, this_cpu_cmpxchg_1)
+#else
+#define this_cpu_try_cmpxchg_1(pcp, ovalp, nval) \
+ this_cpu_generic_try_cmpxchg(pcp, ovalp, nval)
+#endif
+#endif
+#ifndef this_cpu_try_cmpxchg_2
+#ifdef this_cpu_cmpxchg_2
+#define this_cpu_try_cmpxchg_2(pcp, ovalp, nval) \
+ __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, this_cpu_cmpxchg_2)
+#else
+#define this_cpu_try_cmpxchg_2(pcp, ovalp, nval) \
+ this_cpu_generic_try_cmpxchg(pcp, ovalp, nval)
+#endif
+#endif
+#ifndef this_cpu_try_cmpxchg_4
+#ifdef this_cpu_cmpxchg_4
+#define this_cpu_try_cmpxchg_4(pcp, ovalp, nval) \
+ __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, this_cpu_cmpxchg_4)
+#else
+#define this_cpu_try_cmpxchg_4(pcp, ovalp, nval) \
+ this_cpu_generic_try_cmpxchg(pcp, ovalp, nval)
+#endif
+#endif
+#ifndef this_cpu_try_cmpxchg_8
+#ifdef this_cpu_cmpxchg_8
+#define this_cpu_try_cmpxchg_8(pcp, ovalp, nval) \
+ __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, this_cpu_cmpxchg_8)
+#else
+#define this_cpu_try_cmpxchg_8(pcp, ovalp, nval) \
+ this_cpu_generic_try_cmpxchg(pcp, ovalp, nval)
+#endif
+#endif
+
+#ifndef this_cpu_try_cmpxchg64
+#ifdef this_cpu_cmpxchg64
+#define this_cpu_try_cmpxchg64(pcp, ovalp, nval) \
+ __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, this_cpu_cmpxchg64)
+#else
+#define this_cpu_try_cmpxchg64(pcp, ovalp, nval) \
+ this_cpu_generic_try_cmpxchg(pcp, ovalp, nval)
+#endif
+#endif
+#ifndef this_cpu_try_cmpxchg128
+#ifdef this_cpu_cmpxchg128
+#define this_cpu_try_cmpxchg128(pcp, ovalp, nval) \
+ __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, this_cpu_cmpxchg128)
+#else
+#define this_cpu_try_cmpxchg128(pcp, ovalp, nval) \
+ this_cpu_generic_try_cmpxchg(pcp, ovalp, nval)
+#endif
+#endif
+
#ifndef this_cpu_cmpxchg_1
#define this_cpu_cmpxchg_1(pcp, oval, nval) \
this_cpu_generic_cmpxchg(pcp, oval, nval)
@@ -424,21 +535,13 @@ do { \
this_cpu_generic_cmpxchg(pcp, oval, nval)
#endif
-#ifndef this_cpu_cmpxchg_double_1
-#define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
- this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-#endif
-#ifndef this_cpu_cmpxchg_double_2
-#define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
- this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-#endif
-#ifndef this_cpu_cmpxchg_double_4
-#define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
- this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#ifndef this_cpu_cmpxchg64
+#define this_cpu_cmpxchg64(pcp, oval, nval) \
+ this_cpu_generic_cmpxchg(pcp, oval, nval)
#endif
-#ifndef this_cpu_cmpxchg_double_8
-#define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
- this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#ifndef this_cpu_cmpxchg128
+#define this_cpu_cmpxchg128(pcp, oval, nval) \
+ this_cpu_generic_cmpxchg(pcp, oval, nval)
#endif
#endif /* _ASM_GENERIC_PERCPU_H_ */
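The try_cmpxchg forms return a success bool and, on failure, write the observed value back through ovalp, removing the explicit re-read from cmpxchg loops. A minimal sketch of the caller pattern, assuming the matching this_cpu_try_cmpxchg() wrapper in linux/percpu-defs.h:

static DEFINE_PER_CPU(unsigned int, budget);

static bool take_budget(void)
{
	unsigned int old = this_cpu_read(budget);

	do {
		if (!old)
			return false;
		/* on failure, 'old' has already been refreshed */
	} while (!this_cpu_try_cmpxchg(budget, &old, old - 1));

	return true;
}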
diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h
index a7cf825befae..879e5f8aa5e9 100644
--- a/include/asm-generic/pgalloc.h
+++ b/include/asm-generic/pgalloc.h
@@ -8,7 +8,7 @@
#define GFP_PGTABLE_USER (GFP_PGTABLE_KERNEL | __GFP_ACCOUNT)
/**
- * __pte_alloc_one_kernel - allocate a page for PTE-level kernel page table
+ * __pte_alloc_one_kernel - allocate memory for a PTE-level kernel page table
* @mm: the mm_struct of the current context
*
* This function is intended for architectures that need
@@ -18,12 +18,17 @@
*/
static inline pte_t *__pte_alloc_one_kernel(struct mm_struct *mm)
{
- return (pte_t *)__get_free_page(GFP_PGTABLE_KERNEL);
+ struct ptdesc *ptdesc = pagetable_alloc(GFP_PGTABLE_KERNEL &
+ ~__GFP_HIGHMEM, 0);
+
+ if (!ptdesc)
+ return NULL;
+ return ptdesc_address(ptdesc);
}
#ifndef __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
/**
- * pte_alloc_one_kernel - allocate a page for PTE-level kernel page table
+ * pte_alloc_one_kernel - allocate memory for a PTE-level kernel page table
* @mm: the mm_struct of the current context
*
* Return: pointer to the allocated memory or %NULL on error
@@ -35,40 +40,40 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
#endif
/**
- * pte_free_kernel - free PTE-level kernel page table page
+ * pte_free_kernel - free PTE-level kernel page table memory
* @mm: the mm_struct of the current context
* @pte: pointer to the memory containing the page table
*/
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
- free_page((unsigned long)pte);
+ pagetable_free(virt_to_ptdesc(pte));
}
/**
- * __pte_alloc_one - allocate a page for PTE-level user page table
+ * __pte_alloc_one - allocate memory for a PTE-level user page table
* @mm: the mm_struct of the current context
* @gfp: GFP flags to use for the allocation
*
- * Allocates a page and runs the pgtable_pte_page_ctor().
+ * Allocates memory for a page table and ptdesc and runs pagetable_pte_ctor().
*
* This function is intended for architectures that need
* anything beyond simple page allocation or must have custom GFP flags.
*
- * Return: `struct page` initialized as page table or %NULL on error
+ * Return: `struct page` referencing the ptdesc or %NULL on error
*/
static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
{
- struct page *pte;
+ struct ptdesc *ptdesc;
- pte = alloc_page(gfp);
- if (!pte)
+ ptdesc = pagetable_alloc(gfp, 0);
+ if (!ptdesc)
return NULL;
- if (!pgtable_pte_page_ctor(pte)) {
- __free_page(pte);
+ if (!pagetable_pte_ctor(ptdesc)) {
+ pagetable_free(ptdesc);
return NULL;
}
- return pte;
+ return ptdesc_page(ptdesc);
}
#ifndef __HAVE_ARCH_PTE_ALLOC_ONE
@@ -76,9 +81,9 @@ static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
* pte_alloc_one - allocate a page for PTE-level user page table
* @mm: the mm_struct of the current context
*
- * Allocates a page and runs the pgtable_pte_page_ctor().
+ * Allocates memory for a page table and ptdesc and runs pagetable_pte_ctor().
*
- * Return: `struct page` initialized as page table or %NULL on error
+ * Return: `struct page` referencing the ptdesc or %NULL on error
*/
static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
@@ -92,14 +97,16 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
*/
/**
- * pte_free - free PTE-level user page table page
+ * pte_free - free PTE-level user page table memory
* @mm: the mm_struct of the current context
- * @pte_page: the `struct page` representing the page table
+ * @pte_page: the `struct page` referencing the ptdesc
*/
static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
{
- pgtable_pte_page_dtor(pte_page);
- __free_page(pte_page);
+ struct ptdesc *ptdesc = page_ptdesc(pte_page);
+
+ pagetable_pte_dtor(ptdesc);
+ pagetable_free(ptdesc);
}
@@ -107,10 +114,11 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
#ifndef __HAVE_ARCH_PMD_ALLOC_ONE
/**
- * pmd_alloc_one - allocate a page for PMD-level page table
+ * pmd_alloc_one - allocate memory for a PMD-level page table
* @mm: the mm_struct of the current context
*
- * Allocates a page and runs the pgtable_pmd_page_ctor().
+ * Allocates memory for a page table and ptdesc and runs pagetable_pmd_ctor().
+ *
* Allocations use %GFP_PGTABLE_USER in user context and
* %GFP_PGTABLE_KERNEL in kernel context.
*
@@ -118,28 +126,30 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
*/
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- struct page *page;
+ struct ptdesc *ptdesc;
gfp_t gfp = GFP_PGTABLE_USER;
if (mm == &init_mm)
gfp = GFP_PGTABLE_KERNEL;
- page = alloc_page(gfp);
- if (!page)
+ ptdesc = pagetable_alloc(gfp, 0);
+ if (!ptdesc)
return NULL;
- if (!pgtable_pmd_page_ctor(page)) {
- __free_page(page);
+ if (!pagetable_pmd_ctor(ptdesc)) {
+ pagetable_free(ptdesc);
return NULL;
}
- return (pmd_t *)page_address(page);
+ return ptdesc_address(ptdesc);
}
#endif
#ifndef __HAVE_ARCH_PMD_FREE
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
+ struct ptdesc *ptdesc = virt_to_ptdesc(pmd);
+
BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
- pgtable_pmd_page_dtor(virt_to_page(pmd));
- free_page((unsigned long)pmd);
+ pagetable_pmd_dtor(ptdesc);
+ pagetable_free(ptdesc);
}
#endif
@@ -150,19 +160,27 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
static inline pud_t *__pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
gfp_t gfp = GFP_PGTABLE_USER;
+ struct ptdesc *ptdesc;
if (mm == &init_mm)
gfp = GFP_PGTABLE_KERNEL;
- return (pud_t *)get_zeroed_page(gfp);
+ gfp &= ~__GFP_HIGHMEM;
+
+ ptdesc = pagetable_alloc(gfp, 0);
+ if (!ptdesc)
+ return NULL;
+
+ pagetable_pud_ctor(ptdesc);
+ return ptdesc_address(ptdesc);
}
#ifndef __HAVE_ARCH_PUD_ALLOC_ONE
/**
- * pud_alloc_one - allocate a page for PUD-level page table
+ * pud_alloc_one - allocate memory for a PUD-level page table
* @mm: the mm_struct of the current context
*
- * Allocates a page using %GFP_PGTABLE_USER for user context and
- * %GFP_PGTABLE_KERNEL for kernel context.
+ * Allocates memory for a page table using %GFP_PGTABLE_USER for user context
+ * and %GFP_PGTABLE_KERNEL for kernel context.
*
* Return: pointer to the allocated memory or %NULL on error
*/
@@ -174,8 +192,11 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
static inline void __pud_free(struct mm_struct *mm, pud_t *pud)
{
+ struct ptdesc *ptdesc = virt_to_ptdesc(pud);
+
BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
- free_page((unsigned long)pud);
+ pagetable_pud_dtor(ptdesc);
+ pagetable_free(ptdesc);
}
#ifndef __HAVE_ARCH_PUD_FREE
@@ -190,7 +211,7 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
#ifndef __HAVE_ARCH_PGD_FREE
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
- free_page((unsigned long)pgd);
+ pagetable_free(virt_to_ptdesc(pgd));
}
#endif
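Every level now goes through pagetable_alloc()/pagetable_free() on a struct ptdesc instead of bare struct page calls; the kernel-PTE path masks off __GFP_HIGHMEM so its tables stay reachable through the direct map. A sketch of an arch override that keeps user PTE tables in highmem, as some 32-bit configurations do:

static pgtable_t my_pte_alloc(struct mm_struct *mm)
{
	/* highmem is fine for user tables: they are mapped on demand */
	return __pte_alloc_one(mm, GFP_PGTABLE_USER | __GFP_HIGHMEM);
}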
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index b4d43a4af5f7..51f8f3881523 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -80,9 +80,21 @@ static __always_inline bool should_resched(int preempt_offset)
#ifdef CONFIG_PREEMPTION
extern asmlinkage void preempt_schedule(void);
-#define __preempt_schedule() preempt_schedule()
extern asmlinkage void preempt_schedule_notrace(void);
+
+#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+
+void dynamic_preempt_schedule(void);
+void dynamic_preempt_schedule_notrace(void);
+#define __preempt_schedule() dynamic_preempt_schedule()
+#define __preempt_schedule_notrace() dynamic_preempt_schedule_notrace()
+
+#else /* !CONFIG_PREEMPT_DYNAMIC || !CONFIG_HAVE_PREEMPT_DYNAMIC_KEY */
+
+#define __preempt_schedule() preempt_schedule()
#define __preempt_schedule_notrace() preempt_schedule_notrace()
+
+#endif /* CONFIG_PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_KEY */
#endif /* CONFIG_PREEMPTION */
#endif /* __ASM_PREEMPT_H */
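With the static-key flavour of PREEMPT_DYNAMIC, the preemption entry points become out-of-line functions whose behaviour is switched at boot (preempt=) rather than at build time. A sketch of the scheduler-side shape (the real definitions live in kernel/sched/core.c):

DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);

void dynamic_preempt_schedule(void)
{
	/* patched to a NOP when full preemption is disabled at boot */
	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
		return;
	preempt_schedule();
}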
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 995513fa2690..0655aa5b57b2 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -70,7 +70,7 @@ static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
*/
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
- return !atomic_read(&lock.val);
+ return !lock.val.counter;
}
/**
diff --git a/include/asm-generic/spinlock.h b/include/asm-generic/spinlock.h
index fdfebcb050f4..90803a826ba0 100644
--- a/include/asm-generic/spinlock.h
+++ b/include/asm-generic/spinlock.h
@@ -68,11 +68,18 @@ static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
smp_store_release(ptr, (u16)val + 1);
}
+static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+ u32 val = lock.counter;
+
+ return ((val >> 16) == (val & 0xffff));
+}
+
static __always_inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
- u32 val = atomic_read(lock);
+ arch_spinlock_t val = READ_ONCE(*lock);
- return ((val >> 16) != (val & 0xffff));
+ return !arch_spin_value_unlocked(val);
}
static __always_inline int arch_spin_is_contended(arch_spinlock_t *lock)
@@ -82,11 +89,6 @@ static __always_inline int arch_spin_is_contended(arch_spinlock_t *lock)
return (s16)((val >> 16) - (val & 0xffff)) > 1;
}
-static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
-{
- return !arch_spin_is_locked(&lock);
-}
-
#include <asm/qrwlock.h>
#endif /* __ASM_GENERIC_SPINLOCK_H */
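These helpers assume the generic ticket encoding: the next ticket lives in the high 16 bits, the current owner in the low 16, so equal halves mean the lock is free. A tiny sketch of that invariant, assuming the atomic_t-based arch_spinlock_t:

static void ticket_demo(void)
{
	/* next == owner == 3: locked and unlocked three times, now free */
	arch_spinlock_t lock = ATOMIC_INIT((3 << 16) | 3);

	WARN_ON(!arch_spin_value_unlocked(lock));
}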
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index b46617207c93..129a3a759976 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -456,7 +456,6 @@ static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
return;
tlb_flush(tlb);
- mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
__tlb_reset_range(tlb);
}
@@ -481,6 +480,17 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}
+static inline void tlb_remove_ptdesc(struct mmu_gather *tlb, void *pt)
+{
+ tlb_remove_table(tlb, pt);
+}
+
+/* Like tlb_remove_ptdesc, but for page-like page directories. */
+static inline void tlb_remove_page_ptdesc(struct mmu_gather *tlb, struct ptdesc *pt)
+{
+ tlb_remove_page(tlb, ptdesc_page(pt));
+}
+
static inline void tlb_change_page_size(struct mmu_gather *tlb,
unsigned int page_size)
{
diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h
index 699650f81970..a84c64e5f11e 100644
--- a/include/asm-generic/unaligned.h
+++ b/include/asm-generic/unaligned.h
@@ -104,9 +104,9 @@ static inline u32 get_unaligned_le24(const void *p)
static inline void __put_unaligned_be24(const u32 val, u8 *p)
{
- *p++ = val >> 16;
- *p++ = val >> 8;
- *p++ = val;
+ *p++ = (val >> 16) & 0xff;
+ *p++ = (val >> 8) & 0xff;
+ *p++ = val & 0xff;
}
static inline void put_unaligned_be24(const u32 val, void *p)
@@ -116,9 +116,9 @@ static inline void put_unaligned_be24(const u32 val, void *p)
static inline void __put_unaligned_le24(const u32 val, u8 *p)
{
- *p++ = val;
- *p++ = val >> 8;
- *p++ = val >> 16;
+ *p++ = val & 0xff;
+ *p++ = (val >> 8) & 0xff;
+ *p++ = (val >> 16) & 0xff;
}
static inline void put_unaligned_le24(const u32 val, void *p)
@@ -128,12 +128,12 @@ static inline void put_unaligned_le24(const u32 val, void *p)
static inline void __put_unaligned_be48(const u64 val, u8 *p)
{
- *p++ = val >> 40;
- *p++ = val >> 32;
- *p++ = val >> 24;
- *p++ = val >> 16;
- *p++ = val >> 8;
- *p++ = val;
+ *p++ = (val >> 40) & 0xff;
+ *p++ = (val >> 32) & 0xff;
+ *p++ = (val >> 24) & 0xff;
+ *p++ = (val >> 16) & 0xff;
+ *p++ = (val >> 8) & 0xff;
+ *p++ = val & 0xff;
}
static inline void put_unaligned_be48(const u64 val, void *p)
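The added & 0xff masks do not change the stored bytes (assignment to a u8 already truncates); they only make the truncation explicit for readers and static checkers. For example:

static void be24_demo(u8 buf[3])
{
	__put_unaligned_be24(0x123456, buf);
	/* buf = {0x12, 0x34, 0x56} with or without the masking */
}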
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index da9e5629ea43..5dd3a61d673d 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -138,13 +138,6 @@
* are handled as text/data or they can be discarded (which
* often happens at runtime)
*/
-#ifdef CONFIG_HOTPLUG_CPU
-#define CPU_KEEP(sec) *(.cpu##sec)
-#define CPU_DISCARD(sec)
-#else
-#define CPU_KEEP(sec)
-#define CPU_DISCARD(sec) *(.cpu##sec)
-#endif
#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec) *(.mem##sec)
@@ -363,7 +356,6 @@
*(.ref.data) \
*(.data..shared_aligned) /* percpu related */ \
MEM_KEEP(init.data*) \
- MEM_KEEP(exit.data*) \
*(.data.unlikely) \
__start_once = .; \
*(.data.once) \
@@ -378,7 +370,8 @@
BRANCH_PROFILE() \
TRACE_PRINTKS() \
BPF_RAW_TP() \
- TRACEPOINT_STR()
+ TRACEPOINT_STR() \
+ KUNIT_TABLE()
/*
* Data section helpers
@@ -528,7 +521,6 @@
__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
*(.ref.rodata) \
MEM_KEEP(init.rodata) \
- MEM_KEEP(exit.rodata) \
} \
\
/* Built-in module parameters. */ \
@@ -578,11 +570,9 @@
*(.text.unlikely .text.unlikely.*) \
*(.text.unknown .text.unknown.*) \
NOINSTR_TEXT \
- *(.text..refcount) \
*(.ref.text) \
*(.text.asan.* .text.tsan.*) \
MEM_KEEP(init.text*) \
- MEM_KEEP(exit.text*) \
/* sched.text is aligned to function alignment to ensure we have the same
@@ -688,7 +678,7 @@
/* init and exit section handling */
#define INIT_DATA \
KEEP(*(SORT(___kentry+*))) \
- *(.init.data init.data.*) \
+ *(.init.data .init.data.*) \
MEM_DISCARD(init.data*) \
KERNEL_CTORS() \
MCOUNT_REC() \
@@ -711,7 +701,7 @@
EARLYCON_TABLE() \
LSM_TABLE() \
EARLY_LSM_TABLE() \
- KUNIT_TABLE()
+ KUNIT_INIT_TABLE()
#define INIT_TEXT \
*(.init.text .init.text.*) \
@@ -722,13 +712,10 @@
*(.exit.data .exit.data.*) \
*(.fini_array .fini_array.*) \
*(.dtors .dtors.*) \
- MEM_DISCARD(exit.data*) \
- MEM_DISCARD(exit.rodata*)
#define EXIT_TEXT \
*(.exit.text) \
*(.text.exit) \
- MEM_DISCARD(exit.text)
#define EXIT_CALL \
*(.exitcall.exit)
@@ -940,6 +927,12 @@
. = ALIGN(8); \
BOUNDED_SECTION_POST_LABEL(.kunit_test_suites, __kunit_suites, _start, _end)
+/* Alignment must be consistent with (kunit_suite *) in include/kunit/test.h */
+#define KUNIT_INIT_TABLE() \
+ . = ALIGN(8); \
+ BOUNDED_SECTION_POST_LABEL(.kunit_init_test_suites, \
+ __kunit_init_suites, _start, _end)
+
#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS \
. = ALIGN(4); \
@@ -1016,6 +1009,7 @@
PATCHABLE_DISCARDS \
*(.discard) \
*(.discard.*) \
+ *(.export_symbol) \
*(.modinfo) \
/* ld.bfd warns about .gnu.version* even when not emitted */ \
*(.gnu.version*) \
diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h
index 20c93f08c993..95a1d214108a 100644
--- a/include/asm-generic/word-at-a-time.h
+++ b/include/asm-generic/word-at-a-time.h
@@ -38,7 +38,7 @@ static inline long find_zero(unsigned long mask)
return (mask >> 8) ? byte : byte + 1;
}
-static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
+static inline unsigned long has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
{
unsigned long rhs = val | c->low_bits;
*data = rhs;
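Returning unsigned long makes the generic big-endian has_zero() agree with the per-arch little-endian versions, whose result is mask-like, so callers can combine two probes with |. A minimal sketch of the usual caller pattern (cf. lib/string.c; assumes word-aligned, readable input):

static size_t word_strlen(const char *s)
{
	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
	unsigned long val, data;
	size_t len = 0;

	for (;;) {
		val = read_word_at_a_time(s + len);
		if (has_zero(val, &data, &constants))
			break;
		len += sizeof(unsigned long);
	}
	data = prep_zero_mask(val, data, &constants);
	return len + find_zero(create_zero_mask(data));
}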
diff --git a/include/clocksource/hyperv_timer.h b/include/clocksource/hyperv_timer.h
index 536f897375d0..6cdc873ac907 100644
--- a/include/clocksource/hyperv_timer.h
+++ b/include/clocksource/hyperv_timer.h
@@ -38,8 +38,9 @@ extern void hv_remap_tsc_clocksource(void);
extern unsigned long hv_get_tsc_pfn(void);
extern struct ms_hyperv_tsc_page *hv_get_tsc_page(void);
-static inline notrace u64
-hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg, u64 *cur_tsc)
+static __always_inline bool
+hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg,
+ u64 *cur_tsc, u64 *time)
{
u64 scale, offset;
u32 sequence;
@@ -63,7 +64,7 @@ hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg, u64 *cur_tsc)
do {
sequence = READ_ONCE(tsc_pg->tsc_sequence);
if (!sequence)
- return U64_MAX;
+ return false;
/*
* Make sure we read sequence before we read other values from
* TSC page.
@@ -82,15 +83,8 @@ hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg, u64 *cur_tsc)
} while (READ_ONCE(tsc_pg->tsc_sequence) != sequence);
- return mul_u64_u64_shr(*cur_tsc, scale, 64) + offset;
-}
-
-static inline notrace u64
-hv_read_tsc_page(const struct ms_hyperv_tsc_page *tsc_pg)
-{
- u64 cur_tsc;
-
- return hv_read_tsc_page_tsc(tsc_pg, &cur_tsc);
+ *time = mul_u64_u64_shr(*cur_tsc, scale, 64) + offset;
+ return true;
}
#else /* CONFIG_HYPERV_TIMER */
@@ -104,10 +98,10 @@ static inline struct ms_hyperv_tsc_page *hv_get_tsc_page(void)
return NULL;
}
-static inline u64 hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg,
- u64 *cur_tsc)
+static __always_inline bool
+hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg, u64 *cur_tsc, u64 *time)
{
- return U64_MAX;
+ return false;
}
static inline int hv_stimer_cleanup(unsigned int cpu) { return 0; }
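hv_read_tsc_page_tsc() now reports validity separately from the timestamp instead of overloading U64_MAX, and the !CONFIG_HYPERV_TIMER stub fails the same way. A minimal sketch of the new calling convention; hv_get_raw_timer() stands in for the arch fallback:

static u64 read_hv_clock(const struct ms_hyperv_tsc_page *tsc_pg)
{
	u64 tsc, time;

	if (hv_read_tsc_page_tsc(tsc_pg, &tsc, &time))
		return time;
	return hv_get_raw_timer();	/* TSC page invalid: fall back */
}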
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 35e45b854a6f..51382befbe37 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -217,6 +217,18 @@ static inline void crypto_free_aead(struct crypto_aead *tfm)
crypto_destroy_tfm(tfm, crypto_aead_tfm(tfm));
}
+/**
+ * crypto_has_aead() - Search for the availability of an aead.
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * aead
+ * @type: specifies the type of the aead
+ * @mask: specifies the mask for the aead
+ *
+ * Return: true when the aead is known to the kernel crypto API; false
+ * otherwise
+ */
+int crypto_has_aead(const char *alg_name, u32 type, u32 mask);
+
static inline const char *crypto_aead_driver_name(struct crypto_aead *tfm)
{
return crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
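crypto_has_aead() lets callers probe for an algorithm (possibly triggering template instantiation) without allocating a transform, for example:

static bool have_gcm_aes(void)
{
	return crypto_has_aead("gcm(aes)", 0, 0);
}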
diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h
index f35fd653e4e5..31c111bebb68 100644
--- a/include/crypto/akcipher.h
+++ b/include/crypto/akcipher.h
@@ -374,6 +374,42 @@ static inline int crypto_akcipher_decrypt(struct akcipher_request *req)
}
/**
+ * crypto_akcipher_sync_encrypt() - Invoke public key encrypt operation
+ *
+ * Function invokes the specific public key encrypt operation for a given
+ * public key algorithm
+ *
+ * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher()
+ * @src: source buffer
+ * @slen: source length
+ * @dst: destination buffer
+ * @dlen: destination length
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_akcipher_sync_encrypt(struct crypto_akcipher *tfm,
+ const void *src, unsigned int slen,
+ void *dst, unsigned int dlen);
+
+/**
+ * crypto_akcipher_sync_decrypt() - Invoke public key decrypt operation
+ *
+ * Function invokes the specific public key decrypt operation for a given
+ * public key algorithm
+ *
+ * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher()
+ * @src: source buffer
+ * @slen: source length
+ * @dst: destination buffer
+ * @dlen: destination length
+ *
+ * Return: Output length on success; error code in case of error
+ */
+int crypto_akcipher_sync_decrypt(struct crypto_akcipher *tfm,
+ const void *src, unsigned int slen,
+ void *dst, unsigned int dlen);
+
+/**
* crypto_akcipher_sign() - Invoke public key sign operation
*
* Function invokes the specific public key sign operation for a given
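The sync helpers hide the akcipher_request/crypto_wait_req boilerplate for one-shot users. A minimal sketch, assuming a key in the format crypto_akcipher_set_pub_key() expects:

static int rsa_encrypt_once(const void *key, unsigned int keylen,
			    const void *in, unsigned int inlen,
			    void *out, unsigned int outlen)
{
	struct crypto_akcipher *tfm = crypto_alloc_akcipher("rsa", 0, 0);
	int err;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	err = crypto_akcipher_set_pub_key(tfm, key, keylen);
	if (!err)
		err = crypto_akcipher_sync_encrypt(tfm, in, inlen,
						   out, outlen);
	crypto_free_akcipher(tfm);
	return err;
}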
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 016d5a302b84..7a4a71af653f 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -12,6 +12,7 @@
#include <linux/cache.h>
#include <linux/crypto.h>
#include <linux/types.h>
+#include <linux/workqueue.h>
/*
* Maximum values for blocksize and alignmask, used to allocate
@@ -56,7 +57,6 @@ struct sk_buff;
struct crypto_type {
unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
unsigned int (*extsize)(struct crypto_alg *alg);
- int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
int (*init_tfm)(struct crypto_tfm *tfm);
void (*show)(struct seq_file *m, struct crypto_alg *alg);
int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
@@ -83,6 +83,8 @@ struct crypto_instance {
struct crypto_spawn *spawns;
};
+ struct work_struct free_work;
+
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
@@ -193,11 +195,6 @@ static inline void *crypto_tfm_ctx_align(struct crypto_tfm *tfm,
return PTR_ALIGN(crypto_tfm_ctx(tfm), align);
}
-static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
-{
- return crypto_tfm_ctx_align(tfm, crypto_tfm_alg_alignmask(tfm) + 1);
-}
-
static inline unsigned int crypto_dma_align(void)
{
return CRYPTO_DMA_ALIGN;
diff --git a/include/crypto/b128ops.h b/include/crypto/b128ops.h
index 0b8e6bc55301..f3b37cbb3131 100644
--- a/include/crypto/b128ops.h
+++ b/include/crypto/b128ops.h
@@ -50,10 +50,6 @@
#include <linux/types.h>
typedef struct {
- u64 a, b;
-} u128;
-
-typedef struct {
__be64 a, b;
} be128;
@@ -61,20 +57,16 @@ typedef struct {
__le64 b, a;
} le128;
-static inline void u128_xor(u128 *r, const u128 *p, const u128 *q)
+static inline void be128_xor(be128 *r, const be128 *p, const be128 *q)
{
r->a = p->a ^ q->a;
r->b = p->b ^ q->b;
}
-static inline void be128_xor(be128 *r, const be128 *p, const be128 *q)
-{
- u128_xor((u128 *)r, (u128 *)p, (u128 *)q);
-}
-
static inline void le128_xor(le128 *r, const le128 *p, const le128 *q)
{
- u128_xor((u128 *)r, (u128 *)p, (u128 *)q);
+ r->a = p->a ^ q->a;
+ r->b = p->b ^ q->b;
}
#endif /* _CRYPTO_B128OPS_H */
diff --git a/include/crypto/engine.h b/include/crypto/engine.h
index ae133e98d813..545dbefe3e13 100644
--- a/include/crypto/engine.h
+++ b/include/crypto/engine.h
@@ -7,91 +7,47 @@
#ifndef _CRYPTO_ENGINE_H
#define _CRYPTO_ENGINE_H
-#include <linux/crypto.h>
-#include <linux/list.h>
-#include <linux/kthread.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-
-#include <crypto/algapi.h>
#include <crypto/aead.h>
#include <crypto/akcipher.h>
#include <crypto/hash.h>
-#include <crypto/skcipher.h>
#include <crypto/kpp.h>
+#include <crypto/skcipher.h>
+#include <linux/types.h>
+struct crypto_engine;
struct device;
-#define ENGINE_NAME_LEN 30
-/*
- * struct crypto_engine - crypto hardware engine
- * @name: the engine name
- * @idling: the engine is entering idle state
- * @busy: request pump is busy
- * @running: the engine is on working
- * @retry_support: indication that the hardware allows re-execution
- * of a failed backlog request
- * crypto-engine, in head position to keep order
- * @list: link with the global crypto engine list
- * @queue_lock: spinlock to synchronise access to request queue
- * @queue: the crypto queue of the engine
- * @rt: whether this queue is set to run as a realtime task
- * @prepare_crypt_hardware: a request will soon arrive from the queue
- * so the subsystem requests the driver to prepare the hardware
- * by issuing this call
- * @unprepare_crypt_hardware: there are currently no more requests on the
- * queue so the subsystem notifies the driver that it may relax the
- * hardware by issuing this call
- * @do_batch_requests: execute a batch of requests. Depends on multiple
- * requests support.
- * @kworker: kthread worker struct for request pump
- * @pump_requests: work struct for scheduling work to the request pump
- * @priv_data: the engine private data
- * @cur_req: the current request which is on processing
- */
-struct crypto_engine {
- char name[ENGINE_NAME_LEN];
- bool idling;
- bool busy;
- bool running;
-
- bool retry_support;
-
- struct list_head list;
- spinlock_t queue_lock;
- struct crypto_queue queue;
- struct device *dev;
-
- bool rt;
-
- int (*prepare_crypt_hardware)(struct crypto_engine *engine);
- int (*unprepare_crypt_hardware)(struct crypto_engine *engine);
- int (*do_batch_requests)(struct crypto_engine *engine);
-
-
- struct kthread_worker *kworker;
- struct kthread_work pump_requests;
-
- void *priv_data;
- struct crypto_async_request *cur_req;
-};
-
/*
* struct crypto_engine_op - crypto hardware engine operations
- * @prepare__request: do some prepare if need before handle the current request
- * @unprepare_request: undo any work done by prepare_request()
* @do_one_request: do encryption for current request
*/
struct crypto_engine_op {
- int (*prepare_request)(struct crypto_engine *engine,
- void *areq);
- int (*unprepare_request)(struct crypto_engine *engine,
- void *areq);
int (*do_one_request)(struct crypto_engine *engine,
void *areq);
};
-struct crypto_engine_ctx {
+struct aead_engine_alg {
+ struct aead_alg base;
+ struct crypto_engine_op op;
+};
+
+struct ahash_engine_alg {
+ struct ahash_alg base;
+ struct crypto_engine_op op;
+};
+
+struct akcipher_engine_alg {
+ struct akcipher_alg base;
+ struct crypto_engine_op op;
+};
+
+struct kpp_engine_alg {
+ struct kpp_alg base;
+ struct crypto_engine_op op;
+};
+
+struct skcipher_engine_alg {
+ struct skcipher_alg base;
struct crypto_engine_op op;
};
@@ -122,6 +78,30 @@ struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
bool retry_support,
int (*cbk_do_batch)(struct crypto_engine *engine),
bool rt, int qlen);
-int crypto_engine_exit(struct crypto_engine *engine);
+void crypto_engine_exit(struct crypto_engine *engine);
+
+int crypto_engine_register_aead(struct aead_engine_alg *alg);
+void crypto_engine_unregister_aead(struct aead_engine_alg *alg);
+int crypto_engine_register_aeads(struct aead_engine_alg *algs, int count);
+void crypto_engine_unregister_aeads(struct aead_engine_alg *algs, int count);
+
+int crypto_engine_register_ahash(struct ahash_engine_alg *alg);
+void crypto_engine_unregister_ahash(struct ahash_engine_alg *alg);
+int crypto_engine_register_ahashes(struct ahash_engine_alg *algs, int count);
+void crypto_engine_unregister_ahashes(struct ahash_engine_alg *algs,
+ int count);
+
+int crypto_engine_register_akcipher(struct akcipher_engine_alg *alg);
+void crypto_engine_unregister_akcipher(struct akcipher_engine_alg *alg);
+
+int crypto_engine_register_kpp(struct kpp_engine_alg *alg);
+void crypto_engine_unregister_kpp(struct kpp_engine_alg *alg);
+
+int crypto_engine_register_skcipher(struct skcipher_engine_alg *alg);
+void crypto_engine_unregister_skcipher(struct skcipher_engine_alg *alg);
+int crypto_engine_register_skciphers(struct skcipher_engine_alg *algs,
+ int count);
+void crypto_engine_unregister_skciphers(struct skcipher_engine_alg *algs,
+ int count);
#endif /* _CRYPTO_ENGINE_H */
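Drivers now pair each base algorithm with its crypto_engine_op instead of stashing a crypto_engine_ctx in the transform context, and any prepare/unprepare work moves into do_one_request() itself. A sketch of the new registration shape (all my_* names hypothetical, most fields trimmed):

static int my_do_one(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *req = skcipher_request_cast(areq);

	/* program the hardware, then complete the request */
	crypto_finalize_skcipher_request(engine, req, 0);
	return 0;
}

static struct skcipher_engine_alg my_alg = {
	.base.base.cra_name	= "cbc(aes)",
	.op.do_one_request	= my_do_one,
};

/* registered via crypto_engine_register_skcipher(&my_alg) */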
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index e69542d86a2b..5d61f576cfc8 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -212,13 +212,9 @@ struct shash_desc {
* This is a counterpart to @init_tfm, used to remove
* various changes set in @init_tfm.
* @clone_tfm: Copy transform into new object, may allocate memory.
- * @digestsize: see struct ahash_alg
- * @statesize: see struct ahash_alg
* @descsize: Size of the operational state for the message digest. This state
* size is the memory size that needs to be allocated for
* shash_desc.__ctx
- * @stat: Statistics for hash algorithm.
- * @base: internally used
* @halg: see struct hash_alg_common
* @HASH_ALG_COMMON: see struct hash_alg_common
*/
@@ -250,16 +246,8 @@ struct shash_alg {
#undef HASH_ALG_COMMON_STAT
struct crypto_ahash {
- int (*init)(struct ahash_request *req);
- int (*update)(struct ahash_request *req);
- int (*final)(struct ahash_request *req);
- int (*finup)(struct ahash_request *req);
- int (*digest)(struct ahash_request *req);
- int (*export)(struct ahash_request *req, void *out);
- int (*import)(struct ahash_request *req, const void *in);
- int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
- unsigned int keylen);
-
+ bool using_shash; /* Underlying algorithm is shash, not ahash */
+ unsigned int statesize;
unsigned int reqsize;
struct crypto_tfm base;
};
@@ -341,12 +329,6 @@ static inline const char *crypto_ahash_driver_name(struct crypto_ahash *tfm)
return crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
}
-static inline unsigned int crypto_ahash_alignmask(
- struct crypto_ahash *tfm)
-{
- return crypto_tfm_alg_alignmask(crypto_ahash_tfm(tfm));
-}
-
/**
* crypto_ahash_blocksize() - obtain block size for cipher
* @tfm: cipher handle
@@ -400,7 +382,7 @@ static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm)
*/
static inline unsigned int crypto_ahash_statesize(struct crypto_ahash *tfm)
{
- return crypto_hash_alg_common(tfm)->statesize;
+ return tfm->statesize;
}
static inline u32 crypto_ahash_get_flags(struct crypto_ahash *tfm)
@@ -518,10 +500,7 @@ int crypto_ahash_digest(struct ahash_request *req);
*
* Return: 0 if the export was successful; < 0 if an error occurred
*/
-static inline int crypto_ahash_export(struct ahash_request *req, void *out)
-{
- return crypto_ahash_reqtfm(req)->export(req, out);
-}
+int crypto_ahash_export(struct ahash_request *req, void *out);
/**
* crypto_ahash_import() - import message digest state
@@ -534,15 +513,7 @@ static inline int crypto_ahash_export(struct ahash_request *req, void *out)
*
* Return: 0 if the import was successful; < 0 if an error occurred
*/
-static inline int crypto_ahash_import(struct ahash_request *req, const void *in)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-
- if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
-
- return tfm->import(req, in);
-}
+int crypto_ahash_import(struct ahash_request *req, const void *in);
/**
* crypto_ahash_init() - (re)initialize message digest handle
@@ -555,36 +526,7 @@ static inline int crypto_ahash_import(struct ahash_request *req, const void *in)
*
* Return: see crypto_ahash_final()
*/
-static inline int crypto_ahash_init(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-
- if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
-
- return tfm->init(req);
-}
-
-static inline struct crypto_istat_hash *hash_get_stat(
- struct hash_alg_common *alg)
-{
-#ifdef CONFIG_CRYPTO_STATS
- return &alg->stat;
-#else
- return NULL;
-#endif
-}
-
-static inline int crypto_hash_errstat(struct hash_alg_common *alg, int err)
-{
- if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
- return err;
-
- if (err && err != -EINPROGRESS && err != -EBUSY)
- atomic64_inc(&hash_get_stat(alg)->err_cnt);
-
- return err;
-}
+int crypto_ahash_init(struct ahash_request *req);
/**
* crypto_ahash_update() - add data to message digest for processing
@@ -597,16 +539,7 @@ static inline int crypto_hash_errstat(struct hash_alg_common *alg, int err)
*
* Return: see crypto_ahash_final()
*/
-static inline int crypto_ahash_update(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
-
- if (IS_ENABLED(CONFIG_CRYPTO_STATS))
- atomic64_add(req->nbytes, &hash_get_stat(alg)->hash_tlen);
-
- return crypto_hash_errstat(alg, tfm->update(req));
-}
+int crypto_ahash_update(struct ahash_request *req);
/**
* DOC: Asynchronous Hash Request Handle
@@ -797,12 +730,6 @@ static inline const char *crypto_shash_driver_name(struct crypto_shash *tfm)
return crypto_tfm_alg_driver_name(crypto_shash_tfm(tfm));
}
-static inline unsigned int crypto_shash_alignmask(
- struct crypto_shash *tfm)
-{
- return crypto_tfm_alg_alignmask(crypto_shash_tfm(tfm));
-}
-
/**
* crypto_shash_blocksize() - obtain block size for cipher
* @tfm: cipher handle
@@ -951,10 +878,7 @@ int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data,
* Context: Any context.
* Return: 0 if the export creation was successful; < 0 if an error occurred
*/
-static inline int crypto_shash_export(struct shash_desc *desc, void *out)
-{
- return crypto_shash_alg(desc->tfm)->export(desc, out);
-}
+int crypto_shash_export(struct shash_desc *desc, void *out);
/**
* crypto_shash_import() - import operational state
@@ -968,15 +892,7 @@ static inline int crypto_shash_export(struct shash_desc *desc, void *out)
* Context: Any context.
* Return: 0 if the import was successful; < 0 if an error occurred
*/
-static inline int crypto_shash_import(struct shash_desc *desc, const void *in)
-{
- struct crypto_shash *tfm = desc->tfm;
-
- if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
-
- return crypto_shash_alg(tfm)->import(desc, in);
-}
+int crypto_shash_import(struct shash_desc *desc, const void *in);
/**
* crypto_shash_init() - (re)initialize message digest
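crypto_ahash_export() and friends move out of line because the tfm no longer carries per-operation function pointers; the new using_shash flag picks the backend at the API boundary. A sketch of the dispatch the out-of-line helper performs (cf. crypto/ahash.c):

int crypto_ahash_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (tfm->using_shash)	/* shash desc lives in the request ctx */
		return crypto_shash_export(ahash_request_ctx(req), out);
	return crypto_ahash_alg(tfm)->export(req, out);
}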
diff --git a/include/crypto/hash_info.h b/include/crypto/hash_info.h
index dd4f06785049..d6927739f8b2 100644
--- a/include/crypto/hash_info.h
+++ b/include/crypto/hash_info.h
@@ -10,6 +10,7 @@
#include <crypto/sha1.h>
#include <crypto/sha2.h>
+#include <crypto/sha3.h>
#include <crypto/md5.h>
#include <crypto/streebog.h>
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index 7e76623f9ec3..78ecaf5db04c 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -56,9 +56,9 @@ struct af_alg_type {
};
struct af_alg_sgl {
- struct scatterlist sg[ALG_MAX_PAGES + 1];
- struct page *pages[ALG_MAX_PAGES];
- unsigned int npages;
+ struct sg_table sgt;
+ struct scatterlist sgl[ALG_MAX_PAGES + 1];
+ bool need_unpin;
};
/* TX SGL entry */
@@ -121,6 +121,7 @@ struct af_alg_async_req {
*
* @tsgl_list: Link to TX SGL
* @iv: IV for cipher operation
+ * @state: Existing state for continuing operation
* @aead_assoclen: Length of AAD for AEAD cipher operations
* @completion: Work queue for synchronous operation
* @used: TX bytes sent to kernel. This variable is used to
@@ -136,11 +137,13 @@ struct af_alg_async_req {
* recvmsg is invoked.
* @init: True if metadata has been sent.
* @len: Length of memory allocated for this data structure.
+ * @inflight: Non-zero when AIO requests are in flight.
*/
struct af_alg_ctx {
struct list_head tsgl_list;
void *iv;
+ void *state;
size_t aead_assoclen;
struct crypto_wait wait;
@@ -154,6 +157,8 @@ struct af_alg_ctx {
bool init;
unsigned int len;
+
+ unsigned int inflight;
};
int af_alg_register_type(const struct af_alg_type *type);
@@ -163,7 +168,6 @@ int af_alg_release(struct socket *sock);
void af_alg_release_parent(struct sock *sk);
int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern);
-int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len);
void af_alg_free_sg(struct af_alg_sgl *sgl);
static inline struct alg_sock *alg_sk(struct sock *sk)
@@ -230,8 +234,6 @@ void af_alg_wmem_wakeup(struct sock *sk);
int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min);
int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
unsigned int ivsize);
-ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
- int offset, size_t size, int flags);
void af_alg_free_resources(struct af_alg_async_req *areq);
void af_alg_async_cb(void *data, int err);
__poll_t af_alg_poll(struct file *file, struct socket *sock,
diff --git a/include/crypto/internal/cipher.h b/include/crypto/internal/cipher.h
index a9174ba90250..5030f6d2df31 100644
--- a/include/crypto/internal/cipher.h
+++ b/include/crypto/internal/cipher.h
@@ -176,6 +176,8 @@ void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
u8 *dst, const u8 *src);
+struct crypto_cipher *crypto_clone_cipher(struct crypto_cipher *cipher);
+
struct crypto_cipher_spawn {
struct crypto_spawn base;
};
diff --git a/include/crypto/internal/engine.h b/include/crypto/internal/engine.h
new file mode 100644
index 000000000000..fbf4be56cf12
--- /dev/null
+++ b/include/crypto/internal/engine.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Crypto engine API
+ *
+ * Copyright (c) 2016 Baolin Wang <baolin.wang@linaro.org>
+ * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+#ifndef _CRYPTO_INTERNAL_ENGINE_H
+#define _CRYPTO_INTERNAL_ENGINE_H
+
+#include <crypto/algapi.h>
+#include <crypto/engine.h>
+#include <linux/kthread.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+
+#define ENGINE_NAME_LEN 30
+
+struct device;
+
+/*
+ * struct crypto_engine - crypto hardware engine
+ * @name: the engine name
+ * @idling: the engine is entering idle state
+ * @busy: request pump is busy
+ * @running: the engine is running
+ * @retry_support: indication that the hardware allows re-execution
+ * of a failed backlog request; such a request is requeued by the
+ * crypto-engine in head position to keep order
+ * @list: link with the global crypto engine list
+ * @queue_lock: spinlock to synchronise access to request queue
+ * @queue: the crypto queue of the engine
+ * @rt: whether this queue is set to run as a realtime task
+ * @prepare_crypt_hardware: a request will soon arrive from the queue
+ * so the subsystem requests the driver to prepare the hardware
+ * by issuing this call
+ * @unprepare_crypt_hardware: there are currently no more requests on the
+ * queue so the subsystem notifies the driver that it may relax the
+ * hardware by issuing this call
+ * @do_batch_requests: execute a batch of requests. Depends on
+ * multiple-request support in the hardware.
+ * @kworker: kthread worker struct for request pump
+ * @pump_requests: work struct for scheduling work to the request pump
+ * @priv_data: the engine private data
+ * @cur_req: the current request being processed
+ */
+struct crypto_engine {
+ char name[ENGINE_NAME_LEN];
+ bool idling;
+ bool busy;
+ bool running;
+
+ bool retry_support;
+
+ struct list_head list;
+ spinlock_t queue_lock;
+ struct crypto_queue queue;
+ struct device *dev;
+
+ bool rt;
+
+ int (*prepare_crypt_hardware)(struct crypto_engine *engine);
+ int (*unprepare_crypt_hardware)(struct crypto_engine *engine);
+ int (*do_batch_requests)(struct crypto_engine *engine);
+
+
+ struct kthread_worker *kworker;
+ struct kthread_work pump_requests;
+
+ void *priv_data;
+ struct crypto_async_request *cur_req;
+};
+
+#endif
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 37edf3f4e8af..59c707e4dea4 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -18,15 +18,13 @@ struct crypto_hash_walk {
char *data;
unsigned int offset;
- unsigned int alignmask;
+ unsigned int flags;
struct page *pg;
unsigned int entrylen;
unsigned int total;
struct scatterlist *sg;
-
- unsigned int flags;
};
struct ahash_instance {
@@ -149,6 +147,18 @@ static inline struct ahash_alg *__crypto_ahash_alg(struct crypto_alg *alg)
halg);
}
+static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
+{
+ return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
+ halg);
+}
+
+static inline void crypto_ahash_set_statesize(struct crypto_ahash *tfm,
+ unsigned int size)
+{
+ tfm->statesize = size;
+}
+
static inline void crypto_ahash_set_reqsize(struct crypto_ahash *tfm,
unsigned int reqsize)
{
@@ -257,11 +267,6 @@ static inline struct crypto_shash *crypto_spawn_shash(
return crypto_spawn_tfm2(&spawn->base);
}
-static inline void *crypto_shash_ctx_aligned(struct crypto_shash *tfm)
-{
- return crypto_tfm_ctx_aligned(&tfm->base);
-}
-
static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm)
{
return container_of(tfm, struct crypto_shash, base);
diff --git a/include/crypto/internal/sig.h b/include/crypto/internal/sig.h
new file mode 100644
index 000000000000..97cb26ef8115
--- /dev/null
+++ b/include/crypto/internal/sig.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Public Key Signature Algorithm
+ *
+ * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+#ifndef _CRYPTO_INTERNAL_SIG_H
+#define _CRYPTO_INTERNAL_SIG_H
+
+#include <crypto/algapi.h>
+#include <crypto/sig.h>
+
+static inline void *crypto_sig_ctx(struct crypto_sig *tfm)
+{
+ return crypto_tfm_ctx(&tfm->base);
+}
+#endif
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index fb3d9e899f52..7ae42afdcf3e 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -36,10 +36,25 @@ struct skcipher_instance {
};
};
+struct lskcipher_instance {
+ void (*free)(struct lskcipher_instance *inst);
+ union {
+ struct {
+ char head[offsetof(struct lskcipher_alg, co.base)];
+ struct crypto_instance base;
+ } s;
+ struct lskcipher_alg alg;
+ };
+};
+
struct crypto_skcipher_spawn {
struct crypto_spawn base;
};
+struct crypto_lskcipher_spawn {
+ struct crypto_spawn base;
+};
+
struct skcipher_walk {
union {
struct {
@@ -80,6 +95,12 @@ static inline struct crypto_instance *skcipher_crypto_instance(
return &inst->s.base;
}
+static inline struct crypto_instance *lskcipher_crypto_instance(
+ struct lskcipher_instance *inst)
+{
+ return &inst->s.base;
+}
+
static inline struct skcipher_instance *skcipher_alg_instance(
struct crypto_skcipher *skcipher)
{
@@ -87,11 +108,23 @@ static inline struct skcipher_instance *skcipher_alg_instance(
struct skcipher_instance, alg);
}
+static inline struct lskcipher_instance *lskcipher_alg_instance(
+ struct crypto_lskcipher *lskcipher)
+{
+ return container_of(crypto_lskcipher_alg(lskcipher),
+ struct lskcipher_instance, alg);
+}
+
static inline void *skcipher_instance_ctx(struct skcipher_instance *inst)
{
return crypto_instance_ctx(skcipher_crypto_instance(inst));
}
+static inline void *lskcipher_instance_ctx(struct lskcipher_instance *inst)
+{
+ return crypto_instance_ctx(lskcipher_crypto_instance(inst));
+}
+
static inline void skcipher_request_complete(struct skcipher_request *req, int err)
{
crypto_request_complete(&req->base, err);
@@ -101,21 +134,36 @@ int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
struct crypto_instance *inst,
const char *name, u32 type, u32 mask);
+int crypto_grab_lskcipher(struct crypto_lskcipher_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask);
+
static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn)
{
crypto_drop_spawn(&spawn->base);
}
-static inline struct skcipher_alg *crypto_skcipher_spawn_alg(
- struct crypto_skcipher_spawn *spawn)
+static inline void crypto_drop_lskcipher(struct crypto_lskcipher_spawn *spawn)
+{
+ crypto_drop_spawn(&spawn->base);
+}
+
+static inline struct lskcipher_alg *crypto_lskcipher_spawn_alg(
+ struct crypto_lskcipher_spawn *spawn)
{
- return container_of(spawn->base.alg, struct skcipher_alg, base);
+ return container_of(spawn->base.alg, struct lskcipher_alg, co.base);
}
-static inline struct skcipher_alg *crypto_spawn_skcipher_alg(
+static inline struct skcipher_alg_common *crypto_spawn_skcipher_alg_common(
struct crypto_skcipher_spawn *spawn)
{
- return crypto_skcipher_spawn_alg(spawn);
+ return container_of(spawn->base.alg, struct skcipher_alg_common, base);
+}
+
+static inline struct lskcipher_alg *crypto_spawn_lskcipher_alg(
+ struct crypto_lskcipher_spawn *spawn)
+{
+ return crypto_lskcipher_spawn_alg(spawn);
}
static inline struct crypto_skcipher *crypto_spawn_skcipher(
@@ -124,6 +172,12 @@ static inline struct crypto_skcipher *crypto_spawn_skcipher(
return crypto_spawn_tfm2(&spawn->base);
}
+static inline struct crypto_lskcipher *crypto_spawn_lskcipher(
+ struct crypto_lskcipher_spawn *spawn)
+{
+ return crypto_spawn_tfm2(&spawn->base);
+}
+
static inline void crypto_skcipher_set_reqsize(
struct crypto_skcipher *skcipher, unsigned int reqsize)
{
@@ -144,6 +198,13 @@ void crypto_unregister_skciphers(struct skcipher_alg *algs, int count);
int skcipher_register_instance(struct crypto_template *tmpl,
struct skcipher_instance *inst);
+int crypto_register_lskcipher(struct lskcipher_alg *alg);
+void crypto_unregister_lskcipher(struct lskcipher_alg *alg);
+int crypto_register_lskciphers(struct lskcipher_alg *algs, int count);
+void crypto_unregister_lskciphers(struct lskcipher_alg *algs, int count);
+int lskcipher_register_instance(struct crypto_template *tmpl,
+ struct lskcipher_instance *inst);
+
int skcipher_walk_done(struct skcipher_walk *walk, int err);
int skcipher_walk_virt(struct skcipher_walk *walk,
struct skcipher_request *req,
@@ -166,6 +227,11 @@ static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm)
return crypto_tfm_ctx(&tfm->base);
}
+static inline void *crypto_lskcipher_ctx(struct crypto_lskcipher *tfm)
+{
+ return crypto_tfm_ctx(&tfm->base);
+}
+
static inline void *crypto_skcipher_ctx_dma(struct crypto_skcipher *tfm)
{
return crypto_tfm_ctx_dma(&tfm->base);
@@ -191,41 +257,6 @@ static inline u32 skcipher_request_flags(struct skcipher_request *req)
return req->base.flags;
}
-static inline unsigned int crypto_skcipher_alg_min_keysize(
- struct skcipher_alg *alg)
-{
- return alg->min_keysize;
-}
-
-static inline unsigned int crypto_skcipher_alg_max_keysize(
- struct skcipher_alg *alg)
-{
- return alg->max_keysize;
-}
-
-static inline unsigned int crypto_skcipher_alg_walksize(
- struct skcipher_alg *alg)
-{
- return alg->walksize;
-}
-
-/**
- * crypto_skcipher_walksize() - obtain walk size
- * @tfm: cipher handle
- *
- * In some cases, algorithms can only perform optimally when operating on
- * multiple blocks in parallel. This is reflected by the walksize, which
- * must be a multiple of the chunksize (or equal if the concern does not
- * apply)
- *
- * Return: walk size in bytes
- */
-static inline unsigned int crypto_skcipher_walksize(
- struct crypto_skcipher *tfm)
-{
- return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm));
-}
-
/* Helpers for simple block cipher modes of operation */
struct skcipher_ctx_simple {
struct crypto_cipher *cipher; /* underlying block cipher */
@@ -249,5 +280,24 @@ static inline struct crypto_alg *skcipher_ialg_simple(
return crypto_spawn_cipher_alg(spawn);
}
+static inline struct crypto_lskcipher *lskcipher_cipher_simple(
+ struct crypto_lskcipher *tfm)
+{
+ struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm);
+
+ return *ctx;
+}
+
+struct lskcipher_instance *lskcipher_alloc_instance_simple(
+ struct crypto_template *tmpl, struct rtattr **tb);
+
+static inline struct lskcipher_alg *lskcipher_ialg_simple(
+ struct lskcipher_instance *inst)
+{
+ struct crypto_lskcipher_spawn *spawn = lskcipher_instance_ctx(inst);
+
+ return crypto_lskcipher_spawn_alg(spawn);
+}
+
#endif /* _CRYPTO_INTERNAL_SKCIPHER_H */
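A minimal sketch of how a driver might fill in and register an lskcipher using the hooks declared above; every name here (the example_* functions, the placeholder transform, the cra_* values) is illustrative rather than part of this patch:

static int example_setkey(struct crypto_lskcipher *tfm, const u8 *key,
			  unsigned int keylen)
{
	/* stash the key in crypto_lskcipher_ctx(tfm); sketch only */
	return 0;
}

static int example_crypt(struct crypto_lskcipher *tfm, const u8 *src,
			 u8 *dst, unsigned len, u8 *siv, u32 flags)
{
	memcpy(dst, src, len);	/* placeholder transform */
	return 0;		/* no bytes left unprocessed */
}

static struct lskcipher_alg example_alg = {
	.setkey		= example_setkey,
	.encrypt	= example_crypt,
	.decrypt	= example_crypt,
	.co		= {
		.min_keysize	= 16,
		.max_keysize	= 16,
		.base		= {
			.cra_name	 = "example(cipher)",
			.cra_driver_name = "example-generic",
			.cra_priority	 = 100,
			.cra_blocksize	 = 16,
			.cra_module	 = THIS_MODULE,
		},
	},
};

/* paired in module init/exit:
 *	crypto_register_lskcipher(&example_alg);
 *	crypto_unregister_lskcipher(&example_alg);
 */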
diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h
index 653992a6e941..462f8a34cdf8 100644
--- a/include/crypto/public_key.h
+++ b/include/crypto/public_key.h
@@ -48,8 +48,6 @@ struct public_key_signature {
const char *pkey_algo;
const char *hash_algo;
const char *encoding;
- const void *data;
- unsigned int data_size;
};
extern void public_key_signature_free(struct public_key_signature *sig);
@@ -80,6 +78,10 @@ extern int restrict_link_by_ca(struct key *dest_keyring,
const struct key_type *type,
const union key_payload *payload,
struct key *trust_keyring);
+int restrict_link_by_digsig(struct key *dest_keyring,
+ const struct key_type *type,
+ const union key_payload *payload,
+ struct key *trust_keyring);
#else
static inline int restrict_link_by_ca(struct key *dest_keyring,
const struct key_type *type,
@@ -88,6 +90,14 @@ static inline int restrict_link_by_ca(struct key *dest_keyring,
{
return 0;
}
+
+static inline int restrict_link_by_digsig(struct key *dest_keyring,
+ const struct key_type *type,
+ const union key_payload *payload,
+ struct key *trust_keyring)
+{
+ return 0;
+}
#endif
extern int query_asymmetric_key(const struct kernel_pkey_params *,
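For illustration, the new digsig restriction plugs in like any other key_restriction check; this helper and its error handling are a sketch assuming the usual struct key_restriction and key_get() definitions from <linux/key.h>:

/* Hypothetical: build a restriction that only admits keys vouched for
 * by a digsig-capable key in @trusted. */
static struct key_restriction *example_digsig_restriction(struct key *trusted)
{
	struct key_restriction *res = kzalloc(sizeof(*res), GFP_KERNEL);

	if (!res)
		return NULL;

	res->check = restrict_link_by_digsig;
	res->key = key_get(trusted);
	return res;
}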
diff --git a/include/crypto/sha2.h b/include/crypto/sha2.h
index 2838f529f31e..b9e9281d76c9 100644
--- a/include/crypto/sha2.h
+++ b/include/crypto/sha2.h
@@ -128,7 +128,7 @@ static inline void sha224_init(struct sha256_state *sctx)
sctx->state[7] = SHA224_H7;
sctx->count = 0;
}
-void sha224_update(struct sha256_state *sctx, const u8 *data, unsigned int len);
+/* Simply use sha256_update as it is equivalent to sha224_update. */
void sha224_final(struct sha256_state *sctx, u8 *out);
#endif /* _CRYPTO_SHA2_H */
diff --git a/include/crypto/sha256_base.h b/include/crypto/sha256_base.h
index 76173c613058..ab904d82236f 100644
--- a/include/crypto/sha256_base.h
+++ b/include/crypto/sha256_base.h
@@ -8,13 +8,12 @@
#ifndef _CRYPTO_SHA256_BASE_H
#define _CRYPTO_SHA256_BASE_H
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
#include <crypto/internal/hash.h>
#include <crypto/sha2.h>
-#include <linux/crypto.h>
-#include <linux/module.h>
#include <linux/string.h>
-
-#include <asm/unaligned.h>
+#include <linux/types.h>
typedef void (sha256_block_fn)(struct sha256_state *sst, u8 const *src,
int blocks);
@@ -35,12 +34,11 @@ static inline int sha256_base_init(struct shash_desc *desc)
return 0;
}
-static inline int sha256_base_do_update(struct shash_desc *desc,
- const u8 *data,
- unsigned int len,
- sha256_block_fn *block_fn)
+static inline int lib_sha256_base_do_update(struct sha256_state *sctx,
+ const u8 *data,
+ unsigned int len,
+ sha256_block_fn *block_fn)
{
- struct sha256_state *sctx = shash_desc_ctx(desc);
unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
sctx->count += len;
@@ -73,11 +71,20 @@ static inline int sha256_base_do_update(struct shash_desc *desc,
return 0;
}
-static inline int sha256_base_do_finalize(struct shash_desc *desc,
- sha256_block_fn *block_fn)
+static inline int sha256_base_do_update(struct shash_desc *desc,
+ const u8 *data,
+ unsigned int len,
+ sha256_block_fn *block_fn)
{
- const int bit_offset = SHA256_BLOCK_SIZE - sizeof(__be64);
struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ return lib_sha256_base_do_update(sctx, data, len, block_fn);
+}
+
+static inline int lib_sha256_base_do_finalize(struct sha256_state *sctx,
+ sha256_block_fn *block_fn)
+{
+ const int bit_offset = SHA256_BLOCK_SIZE - sizeof(__be64);
__be64 *bits = (__be64 *)(sctx->buf + bit_offset);
unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
@@ -96,10 +103,17 @@ static inline int sha256_base_do_finalize(struct shash_desc *desc,
return 0;
}
-static inline int sha256_base_finish(struct shash_desc *desc, u8 *out)
+static inline int sha256_base_do_finalize(struct shash_desc *desc,
+ sha256_block_fn *block_fn)
{
- unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ return lib_sha256_base_do_finalize(sctx, block_fn);
+}
+
+static inline int lib_sha256_base_finish(struct sha256_state *sctx, u8 *out,
+ unsigned int digest_size)
+{
__be32 *digest = (__be32 *)out;
int i;
@@ -110,4 +124,12 @@ static inline int sha256_base_finish(struct shash_desc *desc, u8 *out)
return 0;
}
+static inline int sha256_base_finish(struct shash_desc *desc, u8 *out)
+{
+ unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ return lib_sha256_base_finish(sctx, out, digest_size);
+}
+
#endif /* _CRYPTO_SHA256_BASE_H */
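The lib_sha256_base_*() split lets code that has no shash_desc drive the same block buffering; a sketch of a one-shot digest built on the new helpers, with the compression function supplied by the caller:

/* Hypothetical wrapper; block_fn performs the actual compression rounds. */
static void example_sha256(const u8 *data, unsigned int len,
			   u8 out[SHA256_DIGEST_SIZE],
			   sha256_block_fn *block_fn)
{
	struct sha256_state sctx;

	sha256_init(&sctx);
	lib_sha256_base_do_update(&sctx, data, len, block_fn);
	lib_sha256_base_do_finalize(&sctx, block_fn);
	lib_sha256_base_finish(&sctx, out, SHA256_DIGEST_SIZE);
}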
diff --git a/include/crypto/sig.h b/include/crypto/sig.h
new file mode 100644
index 000000000000..d25186bb2be3
--- /dev/null
+++ b/include/crypto/sig.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Public Key Signature Algorithm
+ *
+ * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+#ifndef _CRYPTO_SIG_H
+#define _CRYPTO_SIG_H
+
+#include <linux/crypto.h>
+
+/**
+ * struct crypto_sig - user-instantiated objects which encapsulate
+ * algorithms and core processing logic
+ *
+ * @base: Common crypto API algorithm data structure
+ */
+struct crypto_sig {
+ struct crypto_tfm base;
+};
+
+/**
+ * DOC: Generic Public Key Signature API
+ *
+ * The Public Key Signature API is used with the algorithms of type
+ * CRYPTO_ALG_TYPE_SIG (listed as type "sig" in /proc/crypto)
+ */
+
+/**
+ * crypto_alloc_sig() - allocate signature tfm handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * signing algorithm e.g. "ecdsa"
+ * @type: specifies the type of the algorithm
+ * @mask: specifies the mask for the algorithm
+ *
+ * Allocate a handle for public key signature algorithm. The returned struct
+ * crypto_sig is the handle that is required for any subsequent
+ * API invocation for signature operations.
+ *
+ * Return: allocated handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+struct crypto_sig *crypto_alloc_sig(const char *alg_name, u32 type, u32 mask);
+
+static inline struct crypto_tfm *crypto_sig_tfm(struct crypto_sig *tfm)
+{
+ return &tfm->base;
+}
+
+/**
+ * crypto_free_sig() - free signature tfm handle
+ *
+ * @tfm: signature tfm handle allocated with crypto_alloc_sig()
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
+ */
+static inline void crypto_free_sig(struct crypto_sig *tfm)
+{
+ crypto_destroy_tfm(tfm, crypto_sig_tfm(tfm));
+}
+
+/**
+ * crypto_sig_maxsize() - Get len for output buffer
+ *
+ * Function returns the destination buffer size required for a given key.
+ * Function assumes that the key is already set in the transformation. If this
+ * function is called without a prior setkey, or after a failed setkey, it will
+ * dereference a NULL pointer.
+ *
+ * @tfm: signature tfm handle allocated with crypto_alloc_sig()
+ */
+int crypto_sig_maxsize(struct crypto_sig *tfm);
+
+/**
+ * crypto_sig_sign() - Invoke signing operation
+ *
+ * Function invokes the specific signing operation for a given algorithm
+ *
+ * @tfm: signature tfm handle allocated with crypto_alloc_sig()
+ * @src: source buffer
+ * @slen: source length
+ * @dst: destination buffer
+ * @dlen: destination length
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_sig_sign(struct crypto_sig *tfm,
+ const void *src, unsigned int slen,
+ void *dst, unsigned int dlen);
+
+/**
+ * crypto_sig_verify() - Invoke signature verification
+ *
+ * Function invokes the specific signature verification operation
+ * for a given algorithm.
+ *
+ * @tfm: signature tfm handle allocated with crypto_alloc_sig()
+ * @src: source buffer
+ * @slen: source length
+ * @digest: digest
+ * @dlen: digest length
+ *
+ * Return: zero on verification success; error code in case of error.
+ */
+int crypto_sig_verify(struct crypto_sig *tfm,
+ const void *src, unsigned int slen,
+ const void *digest, unsigned int dlen);
+
+/**
+ * crypto_sig_set_pubkey() - Invoke set public key operation
+ *
+ * Function invokes the algorithm specific set key function, which knows
+ * how to decode and interpret the encoded key and parameters
+ *
+ * @tfm: tfm handle
+ * @key: BER encoded public key, algo OID, paramlen, BER encoded
+ * parameters
+ * @keylen: length of the key (not including other data)
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_sig_set_pubkey(struct crypto_sig *tfm,
+ const void *key, unsigned int keylen);
+
+/**
+ * crypto_sig_set_privkey() - Invoke set private key operation
+ *
+ * Function invokes the algorithm specific set key function, which knows
+ * how to decode and interpret the encoded key and parameters
+ *
+ * @tfm: tfm handle
+ * @key: BER encoded private key, algo OID, paramlen, BER encoded
+ * parameters
+ * @keylen: length of the key (not including other data)
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_sig_set_privkey(struct crypto_sig *tfm,
+ const void *key, unsigned int keylen);
+#endif
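Putting the API above together, a verification call looks roughly like this; the algorithm name is a placeholder and the key/signature encodings are whatever the chosen algorithm expects:

/* Hypothetical one-shot verification using the sig API. */
static int example_sig_verify(const void *key, unsigned int keylen,
			      const void *sig, unsigned int siglen,
			      const void *digest, unsigned int dlen)
{
	struct crypto_sig *tfm;
	int ret;

	tfm = crypto_alloc_sig("pkcs1pad(rsa,sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_sig_set_pubkey(tfm, key, keylen);
	if (!ret)
		ret = crypto_sig_verify(tfm, sig, siglen, digest, dlen);

	crypto_free_sig(tfm);
	return ret;
}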
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 080d1ba3611d..c8857d7bdb37 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -15,6 +15,17 @@
#include <linux/string.h>
#include <linux/types.h>
+/* Set this bit if the lskcipher operation is a continuation. */
+#define CRYPTO_LSKCIPHER_FLAG_CONT 0x00000001
+/* Set this bit if the lskcipher operation is final. */
+#define CRYPTO_LSKCIPHER_FLAG_FINAL 0x00000002
+/* The bit CRYPTO_TFM_REQ_MAY_SLEEP can also be set if needed. */
+
+/* Set this bit if the skcipher operation is a continuation. */
+#define CRYPTO_SKCIPHER_REQ_CONT 0x00000001
+/* Set this bit if the skcipher operation is not final. */
+#define CRYPTO_SKCIPHER_REQ_NOTFINAL 0x00000002
+
struct scatterlist;
/**
@@ -49,6 +60,10 @@ struct crypto_sync_skcipher {
struct crypto_skcipher base;
};
+struct crypto_lskcipher {
+ struct crypto_tfm base;
+};
+
/*
* struct crypto_istat_cipher - statistics for cipher algorithm
* @encrypt_cnt: number of encrypt requests
@@ -65,8 +80,14 @@ struct crypto_istat_cipher {
atomic64_t err_cnt;
};
-/**
- * struct skcipher_alg - symmetric key cipher definition
+#ifdef CONFIG_CRYPTO_STATS
+#define SKCIPHER_ALG_COMMON_STAT struct crypto_istat_cipher stat;
+#else
+#define SKCIPHER_ALG_COMMON_STAT
+#endif
+
+/*
+ * struct skcipher_alg_common - common properties of skcipher_alg
* @min_keysize: Minimum key size supported by the transformation. This is the
* smallest key length supported by this transformation algorithm.
* This must be set to one of the pre-defined values as this is
@@ -77,6 +98,29 @@ struct crypto_istat_cipher {
* This must be set to one of the pre-defined values as this is
* not hardware specific. Possible values for this field can be
* found via git grep "_MAX_KEY_SIZE" include/crypto/
+ * @ivsize: IV size applicable for transformation. The consumer must provide an
+ * IV of exactly that size to perform the encrypt or decrypt operation.
+ * @chunksize: Equal to the block size except for stream ciphers such as
+ * CTR where it is set to the underlying block size.
+ * @statesize: Size of the internal state for the algorithm.
+ * @stat: Statistics for cipher algorithm
+ * @base: Definition of a generic crypto algorithm.
+ */
+#define SKCIPHER_ALG_COMMON { \
+ unsigned int min_keysize; \
+ unsigned int max_keysize; \
+ unsigned int ivsize; \
+ unsigned int chunksize; \
+ unsigned int statesize; \
+ \
+ SKCIPHER_ALG_COMMON_STAT \
+ \
+ struct crypto_alg base; \
+}
+struct skcipher_alg_common SKCIPHER_ALG_COMMON;
+
+/**
+ * struct skcipher_alg - symmetric key cipher definition
* @setkey: Set key for the transformation. This function is used to either
* program a supplied key into the hardware or store the key in the
* transformation context for programming it later. Note that this
@@ -100,6 +144,17 @@ struct crypto_istat_cipher {
* be called in parallel with the same transformation object.
* @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt
* and the conditions are exactly the same.
+ * @export: Export partial state of the transformation. This function dumps the
+ * entire state of the ongoing transformation into a provided block of
+ * data so it can be @import 'ed back later on. This is useful when
+ * you want to save the partial result of the transformation after
+ * processing a certain amount of data and reload that partial result
+ * multiple times later on for re-use. No data processing happens at
+ * this point.
+ * @import: Import partial state of the transformation. This function loads the
+ * entire state of the ongoing transformation from a provided block of
+ * data so the transformation can continue from this point onward. No
+ * data processing happens at this point.
* @init: Initialize the cryptographic transformation object. This function
* is used to initialize the cryptographic transformation object.
* This function is called only once at the instantiation time, right
@@ -111,15 +166,10 @@ struct crypto_istat_cipher {
* @exit: Deinitialize the cryptographic transformation object. This is a
* counterpart to @init, used to remove various changes set in
* @init.
- * @ivsize: IV size applicable for transformation. The consumer must provide an
- * IV of exactly that size to perform the encrypt or decrypt operation.
- * @chunksize: Equal to the block size except for stream ciphers such as
- * CTR where it is set to the underlying block size.
* @walksize: Equal to the chunk size except in cases where the algorithm is
* considerably more efficient if it can operate on multiple chunks
* in parallel. Should be a multiple of chunksize.
- * @stat: Statistics for cipher algorithm
- * @base: Definition of a generic crypto algorithm.
+ * @co: see struct skcipher_alg_common
*
* All fields except @ivsize are mandatory and must be filled.
*/
@@ -128,20 +178,63 @@ struct skcipher_alg {
unsigned int keylen);
int (*encrypt)(struct skcipher_request *req);
int (*decrypt)(struct skcipher_request *req);
+ int (*export)(struct skcipher_request *req, void *out);
+ int (*import)(struct skcipher_request *req, const void *in);
int (*init)(struct crypto_skcipher *tfm);
void (*exit)(struct crypto_skcipher *tfm);
- unsigned int min_keysize;
- unsigned int max_keysize;
- unsigned int ivsize;
- unsigned int chunksize;
unsigned int walksize;
-#ifdef CONFIG_CRYPTO_STATS
- struct crypto_istat_cipher stat;
-#endif
+ union {
+ struct SKCIPHER_ALG_COMMON;
+ struct skcipher_alg_common co;
+ };
+};
- struct crypto_alg base;
+/**
+ * struct lskcipher_alg - linear symmetric key cipher definition
+ * @setkey: Set key for the transformation. This function is used to either
+ * program a supplied key into the hardware or store the key in the
+ * transformation context for programming it later. Note that this
+ * function does modify the transformation context. This function can
+ * be called multiple times during the existence of the transformation
+ * object, so one must make sure the key is properly reprogrammed into
+ * the hardware. This function is also responsible for checking the key
+ * length for validity. In case a software fallback was put in place in
+ * the @cra_init call, this function might need to use the fallback if
+ * the algorithm doesn't support all of the key sizes.
+ * @encrypt: Encrypt a number of bytes. This function is used to encrypt
+ * the supplied data. This function shall not modify
+ * the transformation context, as this function may be called
+ * in parallel with the same transformation object. Data
+ * may be left over if the length is not a multiple of the
+ * block size and there is more to come (final == false).
+ * The number of left-over bytes should be returned on success.
+ * The siv field shall be as long as ivsize + statesize with
+ * the IV placed at the front. The state will be used by the
+ * algorithm internally.
+ * @decrypt: Decrypt a number of bytes. This is a reverse counterpart to
+ * @encrypt and the conditions are exactly the same.
+ * @init: Initialize the cryptographic transformation object. This function
+ * is used to initialize the cryptographic transformation object.
+ * This function is called only once at the instantiation time, right
+ * after the transformation context was allocated.
+ * @exit: Deinitialize the cryptographic transformation object. This is a
+ * counterpart to @init, used to remove various changes set in
+ * @init.
+ * @co: see struct skcipher_alg_common
+ */
+struct lskcipher_alg {
+ int (*setkey)(struct crypto_lskcipher *tfm, const u8 *key,
+ unsigned int keylen);
+ int (*encrypt)(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned len, u8 *siv, u32 flags);
+ int (*decrypt)(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned len, u8 *siv, u32 flags);
+ int (*init)(struct crypto_lskcipher *tfm);
+ void (*exit)(struct crypto_lskcipher *tfm);
+
+ struct skcipher_alg_common co;
};
#define MAX_SYNC_SKCIPHER_REQSIZE 384
@@ -213,12 +306,36 @@ struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(const char *alg_name,
u32 type, u32 mask);
+
+/**
+ * crypto_alloc_lskcipher() - allocate linear symmetric key cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * lskcipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for an lskcipher. The returned struct
+ * crypto_lskcipher is the cipher handle that is required for any subsequent
+ * API invocation for that lskcipher.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+struct crypto_lskcipher *crypto_alloc_lskcipher(const char *alg_name,
+ u32 type, u32 mask);
+
static inline struct crypto_tfm *crypto_skcipher_tfm(
struct crypto_skcipher *tfm)
{
return &tfm->base;
}
+static inline struct crypto_tfm *crypto_lskcipher_tfm(
+ struct crypto_lskcipher *tfm)
+{
+ return &tfm->base;
+}
+
/**
* crypto_free_skcipher() - zeroize and free cipher handle
* @tfm: cipher handle to be freed
@@ -236,6 +353,17 @@ static inline void crypto_free_sync_skcipher(struct crypto_sync_skcipher *tfm)
}
/**
+ * crypto_free_lskcipher() - zeroize and free cipher handle
+ * @tfm: cipher handle to be freed
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
+ */
+static inline void crypto_free_lskcipher(struct crypto_lskcipher *tfm)
+{
+ crypto_destroy_tfm(tfm, crypto_lskcipher_tfm(tfm));
+}
+
+/**
* crypto_has_skcipher() - Search for the availability of an skcipher.
* @alg_name: is the cra_name / name or cra_driver_name / driver name of the
* skcipher
@@ -253,6 +381,19 @@ static inline const char *crypto_skcipher_driver_name(
return crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm));
}
+static inline const char *crypto_lskcipher_driver_name(
+ struct crypto_lskcipher *tfm)
+{
+ return crypto_tfm_alg_driver_name(crypto_lskcipher_tfm(tfm));
+}
+
+static inline struct skcipher_alg_common *crypto_skcipher_alg_common(
+ struct crypto_skcipher *tfm)
+{
+ return container_of(crypto_skcipher_tfm(tfm)->__crt_alg,
+ struct skcipher_alg_common, base);
+}
+
static inline struct skcipher_alg *crypto_skcipher_alg(
struct crypto_skcipher *tfm)
{
@@ -260,9 +401,11 @@ static inline struct skcipher_alg *crypto_skcipher_alg(
struct skcipher_alg, base);
}
-static inline unsigned int crypto_skcipher_alg_ivsize(struct skcipher_alg *alg)
+static inline struct lskcipher_alg *crypto_lskcipher_alg(
+ struct crypto_lskcipher *tfm)
{
- return alg->ivsize;
+ return container_of(crypto_lskcipher_tfm(tfm)->__crt_alg,
+ struct lskcipher_alg, co.base);
}
/**
@@ -276,7 +419,7 @@ static inline unsigned int crypto_skcipher_alg_ivsize(struct skcipher_alg *alg)
*/
static inline unsigned int crypto_skcipher_ivsize(struct crypto_skcipher *tfm)
{
- return crypto_skcipher_alg(tfm)->ivsize;
+ return crypto_skcipher_alg_common(tfm)->ivsize;
}
static inline unsigned int crypto_sync_skcipher_ivsize(
@@ -286,6 +429,21 @@ static inline unsigned int crypto_sync_skcipher_ivsize(
}
/**
+ * crypto_lskcipher_ivsize() - obtain IV size
+ * @tfm: cipher handle
+ *
+ * The size of the IV for the lskcipher referenced by the cipher handle is
+ * returned. This IV size may be zero if the cipher does not need an IV.
+ *
+ * Return: IV size in bytes
+ */
+static inline unsigned int crypto_lskcipher_ivsize(
+ struct crypto_lskcipher *tfm)
+{
+ return crypto_lskcipher_alg(tfm)->co.ivsize;
+}
+
+/**
* crypto_skcipher_blocksize() - obtain block size of cipher
* @tfm: cipher handle
*
@@ -301,10 +459,20 @@ static inline unsigned int crypto_skcipher_blocksize(
return crypto_tfm_alg_blocksize(crypto_skcipher_tfm(tfm));
}
-static inline unsigned int crypto_skcipher_alg_chunksize(
- struct skcipher_alg *alg)
+/**
+ * crypto_lskcipher_blocksize() - obtain block size of cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the lskcipher referenced with the cipher handle is
+ * returned. The caller may use that information to allocate appropriate
+ * memory for the data returned by the encryption or decryption operation
+ *
+ * Return: block size of cipher
+ */
+static inline unsigned int crypto_lskcipher_blocksize(
+ struct crypto_lskcipher *tfm)
{
- return alg->chunksize;
+ return crypto_tfm_alg_blocksize(crypto_lskcipher_tfm(tfm));
}
/**
@@ -321,7 +489,58 @@ static inline unsigned int crypto_skcipher_alg_chunksize(
static inline unsigned int crypto_skcipher_chunksize(
struct crypto_skcipher *tfm)
{
- return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm));
+ return crypto_skcipher_alg_common(tfm)->chunksize;
+}
+
+/**
+ * crypto_lskcipher_chunksize() - obtain chunk size
+ * @tfm: cipher handle
+ *
+ * The block size is set to one for stream ciphers such as CTR. However,
+ * you still need to provide incremental updates in multiples of
+ * the underlying block size as the IV does not have sub-block
+ * granularity. This is known in this API as the chunk size.
+ *
+ * Return: chunk size in bytes
+ */
+static inline unsigned int crypto_lskcipher_chunksize(
+ struct crypto_lskcipher *tfm)
+{
+ return crypto_lskcipher_alg(tfm)->co.chunksize;
+}
+
+/**
+ * crypto_skcipher_statesize() - obtain state size
+ * @tfm: cipher handle
+ *
+ * Some algorithms cannot be chained with the IV alone. They carry
+ * internal state which must be replicated if data is to be processed
+ * incrementally. The size of that state can be obtained with this
+ * function.
+ *
+ * Return: state size in bytes
+ */
+static inline unsigned int crypto_skcipher_statesize(
+ struct crypto_skcipher *tfm)
+{
+ return crypto_skcipher_alg_common(tfm)->statesize;
+}
+
+/**
+ * crypto_lskcipher_statesize() - obtain state size
+ * @tfm: cipher handle
+ *
+ * Some algorithms cannot be chained with the IV alone. They carry
+ * internal state which must be replicated if data is to be processed
+ * incrementally. The size of that state can be obtained with this
+ * function.
+ *
+ * Return: state size in bytes
+ */
+static inline unsigned int crypto_lskcipher_statesize(
+ struct crypto_lskcipher *tfm)
+{
+ return crypto_lskcipher_alg(tfm)->co.statesize;
}
static inline unsigned int crypto_sync_skcipher_blocksize(
@@ -336,6 +555,12 @@ static inline unsigned int crypto_skcipher_alignmask(
return crypto_tfm_alg_alignmask(crypto_skcipher_tfm(tfm));
}
+static inline unsigned int crypto_lskcipher_alignmask(
+ struct crypto_lskcipher *tfm)
+{
+ return crypto_tfm_alg_alignmask(crypto_lskcipher_tfm(tfm));
+}
+
static inline u32 crypto_skcipher_get_flags(struct crypto_skcipher *tfm)
{
return crypto_tfm_get_flags(crypto_skcipher_tfm(tfm));
@@ -371,6 +596,23 @@ static inline void crypto_sync_skcipher_clear_flags(
crypto_skcipher_clear_flags(&tfm->base, flags);
}
+static inline u32 crypto_lskcipher_get_flags(struct crypto_lskcipher *tfm)
+{
+ return crypto_tfm_get_flags(crypto_lskcipher_tfm(tfm));
+}
+
+static inline void crypto_lskcipher_set_flags(struct crypto_lskcipher *tfm,
+ u32 flags)
+{
+ crypto_tfm_set_flags(crypto_lskcipher_tfm(tfm), flags);
+}
+
+static inline void crypto_lskcipher_clear_flags(struct crypto_lskcipher *tfm,
+ u32 flags)
+{
+ crypto_tfm_clear_flags(crypto_lskcipher_tfm(tfm), flags);
+}
+
/**
* crypto_skcipher_setkey() - set key for cipher
* @tfm: cipher handle
@@ -396,16 +638,47 @@ static inline int crypto_sync_skcipher_setkey(struct crypto_sync_skcipher *tfm,
return crypto_skcipher_setkey(&tfm->base, key, keylen);
}
+/**
+ * crypto_lskcipher_setkey() - set key for cipher
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the lskcipher referenced by the cipher
+ * handle.
+ *
+ * Note, the key length determines the cipher type. Many block ciphers implement
+ * different cipher variants depending on the key size, such as AES-128 vs.
+ * AES-192 vs. AES-256. When providing a 16 byte key for an AES cipher handle,
+ * AES-128 is performed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
+int crypto_lskcipher_setkey(struct crypto_lskcipher *tfm,
+ const u8 *key, unsigned int keylen);
+
static inline unsigned int crypto_skcipher_min_keysize(
struct crypto_skcipher *tfm)
{
- return crypto_skcipher_alg(tfm)->min_keysize;
+ return crypto_skcipher_alg_common(tfm)->min_keysize;
}
static inline unsigned int crypto_skcipher_max_keysize(
struct crypto_skcipher *tfm)
{
- return crypto_skcipher_alg(tfm)->max_keysize;
+ return crypto_skcipher_alg_common(tfm)->max_keysize;
+}
+
+static inline unsigned int crypto_lskcipher_min_keysize(
+ struct crypto_lskcipher *tfm)
+{
+ return crypto_lskcipher_alg(tfm)->co.min_keysize;
+}
+
+static inline unsigned int crypto_lskcipher_max_keysize(
+ struct crypto_lskcipher *tfm)
+{
+ return crypto_lskcipher_alg(tfm)->co.max_keysize;
}
/**
@@ -458,6 +731,78 @@ int crypto_skcipher_encrypt(struct skcipher_request *req);
int crypto_skcipher_decrypt(struct skcipher_request *req);
/**
+ * crypto_skcipher_export() - export partial state
+ * @req: reference to the skcipher_request handle that holds all information
+ * needed to perform the operation
+ * @out: output buffer of sufficient size that can hold the state
+ *
+ * Export partial state of the transformation. This function dumps the
+ * entire state of the ongoing transformation into a provided block of
+ * data so it can be @import 'ed back later on. This is useful when
+ * you want to save the partial result of the transformation after
+ * processing a certain amount of data and reload that partial result
+ * multiple times later on for re-use. No data processing happens at
+ * this point.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
+int crypto_skcipher_export(struct skcipher_request *req, void *out);
+
+/**
+ * crypto_skcipher_import() - import partial state
+ * @req: reference to the skcipher_request handle that holds all information
+ * needed to perform the operation
+ * @in: buffer holding the state
+ *
+ * Import partial state of the transformation. This function loads the
+ * entire state of the ongoing transformation from a provided block of
+ * data so the transformation can continue from this point onward. No
+ * data processing happens at this point.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
+int crypto_skcipher_import(struct skcipher_request *req, const void *in);
+
+/**
+ * crypto_lskcipher_encrypt() - encrypt plaintext
+ * @tfm: lskcipher handle
+ * @src: source buffer
+ * @dst: destination buffer
+ * @len: number of bytes to process
+ * @siv: IV + state for the cipher operation. The length of the IV must
+ * comply with the IV size defined by crypto_lskcipher_ivsize. The
+ * IV is then followed with a buffer with the length as specified by
+ * crypto_lskcipher_statesize.
+ *
+ * Encrypt plaintext data using the lskcipher handle.
+ *
+ * Return: >=0 if the cipher operation was successful; a positive value
+ * is the number of bytes left unprocessed;
+ * < 0 if an error occurred
+ */
+int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned len, u8 *siv);
+
+/**
+ * crypto_lskcipher_decrypt() - decrypt ciphertext
+ * @tfm: lskcipher handle
+ * @src: source buffer
+ * @dst: destination buffer
+ * @len: number of bytes to process
+ * @siv: IV + state for the cipher operation. The length of the IV must
+ * comply with the IV size defined by crypto_lskcipher_ivsize. The
+ * IV is then followed with a buffer with the length as specified by
+ * crypto_lskcipher_statesize.
+ *
+ * Decrypt ciphertext data using the lskcipher handle.
+ *
+ * Return: >=0 if the cipher operation was successful; a positive value
+ * is the number of bytes left unprocessed;
+ * < 0 if an error occurred
+ */
+int crypto_lskcipher_decrypt(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned len, u8 *siv);
+
+/**
* DOC: Symmetric Key Cipher Request Handle
*
* The skcipher_request data structure contains all pointers to data
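For a one-shot operation, the lskcipher calls above combine as follows; "cbc(aes)" and the buffer handling are illustrative, and a real user must also deal with a positive (partial) return value:

/* Hypothetical one-shot encryption with a caller-allocated siv. */
static int example_lskcipher_encrypt(const u8 *key, unsigned int keylen,
				     const u8 *src, u8 *dst, unsigned int len)
{
	struct crypto_lskcipher *tfm;
	u8 *siv;
	int ret;

	tfm = crypto_alloc_lskcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* siv = IV followed by the algorithm's internal state */
	siv = kzalloc(crypto_lskcipher_ivsize(tfm) +
		      crypto_lskcipher_statesize(tfm), GFP_KERNEL);
	if (!siv) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	ret = crypto_lskcipher_setkey(tfm, key, keylen);
	if (!ret)
		ret = crypto_lskcipher_encrypt(tfm, src, dst, len, siv);

	kfree(siv);
out_free_tfm:
	crypto_free_lskcipher(tfm);
	return ret;
}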
diff --git a/include/crypto/sm2.h b/include/crypto/sm2.h
index af452556dcd4..04a92c1013c8 100644
--- a/include/crypto/sm2.h
+++ b/include/crypto/sm2.h
@@ -11,15 +11,18 @@
#ifndef _CRYPTO_SM2_H
#define _CRYPTO_SM2_H
-#include <crypto/sm3.h>
-#include <crypto/akcipher.h>
+struct shash_desc;
-/* The default user id as specified in GM/T 0009-2012 */
-#define SM2_DEFAULT_USERID "1234567812345678"
-#define SM2_DEFAULT_USERID_LEN 16
-
-extern int sm2_compute_z_digest(struct crypto_akcipher *tfm,
- const unsigned char *id, size_t id_len,
- unsigned char dgst[SM3_DIGEST_SIZE]);
+#if IS_REACHABLE(CONFIG_CRYPTO_SM2)
+int sm2_compute_z_digest(struct shash_desc *desc,
+ const void *key, unsigned int keylen, void *dgst);
+#else
+static inline int sm2_compute_z_digest(struct shash_desc *desc,
+ const void *key, unsigned int keylen,
+ void *dgst)
+{
+ return -ENOTSUPP;
+}
+#endif
#endif /* _CRYPTO_SM2_H */
diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h
index 90b69270f2fa..724c45e3e9a7 100644
--- a/include/drm/amd_asic_type.h
+++ b/include/drm/amd_asic_type.h
@@ -68,4 +68,9 @@ enum amd_asic_type {
extern const char *amdgpu_asic_name[];
+struct amdgpu_asic_type_quirk {
+ unsigned short device; /* PCI device ID */
+ u8 revision; /* revision ID */
+ unsigned short type; /* real ASIC type */
+};
#endif /*__AMD_ASIC_TYPE_H__ */
diff --git a/include/drm/bridge/aux-bridge.h b/include/drm/bridge/aux-bridge.h
new file mode 100644
index 000000000000..4453906105ca
--- /dev/null
+++ b/include/drm/bridge/aux-bridge.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2023 Linaro Ltd.
+ *
+ * Author: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+ */
+#ifndef DRM_AUX_BRIDGE_H
+#define DRM_AUX_BRIDGE_H
+
+#include <drm/drm_connector.h>
+
+struct auxiliary_device;
+
+#if IS_ENABLED(CONFIG_DRM_AUX_BRIDGE)
+int drm_aux_bridge_register(struct device *parent);
+#else
+static inline int drm_aux_bridge_register(struct device *parent)
+{
+ return 0;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_DRM_AUX_HPD_BRIDGE)
+struct auxiliary_device *devm_drm_dp_hpd_bridge_alloc(struct device *parent, struct device_node *np);
+int devm_drm_dp_hpd_bridge_add(struct device *dev, struct auxiliary_device *adev);
+struct device *drm_dp_hpd_bridge_register(struct device *parent,
+ struct device_node *np);
+void drm_aux_hpd_bridge_notify(struct device *dev, enum drm_connector_status status);
+#else
+static inline struct auxiliary_device *devm_drm_dp_hpd_bridge_alloc(struct device *parent,
+ struct device_node *np)
+{
+ return NULL;
+}
+
+static inline int devm_drm_dp_hpd_bridge_add(struct device *dev,
+					     struct auxiliary_device *adev)
+{
+ return 0;
+}
+
+static inline struct device *drm_dp_hpd_bridge_register(struct device *parent,
+ struct device_node *np)
+{
+ return NULL;
+}
+
+static inline void drm_aux_hpd_bridge_notify(struct device *dev, enum drm_connector_status status)
+{
+}
+#endif
+
+#endif
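A sketch of how a driver might use these hooks; the device pointers are placeholders and <linux/auxiliary_bus.h> is assumed for the adev->dev member:

static int example_hpd_bridge_setup(struct device *dev)
{
	struct auxiliary_device *adev;
	int ret;

	adev = devm_drm_dp_hpd_bridge_alloc(dev, dev->of_node);
	if (IS_ERR_OR_NULL(adev))
		return adev ? PTR_ERR(adev) : 0; /* NULL: option disabled */

	ret = devm_drm_dp_hpd_bridge_add(dev, adev);
	if (ret)
		return ret;

	/* later, from the hot-plug interrupt handler: */
	drm_aux_hpd_bridge_notify(&adev->dev, connector_status_connected);
	return 0;
}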
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index f668e75fbabe..6a46baa0737c 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -206,4 +206,6 @@ void dw_hdmi_phy_update_hpd(struct dw_hdmi *hdmi, void *data,
bool force, bool disabled, bool rxsense);
void dw_hdmi_phy_setup_hpd(struct dw_hdmi *hdmi, void *data);
+bool dw_hdmi_bus_fmt_is_420(struct dw_hdmi *hdmi);
+
#endif /* __IMX_HDMI_H__ */
diff --git a/include/drm/bridge/dw_mipi_dsi.h b/include/drm/bridge/dw_mipi_dsi.h
index 5286a53a1875..65d5e68065e3 100644
--- a/include/drm/bridge/dw_mipi_dsi.h
+++ b/include/drm/bridge/dw_mipi_dsi.h
@@ -11,6 +11,10 @@
#include <linux/types.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc.h>
#include <drm/drm_modes.h>
struct drm_display_mode;
@@ -55,6 +59,17 @@ struct dw_mipi_dsi_plat_data {
unsigned long mode_flags,
u32 lanes, u32 format);
+ bool (*mode_fixup)(void *priv_data, const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
+ u32 *(*get_input_bus_fmts)(void *priv_data,
+ struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts);
+
const struct dw_mipi_dsi_phy_ops *phy_ops;
const struct dw_mipi_dsi_host_ops *host_ops;
@@ -68,5 +83,6 @@ void dw_mipi_dsi_remove(struct dw_mipi_dsi *dsi);
int dw_mipi_dsi_bind(struct dw_mipi_dsi *dsi, struct drm_encoder *encoder);
void dw_mipi_dsi_unbind(struct dw_mipi_dsi *dsi);
void dw_mipi_dsi_set_slave(struct dw_mipi_dsi *dsi, struct dw_mipi_dsi *slave);
+struct drm_bridge *dw_mipi_dsi_get_bridge(struct dw_mipi_dsi *dsi);
#endif /* __DW_MIPI_DSI__ */
diff --git a/include/drm/bridge/samsung-dsim.h b/include/drm/bridge/samsung-dsim.h
index ba5484de2b30..e0c105051246 100644
--- a/include/drm/bridge/samsung-dsim.h
+++ b/include/drm/bridge/samsung-dsim.h
@@ -53,12 +53,18 @@ struct samsung_dsim_driver_data {
unsigned int plltmr_reg;
unsigned int has_freqband:1;
unsigned int has_clklane_stop:1;
+ unsigned int has_broken_fifoctrl_emptyhdr:1;
unsigned int num_clks;
+ unsigned int min_freq;
unsigned int max_freq;
unsigned int wait_for_reset;
unsigned int num_bits_resol;
unsigned int pll_p_offset;
const unsigned int *reg_values;
+ unsigned int pll_fin_min;
+ unsigned int pll_fin_max;
+ u16 m_min;
+ u16 m_max;
};
struct samsung_dsim_host_ops {
@@ -84,17 +90,21 @@ struct samsung_dsim {
void __iomem *reg_base;
struct phy *phy;
struct clk **clks;
+ struct clk *pll_clk;
struct regulator_bulk_data supplies[2];
int irq;
struct gpio_desc *te_gpio;
u32 pll_clk_rate;
u32 burst_clk_rate;
+ u32 hs_clock;
u32 esc_clk_rate;
u32 lanes;
u32 mode_flags;
u32 format;
+ bool swap_dn_dp_clk;
+ bool swap_dn_dp_data;
int state;
struct drm_property *brightness;
struct completion completed;
@@ -109,7 +119,7 @@ struct samsung_dsim {
};
extern int samsung_dsim_probe(struct platform_device *pdev);
-extern int samsung_dsim_remove(struct platform_device *pdev);
+extern void samsung_dsim_remove(struct platform_device *pdev);
extern const struct dev_pm_ops samsung_dsim_pm_ops;
#endif /* __SAMSUNG_DSIM__ */
diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h
index f8813c1e059b..3731828825bd 100644
--- a/include/drm/display/drm_dp.h
+++ b/include/drm/display/drm_dp.h
@@ -148,6 +148,7 @@
#define DP_RECEIVE_PORT_0_CAP_0 0x008
# define DP_LOCAL_EDID_PRESENT (1 << 1)
# define DP_ASSOCIATED_TO_PRECEDING_PORT (1 << 2)
+# define DP_HBLANK_EXPANSION_CAPABLE (1 << 3)
#define DP_RECEIVE_PORT_0_BUFFER_SIZE 0x009
@@ -543,6 +544,10 @@
/* DFP Capability Extension */
#define DP_DFP_CAPABILITY_EXTENSION_SUPPORT 0x0a3 /* 2.0 */
+#define DP_PANEL_REPLAY_CAP 0x0b0 /* DP 2.0 */
+# define DP_PANEL_REPLAY_SUPPORT (1 << 0)
+# define DP_PANEL_REPLAY_SU_SUPPORT (1 << 1)
+
/* Link Configuration */
#define DP_LINK_BW_SET 0x100
# define DP_LINK_RATE_TABLE 0x00 /* eDP 1.4 */
@@ -646,6 +651,9 @@
# define DP_LINK_QUAL_PATTERN_PRSBS31 0x38
# define DP_LINK_QUAL_PATTERN_CUSTOM 0x40
# define DP_LINK_QUAL_PATTERN_SQUARE 0x48
+# define DP_LINK_QUAL_PATTERN_SQUARE_PRESHOOT_DISABLED 0x49
+# define DP_LINK_QUAL_PATTERN_SQUARE_DEEMPHASIS_DISABLED 0x4a
+# define DP_LINK_QUAL_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED 0x4b
#define DP_TRAINING_LANE0_1_SET2 0x10f
#define DP_TRAINING_LANE2_3_SET2 0x110
@@ -699,6 +707,7 @@
#define DP_DSC_ENABLE 0x160 /* DP 1.4 */
# define DP_DECOMPRESSION_EN (1 << 0)
+# define DP_DSC_PASSTHROUGH_EN (1 << 1)
#define DP_DSC_CONFIGURATION 0x161 /* DP 2.0 */
#define DP_PSR_EN_CFG 0x170 /* XXX 1.2? */
@@ -716,6 +725,13 @@
#define DP_BRANCH_DEVICE_CTRL 0x1a1
# define DP_BRANCH_DEVICE_IRQ_HPD (1 << 0)
+#define DP_PANEL_REPLAY_CONFIG 0x1b0 /* DP 2.0 */
+# define DP_PANEL_REPLAY_ENABLE (1 << 0)
+# define DP_PANEL_REPLAY_UNRECOVERABLE_ERROR_EN (1 << 3)
+# define DP_PANEL_REPLAY_RFB_STORAGE_ERROR_EN (1 << 4)
+# define DP_PANEL_REPLAY_ACTIVE_FRAME_CRC_ERROR_EN (1 << 5)
+# define DP_PANEL_REPLAY_SU_ENABLE (1 << 6)
+
#define DP_PAYLOAD_ALLOCATE_SET 0x1c0
#define DP_PAYLOAD_ALLOCATE_START_TIME_SLOT 0x1c1
#define DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT 0x1c2
@@ -982,6 +998,7 @@
#define DP_EDP_GENERAL_CAP_2 0x703
# define DP_EDP_OVERDRIVE_ENGINE_ENABLED (1 << 0)
+# define DP_EDP_PANEL_LUMINANCE_CONTROL_CAPABLE (1 << 4)
#define DP_EDP_GENERAL_CAP_3 0x704 /* eDP 1.4 */
# define DP_EDP_X_REGION_CAP_MASK (0xf << 0)
@@ -1007,6 +1024,7 @@
# define DP_EDP_DYNAMIC_BACKLIGHT_ENABLE (1 << 4)
# define DP_EDP_REGIONAL_BACKLIGHT_ENABLE (1 << 5)
# define DP_EDP_UPDATE_REGION_BRIGHTNESS (1 << 6) /* eDP 1.4 */
+# define DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE (1 << 7)
#define DP_EDP_BACKLIGHT_BRIGHTNESS_MSB 0x722
#define DP_EDP_BACKLIGHT_BRIGHTNESS_LSB 0x723
@@ -1031,6 +1049,7 @@
#define DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET 0x732
#define DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET 0x733
+#define DP_EDP_PANEL_TARGET_LUMINANCE_VALUE 0x734
#define DP_EDP_REGIONAL_BACKLIGHT_BASE 0x740 /* eDP 1.4 */
#define DP_EDP_REGIONAL_BACKLIGHT_0 0x741 /* eDP 1.4 */
@@ -1102,6 +1121,18 @@
#define DP_LANE_ALIGN_STATUS_UPDATED_ESI 0x200e /* status same as 0x204 */
#define DP_SINK_STATUS_ESI 0x200f /* status same as 0x205 */
+#define DP_PANEL_REPLAY_ERROR_STATUS 0x2020 /* DP 2.1 */
+# define DP_PANEL_REPLAY_LINK_CRC_ERROR (1 << 0)
+# define DP_PANEL_REPLAY_RFB_STORAGE_ERROR (1 << 1)
+# define DP_PANEL_REPLAY_VSC_SDP_UNCORRECTABLE_ERROR (1 << 2)
+
+#define DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS 0x2022 /* DP 2.1 */
+# define DP_SINK_DEVICE_PANEL_REPLAY_STATUS_MASK (7 << 0)
+# define DP_SINK_FRAME_LOCKED_SHIFT 3
+# define DP_SINK_FRAME_LOCKED_MASK (3 << 3)
+# define DP_SINK_FRAME_LOCKED_STATUS_VALID_SHIFT 5
+# define DP_SINK_FRAME_LOCKED_STATUS_VALID_MASK (1 << 5)
+
/* Extended Receiver Capability: See DP_DPCD_REV for definitions */
#define DP_DP13_DPCD_REV 0x2200
@@ -1534,7 +1565,7 @@ enum drm_dp_phy {
#define DP_BRANCH_OUI_HEADER_SIZE 0xc
#define DP_RECEIVER_CAP_SIZE 0xf
-#define DP_DSC_RECEIVER_CAP_SIZE 0xf
+#define DP_DSC_RECEIVER_CAP_SIZE 0x10 /* DSC Capabilities 0x60 through 0x6F */
#define EDP_PSR_RECEIVER_CAP_SIZE 2
#define EDP_DISPLAY_CTL_CAP_SIZE 3
#define DP_LTTPR_COMMON_CAP_SIZE 8
@@ -1632,7 +1663,7 @@ enum dp_pixelformat {
*
* This enum is used to indicate DP VSC SDP Colorimetry formats.
* It is based on DP 1.4 spec [Table 2-117: VSC SDP Payload for DB16 through
- * DB18] and a name of enum member follows DRM_MODE_COLORIMETRY definition.
+ * DB18] and the enum member names follow the enum drm_colorimetry definition.
*
* @DP_COLORIMETRY_DEFAULT: sRGB (IEC 61966-2-1) or
* ITU-R BT.601 colorimetry format
diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h
index 86f24a759268..863b2e7add29 100644
--- a/include/drm/display/drm_dp_helper.h
+++ b/include/drm/display/drm_dp_helper.h
@@ -164,6 +164,7 @@ drm_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
}
/* DP/eDP DSC support */
+u8 drm_dp_dsc_sink_bpp_incr(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]);
u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
bool is_edp);
u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]);
@@ -251,6 +252,19 @@ drm_edp_backlight_supported(const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE])
return !!(edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP);
}
+/**
+ * drm_dp_is_uhbr_rate - Determine if a link rate is UHBR
+ * @link_rate: link rate in 10kbits/s units
+ *
+ * Determine if the provided link rate is a UHBR rate.
+ *
+ * Returns: %true if @link_rate is a UHBR rate.
+ */
+static inline bool drm_dp_is_uhbr_rate(int link_rate)
+{
+ return link_rate >= 1000000;
+}
+
/*
* DisplayPort AUX channel
*/
@@ -272,8 +286,8 @@ struct drm_dp_aux_msg {
};
struct cec_adapter;
-struct edid;
struct drm_connector;
+struct drm_edid;
/**
* struct drm_dp_aux_cec - DisplayPort CEC-Tunneling-over-AUX
@@ -507,18 +521,18 @@ bool drm_dp_downstream_is_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
const u8 port_cap[4], u8 type);
bool drm_dp_downstream_is_tmds(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
const u8 port_cap[4],
- const struct edid *edid);
+ const struct drm_edid *drm_edid);
int drm_dp_downstream_max_dotclock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
const u8 port_cap[4]);
int drm_dp_downstream_max_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
const u8 port_cap[4],
- const struct edid *edid);
+ const struct drm_edid *drm_edid);
int drm_dp_downstream_min_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
const u8 port_cap[4],
- const struct edid *edid);
+ const struct drm_edid *drm_edid);
int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
const u8 port_cap[4],
- const struct edid *edid);
+ const struct drm_edid *drm_edid);
bool drm_dp_downstream_420_passthrough(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
const u8 port_cap[4]);
bool drm_dp_downstream_444_to_420_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
@@ -530,7 +544,7 @@ int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6]);
void drm_dp_downstream_debug(struct seq_file *m,
const u8 dpcd[DP_RECEIVER_CAP_SIZE],
const u8 port_cap[4],
- const struct edid *edid,
+ const struct drm_edid *drm_edid,
struct drm_dp_aux *aux);
enum drm_mode_subconnector
drm_dp_subconnector_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
@@ -632,6 +646,13 @@ enum drm_dp_quirk {
* the DP_MAX_LINK_RATE register reporting a lower max multiplier.
*/
DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS,
+ /**
+ * @DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC:
+ *
+ * The device applies HBLANK expansion for some modes, but this
+ * requires enabling DSC.
+ */
+ DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC,
};
/**
@@ -699,6 +720,7 @@ void drm_dp_cec_irq(struct drm_dp_aux *aux);
void drm_dp_cec_register_connector(struct drm_dp_aux *aux,
struct drm_connector *connector);
void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux);
+void drm_dp_cec_attach(struct drm_dp_aux *aux, u16 source_physical_address);
void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid);
void drm_dp_cec_unset_edid(struct drm_dp_aux *aux);
#else
@@ -716,6 +738,11 @@ static inline void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux)
{
}
+static inline void drm_dp_cec_attach(struct drm_dp_aux *aux,
+ u16 source_physical_address)
+{
+}
+
static inline void drm_dp_cec_set_edid(struct drm_dp_aux *aux,
const struct edid *edid)
{
@@ -775,4 +802,15 @@ bool drm_dp_downstream_rgb_to_ycbcr_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZ
const u8 port_cap[4], u8 color_spc);
int drm_dp_pcon_convert_rgb_to_ycbcr(struct drm_dp_aux *aux, u8 color_spc);
+#define DRM_DP_BW_OVERHEAD_MST BIT(0)
+#define DRM_DP_BW_OVERHEAD_UHBR BIT(1)
+#define DRM_DP_BW_OVERHEAD_SSC_REF_CLK BIT(2)
+#define DRM_DP_BW_OVERHEAD_FEC BIT(3)
+#define DRM_DP_BW_OVERHEAD_DSC BIT(4)
+
+int drm_dp_bw_overhead(int lane_count, int hactive,
+ int dsc_slice_count,
+ int bpp_x16, unsigned long flags);
+int drm_dp_bw_channel_coding_efficiency(bool is_uhbr);
+
#endif /* _DRM_DP_HELPER_H_ */
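To show how the new helpers fit together: link rates are in 10 kbit/s units, so 13.5 Gbps is 1350000 and qualifies as UHBR; the scaling of the efficiency value is documented with the helper itself, so this sketch only shows the call pattern:

/* Hypothetical: pick the channel coding efficiency for a link rate. */
static int example_coding_efficiency(int link_rate)
{
	/* 128b/132b coding for UHBR rates, 8b/10b otherwise */
	return drm_dp_bw_channel_coding_efficiency(
			drm_dp_is_uhbr_rate(link_rate));
}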
diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h
index 32c764fb9cb5..9b19d8bd520a 100644
--- a/include/drm/display/drm_dp_mst_helper.h
+++ b/include/drm/display/drm_dp_mst_helper.h
@@ -25,6 +25,7 @@
#include <linux/types.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_fixed.h>
#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#include <linux/stackdepot.h>
@@ -46,6 +47,13 @@ struct drm_dp_mst_topology_ref_history {
};
#endif /* IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) */
+enum drm_dp_mst_payload_allocation {
+ DRM_DP_MST_PAYLOAD_ALLOCATION_NONE,
+ DRM_DP_MST_PAYLOAD_ALLOCATION_LOCAL,
+ DRM_DP_MST_PAYLOAD_ALLOCATION_DFP,
+ DRM_DP_MST_PAYLOAD_ALLOCATION_REMOTE,
+};
+
struct drm_dp_mst_branch;
/**
@@ -138,12 +146,7 @@ struct drm_dp_mst_port {
* @cached_edid: for DP logical ports - make tiling work by ensuring
* that the EDID for all connectors is read immediately.
*/
- struct edid *cached_edid;
- /**
- * @has_audio: Tracks whether the sink connector to this port is
- * audio-capable.
- */
- bool has_audio;
+ const struct drm_edid *cached_edid;
/**
* @fec_capable: bool indicating if FEC can be supported up to that
@@ -542,7 +545,7 @@ struct drm_dp_mst_atomic_payload {
* drm_dp_mst_atomic_wait_for_dependencies() has been called, which will ensure the
* previous MST states payload start slots have been copied over to the new state. Note
* that a new start slot won't be assigned/removed from this payload until
- * drm_dp_add_payload_part1()/drm_dp_remove_payload() have been called.
+ * drm_dp_add_payload_part1()/drm_dp_remove_payload_part2() have been called.
* * Acquire the MST modesetting lock, and then wait for any pending MST-related commits to
* get committed to hardware by calling drm_crtc_commit_wait() on each of the
* &drm_crtc_commit structs in &drm_dp_mst_topology_state.commit_deps.
@@ -569,6 +572,9 @@ struct drm_dp_mst_atomic_payload {
/** @dsc_enabled: Whether or not this payload has DSC enabled */
bool dsc_enabled : 1;
+ /** @payload_allocation_status: The allocation status of this payload */
+ enum drm_dp_mst_payload_allocation payload_allocation_status;
+
/** @next: The list node for this payload */
struct list_head next;
};
@@ -612,7 +618,7 @@ struct drm_dp_mst_topology_state {
* @pbn_div: The current PBN divisor for this topology. The driver is expected to fill this
* out itself.
*/
- int pbn_div;
+ fixed20_12 pbn_div;
};
#define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base)
@@ -815,8 +821,11 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr);
bool drm_dp_read_mst_cap(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state);
-int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled);
-
+int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr,
+ const u8 *esi,
+ u8 *ack,
+ bool *handled);
+void drm_dp_mst_hpd_irq_send_new_request(struct drm_dp_mst_topology_mgr *mgr);
int
drm_dp_mst_detect_port(struct drm_connector *connector,
@@ -824,12 +833,17 @@ drm_dp_mst_detect_port(struct drm_connector *connector,
struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port);
-struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
+const struct drm_edid *drm_dp_mst_edid_read(struct drm_connector *connector,
+ struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port);
+struct edid *drm_dp_mst_get_edid(struct drm_connector *connector,
+ struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port);
-int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
- int link_rate, int link_lane_count);
+fixed20_12 drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
+ int link_rate, int link_lane_count);
-int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc);
+int drm_dp_calc_pbn_mode(int clock, int bpp);
void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap);
@@ -839,10 +853,13 @@ int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
struct drm_atomic_state *state,
struct drm_dp_mst_atomic_payload *payload);
-void drm_dp_remove_payload(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_topology_state *mst_state,
- const struct drm_dp_mst_atomic_payload *old_payload,
- struct drm_dp_mst_atomic_payload *new_payload);
+void drm_dp_remove_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_topology_state *mst_state,
+ struct drm_dp_mst_atomic_payload *payload);
+void drm_dp_remove_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_topology_state *mst_state,
+ const struct drm_dp_mst_atomic_payload *old_payload,
+ struct drm_dp_mst_atomic_payload *new_payload);
int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr);
@@ -876,6 +893,9 @@ drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state,
struct drm_dp_mst_atomic_payload *
drm_atomic_get_mst_payload_state(struct drm_dp_mst_topology_state *state,
struct drm_dp_mst_port *port);
+bool drm_dp_mst_port_downstream_of_parent(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ struct drm_dp_mst_port *parent);
int __must_check
drm_dp_atomic_find_time_slots(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr,
@@ -897,6 +917,10 @@ int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
struct drm_dp_query_stream_enc_status_ack_reply *status);
+int __must_check drm_dp_mst_atomic_check_mgr(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_topology_state *mst_state,
+ struct drm_dp_mst_port **failing_port);
int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state);
int __must_check drm_dp_mst_root_conn_atomic_check(struct drm_connector_state *new_conn_state,
struct drm_dp_mst_topology_mgr *mgr);
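The old single-call drm_dp_mst_hpd_irq() is replaced by a two-step flow: handle the event, ack the sink, then kick off any new request. A sketch with the AUX reads/writes elided:

/* Hypothetical ESI handler; esi is read from and ack written back to
 * the DP_SINK_COUNT_ESI register block via AUX (omitted here). */
static void example_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr)
{
	u8 esi[4] = {}, ack[4] = {};
	bool handled = false;

	/* ... read esi[] from the sink ... */
	drm_dp_mst_hpd_irq_handle_event(mgr, esi, ack, &handled);
	/* ... write ack[] back to the sink ... */

	if (handled)
		drm_dp_mst_hpd_irq_send_new_request(mgr);
}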
diff --git a/include/drm/display/drm_dsc_helper.h b/include/drm/display/drm_dsc_helper.h
index 8b41edbbabab..913aa2071232 100644
--- a/include/drm/display/drm_dsc_helper.h
+++ b/include/drm/display/drm_dsc_helper.h
@@ -10,11 +10,24 @@
#include <drm/display/drm_dsc.h>
+enum drm_dsc_params_type {
+ DRM_DSC_1_2_444,
+ DRM_DSC_1_1_PRE_SCR, /* legacy params from DSC 1.1 */
+ DRM_DSC_1_2_422,
+ DRM_DSC_1_2_420,
+};
+
void drm_dsc_dp_pps_header_init(struct dp_sdp_header *pps_header);
int drm_dsc_dp_rc_buffer_size(u8 rc_buffer_block_size, u8 rc_buffer_size);
void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_sdp,
const struct drm_dsc_config *dsc_cfg);
+void drm_dsc_set_const_params(struct drm_dsc_config *vdsc_cfg);
+void drm_dsc_set_rc_buf_thresh(struct drm_dsc_config *vdsc_cfg);
+int drm_dsc_setup_rc_params(struct drm_dsc_config *vdsc_cfg, enum drm_dsc_params_type type);
int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg);
+u8 drm_dsc_initial_scale_value(const struct drm_dsc_config *dsc);
+u32 drm_dsc_flatness_det_thresh(const struct drm_dsc_config *dsc);
+u32 drm_dsc_get_bpp_int(const struct drm_dsc_config *vdsc_cfg);
#endif /* _DRM_DSC_HELPER_H_ */
diff --git a/include/drm/drm_accel.h b/include/drm/drm_accel.h
index d4955062c77e..f4d3784b1dce 100644
--- a/include/drm/drm_accel.h
+++ b/include/drm/drm_accel.h
@@ -58,7 +58,8 @@ int accel_minor_alloc(void);
void accel_minor_replace(struct drm_minor *minor, int index);
void accel_set_device_instance_params(struct device *kdev, int index);
int accel_open(struct inode *inode, struct file *filp);
-void accel_debugfs_init(struct drm_minor *minor, int minor_id);
+void accel_debugfs_init(struct drm_device *dev);
+void accel_debugfs_register(struct drm_device *dev);
#else
@@ -89,7 +90,11 @@ static inline void accel_set_device_instance_params(struct device *kdev, int ind
{
}
-static inline void accel_debugfs_init(struct drm_minor *minor, int minor_id)
+static inline void accel_debugfs_init(struct drm_device *dev)
+{
+}
+
+static inline void accel_debugfs_register(struct drm_device *dev)
{
}
diff --git a/include/drm/drm_aperture.h b/include/drm/drm_aperture.h
index 7096703c3949..cbe33b49fd5d 100644
--- a/include/drm/drm_aperture.h
+++ b/include/drm/drm_aperture.h
@@ -13,14 +13,13 @@ int devm_aperture_acquire_from_firmware(struct drm_device *dev, resource_size_t
resource_size_t size);
int drm_aperture_remove_conflicting_framebuffers(resource_size_t base, resource_size_t size,
- bool primary, const struct drm_driver *req_driver);
+ const struct drm_driver *req_driver);
int drm_aperture_remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
const struct drm_driver *req_driver);
/**
* drm_aperture_remove_framebuffers - remove all existing framebuffers
- * @primary: also kick vga16fb if present
* @req_driver: requesting DRM driver
*
* This function removes all graphics device drivers. Use this function on systems
@@ -30,9 +29,9 @@ int drm_aperture_remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
* 0 on success, or a negative errno code otherwise
*/
static inline int
-drm_aperture_remove_framebuffers(bool primary, const struct drm_driver *req_driver)
+drm_aperture_remove_framebuffers(const struct drm_driver *req_driver)
{
- return drm_aperture_remove_conflicting_framebuffers(0, (resource_size_t)-1, primary,
+ return drm_aperture_remove_conflicting_framebuffers(0, (resource_size_t)-1,
req_driver);
}
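With the @primary argument gone, a PCI driver's probe-time takeover reduces to the following; example_driver is a placeholder for the driver's struct drm_driver:

static int example_probe(struct pci_dev *pdev)
{
	int ret;

	ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev,
							       &example_driver);
	if (ret)
		return ret;

	/* ... continue with normal device initialization ... */
	return 0;
}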
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 9a022caacf93..cf8e1220a4ac 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -1126,7 +1126,7 @@ struct drm_bridge_state {
struct drm_bus_cfg input_bus_cfg;
/**
- * @output_bus_cfg: input bus configuration
+ * @output_bus_cfg: output bus configuration
*/
struct drm_bus_cfg output_bus_cfg;
};
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index 536a0b0091c3..9aa0a05aa072 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -49,9 +49,8 @@ struct drm_private_state;
int drm_atomic_helper_check_modeset(struct drm_device *dev,
struct drm_atomic_state *state);
-int
-drm_atomic_helper_check_wb_encoder_state(struct drm_encoder *encoder,
- struct drm_connector_state *conn_state);
+int drm_atomic_helper_check_wb_connector_state(struct drm_connector *connector,
+ struct drm_atomic_state *state);
int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
const struct drm_crtc_state *crtc_state,
int min_scale,
@@ -97,6 +96,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
int drm_atomic_helper_prepare_planes(struct drm_device *dev,
struct drm_atomic_state *state);
+void drm_atomic_helper_unprepare_planes(struct drm_device *dev,
+ struct drm_atomic_state *state);
#define DRM_PLANE_COMMIT_ACTIVE_ONLY BIT(0)
#define DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET BIT(1)
diff --git a/include/drm/drm_auth.h b/include/drm/drm_auth.h
index ba248ca8866f..50131383ed81 100644
--- a/include/drm/drm_auth.h
+++ b/include/drm/drm_auth.h
@@ -33,24 +33,6 @@
#include <linux/wait.h>
struct drm_file;
-struct drm_hw_lock;
-
-/*
- * Legacy DRI1 locking data structure. Only here instead of in drm_legacy.h for
- * include ordering reasons.
- *
- * DO NOT USE.
- */
-struct drm_lock_data {
- struct drm_hw_lock *hw_lock;
- struct drm_file *file_priv;
- wait_queue_head_t lock_queue;
- unsigned long lock_time;
- spinlock_t spinlock;
- uint32_t kernel_waiters;
- uint32_t user_waiters;
- int idle_has_lock;
-};
/**
* struct drm_master - drm master structure
@@ -145,10 +127,6 @@ struct drm_master {
* Protected by &drm_device.mode_config's &drm_mode_config.idr_mutex.
*/
struct idr lessee_idr;
- /* private: */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
- struct drm_lock_data lock;
-#endif
};
struct drm_master *drm_master_get(struct drm_master *master);
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index bf964cdfb330..e39da5807ba7 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -32,10 +32,13 @@
#include <drm/drm_mode_object.h>
#include <drm/drm_modes.h>
+struct device_node;
+
struct drm_bridge;
struct drm_bridge_timings;
struct drm_connector;
struct drm_display_info;
+struct drm_minor;
struct drm_panel;
struct edid;
struct i2c_adapter;
@@ -191,7 +194,7 @@ struct drm_bridge_funcs {
* or &drm_encoder_helper_funcs.dpms hook.
*
* The bridge must assume that the display pipe (i.e. clocks and timing
- * singals) feeding it is no longer running when this callback is
+ * signals) feeding it is no longer running when this callback is
* called.
*
* The @post_disable callback is optional.
@@ -715,10 +718,8 @@ struct drm_bridge {
struct drm_encoder *encoder;
/** @chain_node: used to form a bridge chain */
struct list_head chain_node;
-#ifdef CONFIG_OF
/** @of_node: device node pointer to the bridge */
struct device_node *of_node;
-#endif
/** @list: to keep track of all added bridges */
struct list_head list;
/**
diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h
index 572077ff8ae7..a5b39fc01003 100644
--- a/include/drm/drm_buddy.h
+++ b/include/drm/drm_buddy.h
@@ -22,8 +22,9 @@
start__ >= max__ || size__ > max__ - start__; \
})
-#define DRM_BUDDY_RANGE_ALLOCATION (1 << 0)
-#define DRM_BUDDY_TOPDOWN_ALLOCATION (1 << 1)
+#define DRM_BUDDY_RANGE_ALLOCATION BIT(0)
+#define DRM_BUDDY_TOPDOWN_ALLOCATION BIT(1)
+#define DRM_BUDDY_CONTIGUOUS_ALLOCATION BIT(2)
struct drm_buddy_block {
#define DRM_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
@@ -155,5 +156,4 @@ void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p);
void drm_buddy_block_print(struct drm_buddy *mm,
struct drm_buddy_block *block,
struct drm_printer *p);
-
#endif
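
A sketch of requesting a physically contiguous range with the new flag; the drm_buddy_alloc_blocks() call shape and the sizes are assumptions for illustration:

#include <linux/sizes.h>
#include <drm/drm_buddy.h>

static int foo_alloc_contiguous(struct drm_buddy *mm,
				struct list_head *blocks)
{
	/* 8 MiB, physically contiguous, anywhere in the managed range */
	return drm_buddy_alloc_blocks(mm, 0, mm->size, SZ_8M, SZ_4K,
				      blocks,
				      DRM_BUDDY_CONTIGUOUS_ALLOCATION);
}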
diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h
index c0a14b40c039..d47458ecdac4 100644
--- a/include/drm/drm_client.h
+++ b/include/drm/drm_client.h
@@ -195,6 +195,6 @@ int drm_client_modeset_dpms(struct drm_client_dev *client, int mode);
drm_for_each_connector_iter(connector, iter) \
if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
-void drm_client_debugfs_init(struct drm_minor *minor);
+void drm_client_debugfs_init(struct drm_device *dev);
#endif
diff --git a/include/drm/drm_color_mgmt.h b/include/drm/drm_color_mgmt.h
index 81c298488b0c..ed81741036d7 100644
--- a/include/drm/drm_color_mgmt.h
+++ b/include/drm/drm_color_mgmt.h
@@ -24,6 +24,7 @@
#define __DRM_COLOR_MGMT_H__
#include <linux/ctype.h>
+#include <linux/math64.h>
#include <drm/drm_property.h>
struct drm_crtc;
@@ -36,20 +37,17 @@ struct drm_plane;
*
* Extract a degamma/gamma LUT value provided by user (in the form of
* &drm_color_lut entries) and round it to the precision supported by the
- * hardware.
+ * hardware, following OpenGL int<->float conversion rules
+ * (see e.g. OpenGL 4.6 specification - 2.3.5 Fixed-Point Data Conversions).
*/
static inline u32 drm_color_lut_extract(u32 user_input, int bit_precision)
{
- u32 val = user_input;
- u32 max = 0xffff >> (16 - bit_precision);
-
- /* Round only if we're not using full precision. */
- if (bit_precision < 16) {
- val += 1UL << (16 - bit_precision - 1);
- val >>= 16 - bit_precision;
- }
-
- return clamp_val(val, 0, max);
+ if (bit_precision > 16)
+ return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(user_input, (1 << bit_precision) - 1),
+ (1 << 16) - 1);
+ else
+ return DIV_ROUND_CLOSEST(user_input * ((1 << bit_precision) - 1),
+ (1 << 16) - 1);
}
u64 drm_color_ctm_s31_32_to_qm_n(u64 user_input, u32 m, u32 n);
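
A sketch of loading a userspace LUT into 10-bit hardware under the new scaling. The endpoint behavior follows from the formula: 0x0000 maps to 0x000, 0xffff maps exactly to 0x3ff, and 0x8000 rounds to 0x200:

#include <drm/drm_color_mgmt.h>
#include <uapi/drm/drm_mode.h>

static void foo_load_gamma(const struct drm_color_lut *lut, int len,
			   u16 *hw_lut)
{
	int i;

	/* scale each 16-bit user entry to the 10-bit hardware range */
	for (i = 0; i < len; i++)
		hw_lut[i] = drm_color_lut_extract(lut[i].red, 10);
}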
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 7b5048516185..fe88d7fc6b8f 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -30,6 +30,7 @@
#include <linux/notifier.h>
#include <drm/drm_mode_object.h>
#include <drm/drm_util.h>
+#include <drm/drm_property.h>
#include <uapi/drm/drm_mode.h>
@@ -199,6 +200,11 @@ enum drm_connector_tv_mode {
*/
DRM_MODE_TV_MODE_SECAM,
+ /**
+ * @DRM_MODE_TV_MODE_MAX: Number of analog TV output modes.
+ *
+ * Internal implementation detail; this is not uABI.
+ */
DRM_MODE_TV_MODE_MAX,
};
@@ -419,37 +425,107 @@ enum drm_privacy_screen_status {
PRIVACY_SCREEN_ENABLED_LOCKED,
};
-/*
- * This is a consolidated colorimetry list supported by HDMI and
+/**
+ * enum drm_colorspace - color space
+ *
+ * This enum is a consolidated colorimetry list supported by HDMI and
* DP protocol standard. The respective connectors will register
* a property with the subset of this list (supported by that
* respective protocol). Userspace will set the colorspace through
* a colorspace property which will be created and exposed to
* userspace.
+ *
+ * DP definitions come from the DP v2.0 spec
+ * HDMI definitions come from the CTA-861-H spec
+ *
+ * A note on YCC and RGB variants:
+ *
+ * Since userspace is not aware of the encoding on the wire
+ * (RGB or YCbCr), drivers are free to pick the appropriate
+ * variant, regardless of what userspace selects. E.g., if
+ * BT2020_RGB is selected by userspace a driver will pick
+ * BT2020_YCC if the encoding on the wire is YUV444 or YUV420.
+ *
+ * @DRM_MODE_COLORIMETRY_DEFAULT:
+ * Driver specific behavior.
+ * @DRM_MODE_COLORIMETRY_NO_DATA:
+ * Driver specific behavior.
+ * @DRM_MODE_COLORIMETRY_SMPTE_170M_YCC:
+ * (HDMI)
+ * SMPTE ST 170M colorimetry format
+ * @DRM_MODE_COLORIMETRY_BT709_YCC:
+ * (HDMI, DP)
+ * ITU-R BT.709 colorimetry format
+ * @DRM_MODE_COLORIMETRY_XVYCC_601:
+ * (HDMI, DP)
+ * xvYCC601 colorimetry format
+ * @DRM_MODE_COLORIMETRY_XVYCC_709:
+ * (HDMI, DP)
+ * xvYCC709 colorimetry format
+ * @DRM_MODE_COLORIMETRY_SYCC_601:
+ * (HDMI, DP)
+ * sYCC601 colorimetry format
+ * @DRM_MODE_COLORIMETRY_OPYCC_601:
+ * (HDMI, DP)
+ * opYCC601 colorimetry format
+ * @DRM_MODE_COLORIMETRY_OPRGB:
+ * (HDMI, DP)
+ * opRGB colorimetry format
+ * @DRM_MODE_COLORIMETRY_BT2020_CYCC:
+ * (HDMI, DP)
+ * ITU-R BT.2020 Y'c C'bc C'rc (constant luminance) colorimetry format
+ * @DRM_MODE_COLORIMETRY_BT2020_RGB:
+ * (HDMI, DP)
+ * ITU-R BT.2020 R' G' B' colorimetry format
+ * @DRM_MODE_COLORIMETRY_BT2020_YCC:
+ * (HDMI, DP)
+ * ITU-R BT.2020 Y' C'b C'r colorimetry format
+ * @DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
+ * (HDMI)
+ * SMPTE ST 2113 P3D65 colorimetry format
+ * @DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
+ * (HDMI)
+ * SMPTE ST 2113 P3DCI colorimetry format
+ * @DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED:
+ * (DP)
+ * RGB wide gamut fixed point colorimetry format
+ * @DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT:
+ * (DP)
+ * RGB wide gamut floating point
+ * (scRGB (IEC 61966-2-2)) colorimetry format
+ * @DRM_MODE_COLORIMETRY_BT601_YCC:
+ * (DP)
+ * ITU-R BT.601 colorimetry format
+ * The DP spec does not say whether this is the 525 or the 625
+ * line version.
+ * @DRM_MODE_COLORIMETRY_COUNT:
+ * Not a valid value; merely used for counting.
*/
-
-/* For Default case, driver will set the colorspace */
-#define DRM_MODE_COLORIMETRY_DEFAULT 0
-/* CEA 861 Normal Colorimetry options */
-#define DRM_MODE_COLORIMETRY_NO_DATA 0
-#define DRM_MODE_COLORIMETRY_SMPTE_170M_YCC 1
-#define DRM_MODE_COLORIMETRY_BT709_YCC 2
-/* CEA 861 Extended Colorimetry Options */
-#define DRM_MODE_COLORIMETRY_XVYCC_601 3
-#define DRM_MODE_COLORIMETRY_XVYCC_709 4
-#define DRM_MODE_COLORIMETRY_SYCC_601 5
-#define DRM_MODE_COLORIMETRY_OPYCC_601 6
-#define DRM_MODE_COLORIMETRY_OPRGB 7
-#define DRM_MODE_COLORIMETRY_BT2020_CYCC 8
-#define DRM_MODE_COLORIMETRY_BT2020_RGB 9
-#define DRM_MODE_COLORIMETRY_BT2020_YCC 10
-/* Additional Colorimetry extension added as part of CTA 861.G */
-#define DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65 11
-#define DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER 12
-/* Additional Colorimetry Options added for DP 1.4a VSC Colorimetry Format */
-#define DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED 13
-#define DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT 14
-#define DRM_MODE_COLORIMETRY_BT601_YCC 15
+enum drm_colorspace {
+ /* For Default case, driver will set the colorspace */
+ DRM_MODE_COLORIMETRY_DEFAULT = 0,
+ /* CEA 861 Normal Colorimetry options */
+ DRM_MODE_COLORIMETRY_NO_DATA = 0,
+ DRM_MODE_COLORIMETRY_SMPTE_170M_YCC = 1,
+ DRM_MODE_COLORIMETRY_BT709_YCC = 2,
+ /* CEA 861 Extended Colorimetry Options */
+ DRM_MODE_COLORIMETRY_XVYCC_601 = 3,
+ DRM_MODE_COLORIMETRY_XVYCC_709 = 4,
+ DRM_MODE_COLORIMETRY_SYCC_601 = 5,
+ DRM_MODE_COLORIMETRY_OPYCC_601 = 6,
+ DRM_MODE_COLORIMETRY_OPRGB = 7,
+ DRM_MODE_COLORIMETRY_BT2020_CYCC = 8,
+ DRM_MODE_COLORIMETRY_BT2020_RGB = 9,
+ DRM_MODE_COLORIMETRY_BT2020_YCC = 10,
+ /* Additional Colorimetry extension added as part of CTA 861.G */
+ DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65 = 11,
+ DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER = 12,
+ /* Additional Colorimetry Options added for DP 1.4a VSC Colorimetry Format */
+ DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED = 13,
+ DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT = 14,
+ DRM_MODE_COLORIMETRY_BT601_YCC = 15,
+ DRM_MODE_COLORIMETRY_COUNT
+};
/**
* enum drm_bus_flags - bus_flags info for &drm_display_info
@@ -654,6 +730,14 @@ struct drm_display_info {
bool is_hdmi;
/**
+ * @has_audio: True if the sink supports audio.
+ *
+ * This field shall be used instead of calling
+ * drm_detect_monitor_audio() when possible.
+ */
+ bool has_audio;
+
+ /**
* @has_hdmi_infoframe: Does the sink support the HDMI infoframe?
*/
bool has_hdmi_infoframe;
@@ -733,6 +817,14 @@ struct drm_display_info {
* @quirks: EDID based quirks. Internal to EDID parsing.
*/
u32 quirks;
+
+ /**
+ * @source_physical_address: Source Physical Address from HDMI
+ * Vendor-Specific Data Block, for CEC usage.
+ *
+ * Defaults to CEC_PHYS_ADDR_INVALID (0xffff).
+ */
+ u16 source_physical_address;
};
int drm_display_info_set_bus_formats(struct drm_display_info *info,
@@ -901,7 +993,7 @@ struct drm_connector_state {
* colorspace change on Sink. This is most commonly used to switch
* to wider color gamuts like BT2020.
*/
- u32 colorspace;
+ enum drm_colorspace colorspace;
/**
* @writeback_job: Writeback job for writeback connectors
@@ -1244,7 +1336,8 @@ struct drm_connector_funcs {
* This will get called when a hotplug-event for a drm-connector
* has been received from a source outside the display driver / device.
*/
- void (*oob_hotplug_event)(struct drm_connector *connector);
+ void (*oob_hotplug_event)(struct drm_connector *connector,
+ enum drm_connector_status status);
/**
* @debugfs_init:
@@ -1888,7 +1981,8 @@ drm_connector_is_unregistered(struct drm_connector *connector)
DRM_CONNECTOR_UNREGISTERED;
}
-void drm_connector_oob_hotplug_event(struct fwnode_handle *connector_fwnode);
+void drm_connector_oob_hotplug_event(struct fwnode_handle *connector_fwnode,
+ enum drm_connector_status status);
const char *drm_get_connector_type_name(unsigned int connector_type);
const char *drm_get_connector_status_name(enum drm_connector_status status);
const char *drm_get_subpixel_order_name(enum subpixel_order order);
@@ -1925,8 +2019,10 @@ int drm_connector_attach_hdr_output_metadata_property(struct drm_connector *conn
bool drm_connector_atomic_hdr_metadata_equal(struct drm_connector_state *old_state,
struct drm_connector_state *new_state);
int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
-int drm_mode_create_hdmi_colorspace_property(struct drm_connector *connector);
-int drm_mode_create_dp_colorspace_property(struct drm_connector *connector);
+int drm_mode_create_hdmi_colorspace_property(struct drm_connector *connector,
+ u32 supported_colorspaces);
+int drm_mode_create_dp_colorspace_property(struct drm_connector *connector,
+ u32 supported_colorspaces);
int drm_mode_create_content_type_property(struct drm_device *dev);
int drm_mode_create_suggested_offset_properties(struct drm_device *dev);
@@ -2009,6 +2105,7 @@ void drm_connector_list_iter_end(struct drm_connector_list_iter *iter);
bool drm_connector_has_possible_encoder(struct drm_connector *connector,
struct drm_encoder *encoder);
+const char *drm_get_colorspace_name(enum drm_colorspace colorspace);
/**
* drm_for_each_connector_iter - connector_list iterator macro
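
With the extra argument a driver advertises only the colorimetry modes it implements; the mask is built from bit positions of enum drm_colorspace. A hypothetical sketch:

#include <linux/bits.h>
#include <drm/drm_connector.h>

static int foo_attach_colorspace(struct drm_connector *connector)
{
	u32 supported = BIT(DRM_MODE_COLORIMETRY_DEFAULT) |
			BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) |
			BIT(DRM_MODE_COLORIMETRY_BT2020_YCC);

	return drm_mode_create_hdmi_colorspace_property(connector,
							supported);
}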
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 8e1cbc75143e..8b48a1974da3 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -77,11 +77,6 @@ struct drm_plane_helper_funcs;
* intended to indicate whether a full modeset is needed, rather than strictly
* describing what has changed in a commit. See also:
* drm_atomic_crtc_needs_modeset()
- *
- * WARNING: Transitional helpers (like drm_helper_crtc_mode_set() or
- * drm_helper_crtc_mode_set_base()) do not maintain many of the derived control
- * state like @plane_mask so drivers not converted over to atomic helpers should
- * not rely on these being accurate!
*/
struct drm_crtc_state {
/** @crtc: backpointer to the CRTC */
diff --git a/include/drm/drm_debugfs.h b/include/drm/drm_debugfs.h
index 7616f457ce70..cf06cee4343f 100644
--- a/include/drm/drm_debugfs.h
+++ b/include/drm/drm_debugfs.h
@@ -34,6 +34,22 @@
#include <linux/types.h>
#include <linux/seq_file.h>
+
+#include <drm/drm_gpuvm.h>
+
+/**
+ * DRM_DEBUGFS_GPUVA_INFO - &drm_info_list entry to dump a GPU VA space
+ * @show: the &drm_info_list's show callback
+ * @data: driver private data
+ *
+ * Drivers should use this macro to define a &drm_info_list entry to provide a
+ * debugfs file for dumping the GPU VA space regions and mappings.
+ *
+ * For each DRM GPU VA space drivers should call drm_debugfs_gpuva_info() from
+ * their @show callback.
+ */
+#define DRM_DEBUGFS_GPUVA_INFO(show, data) {"gpuvas", show, DRIVER_GEM_GPUVA, data}
+
/**
* struct drm_info_list - debugfs info list entry
*
@@ -126,14 +142,17 @@ struct drm_debugfs_entry {
void drm_debugfs_create_files(const struct drm_info_list *files,
int count, struct dentry *root,
struct drm_minor *minor);
-int drm_debugfs_remove_files(const struct drm_info_list *files,
- int count, struct drm_minor *minor);
+int drm_debugfs_remove_files(const struct drm_info_list *files, int count,
+ struct dentry *root, struct drm_minor *minor);
void drm_debugfs_add_file(struct drm_device *dev, const char *name,
int (*show)(struct seq_file*, void*), void *data);
void drm_debugfs_add_files(struct drm_device *dev,
const struct drm_debugfs_info *files, int count);
+
+int drm_debugfs_gpuva_info(struct seq_file *m,
+ struct drm_gpuvm *gpuvm);
#else
static inline void drm_debugfs_create_files(const struct drm_info_list *files,
int count, struct dentry *root,
@@ -141,7 +160,8 @@ static inline void drm_debugfs_create_files(const struct drm_info_list *files,
{}
static inline int drm_debugfs_remove_files(const struct drm_info_list *files,
- int count, struct drm_minor *minor)
+ int count, struct dentry *root,
+ struct drm_minor *minor)
{
return 0;
}
@@ -155,6 +175,12 @@ static inline void drm_debugfs_add_files(struct drm_device *dev,
const struct drm_debugfs_info *files,
int count)
{}
+
+static inline int drm_debugfs_gpuva_info(struct seq_file *m,
+ struct drm_gpuvm *gpuvm)
+{
+ return 0;
+}
#endif
#endif /* _DRM_DEBUGFS_H_ */
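
A registration sketch modeled on the macro's kerneldoc; foo_vm and the other foo_* names are hypothetical, and m->private carries the &drm_info_node for drm_info_list-based files:

#include <drm/drm_debugfs.h>

static struct drm_gpuvm foo_vm; /* placeholder for the driver's VA space */

static int foo_show_gpuvas(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_gpuvm *gpuvm = node->info_ent->data;

	/* dump all regions and mappings of the VA space */
	return drm_debugfs_gpuva_info(m, gpuvm);
}

static const struct drm_info_list foo_debugfs_list[] = {
	DRM_DEBUGFS_GPUVA_INFO(foo_show_gpuvas, &foo_vm),
};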
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index 7cf4afae2e79..63767cf24371 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -6,7 +6,6 @@
#include <linux/mutex.h>
#include <linux/idr.h>
-#include <drm/drm_legacy.h>
#include <drm/drm_mode_config.h>
struct drm_driver;
@@ -153,8 +152,8 @@ struct drm_device {
*
* Lock for others (not &drm_minor.master and &drm_file.is_master)
*
- * WARNING:
- * Only drivers annotated with DRIVER_LEGACY should be using this.
+ * TODO: This lock used to be the BKL of the DRM subsystem. Move the
+ * lock into i915, which is the only remaining user.
*/
struct mutex struct_mutex;
@@ -312,85 +311,11 @@ struct drm_device {
struct drm_fb_helper *fb_helper;
/**
- * @debugfs_mutex:
+ * @debugfs_root:
*
- * Protects &debugfs_list access.
+ * Root directory for debugfs files.
*/
- struct mutex debugfs_mutex;
-
- /**
- * @debugfs_list:
- *
- * List of debugfs files to be created by the DRM device. The files
- * must be added during drm_dev_register().
- */
- struct list_head debugfs_list;
-
- /* Everything below here is for legacy driver, never use! */
- /* private: */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
- /* List of devices per driver for stealth attach cleanup */
- struct list_head legacy_dev_list;
-
-#ifdef __alpha__
- /** @hose: PCI hose, only used on ALPHA platforms. */
- struct pci_controller *hose;
-#endif
-
- /* AGP data */
- struct drm_agp_head *agp;
-
- /* Context handle management - linked list of context handles */
- struct list_head ctxlist;
-
- /* Context handle management - mutex for &ctxlist */
- struct mutex ctxlist_mutex;
-
- /* Context handle management */
- struct idr ctx_idr;
-
- /* Memory management - linked list of regions */
- struct list_head maplist;
-
- /* Memory management - user token hash table for maps */
- struct drm_open_hash map_hash;
-
- /* Context handle management - list of vmas (for debugging) */
- struct list_head vmalist;
-
- /* Optional pointer for DMA support */
- struct drm_device_dma *dma;
-
- /* Context swapping flag */
- __volatile__ long context_flag;
-
- /* Last current context */
- int last_context;
-
- /* Lock for &buf_use and a few other things. */
- spinlock_t buf_lock;
-
- /* Usage counter for buffers in use -- cannot alloc */
- int buf_use;
-
- /* Buffer allocation in progress */
- atomic_t buf_alloc;
-
- struct {
- int context;
- struct drm_hw_lock *lock;
- } sigdata;
-
- struct drm_local_map *agp_buffer_map;
- unsigned int agp_buffer_token;
-
- /* Scatter gather memory */
- struct drm_sg_mem *sg;
-
- /* IRQs */
- bool irq_enabled;
- int irq;
-#endif
+ struct dentry *debugfs_root;
};
#endif
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index b419c59c4bef..8878260d7529 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -104,6 +104,21 @@ enum drm_driver_feature {
* acceleration should be handled by two drivers that are connected using auxiliary bus.
*/
DRIVER_COMPUTE_ACCEL = BIT(7),
+ /**
+ * @DRIVER_GEM_GPUVA:
+ *
+ * Driver supports user defined GPU VA bindings for GEM objects.
+ */
+ DRIVER_GEM_GPUVA = BIT(8),
+ /**
+ * @DRIVER_CURSOR_HOTSPOT:
+ *
+ * Driver supports and requires cursor hotspot information in the
+ * cursor plane (e.g. cursor plane has to actually track the mouse
+ * cursor and the clients are required to set hotspot in order for
+ * the cursor planes to work correctly).
+ */
+ DRIVER_CURSOR_HOTSPOT = BIT(9),
/* IMPORTANT: Below are all the legacy flags, add new ones above. */
@@ -304,22 +319,14 @@ struct drm_driver {
/**
* @prime_handle_to_fd:
*
- * Main PRIME export function. Should be implemented with
- * drm_gem_prime_handle_to_fd() for GEM based drivers.
- *
- * For an in-depth discussion see :ref:`PRIME buffer sharing
- * documentation <prime_buffer_sharing>`.
+ * PRIME export function. Only used by vmwgfx.
*/
int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv,
uint32_t handle, uint32_t flags, int *prime_fd);
/**
* @prime_fd_to_handle:
*
- * Main PRIME import function. Should be implemented with
- * drm_gem_prime_fd_to_handle() for GEM based drivers.
- *
- * For an in-depth discussion see :ref:`PRIME buffer sharing
- * documentation <prime_buffer_sharing>`.
+ * PRIME import function. Only used by vmwgfx.
*/
int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv,
int prime_fd, uint32_t *handle);
@@ -343,20 +350,6 @@ struct drm_driver {
struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt);
- /**
- * @gem_prime_mmap:
- *
- * mmap hook for GEM drivers, used to implement dma-buf mmap in the
- * PRIME helpers.
- *
- * This hook only exists for historical reasons. Drivers must use
- * drm_gem_prime_mmap() to implement it.
- *
- * FIXME: Convert all drivers to implement mmap in struct
- * &drm_gem_object_funcs and inline drm_gem_prime_mmap() into
- * its callers. This hook should be removed afterwards.
- */
- int (*gem_prime_mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma);
/**
* @dumb_create:
@@ -401,6 +394,13 @@ struct drm_driver {
struct drm_device *dev, uint32_t handle,
uint64_t *offset);
+ /**
+ * @show_fdinfo:
+ *
+ * Print device specific fdinfo. See Documentation/gpu/drm-usage-stats.rst.
+ */
+ void (*show_fdinfo)(struct drm_printer *p, struct drm_file *f);
+
/** @major: driver major number */
int major;
/** @minor: driver minor number */
@@ -442,25 +442,6 @@ struct drm_driver {
* some examples.
*/
const struct file_operations *fops;
-
-#ifdef CONFIG_DRM_LEGACY
- /* Everything below here is for legacy driver, never use! */
- /* private: */
-
- int (*firstopen) (struct drm_device *);
- void (*preclose) (struct drm_device *, struct drm_file *file_priv);
- int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
- int (*dma_quiescent) (struct drm_device *);
- int (*context_dtor) (struct drm_device *dev, int context);
- irqreturn_t (*irq_handler)(int irq, void *arg);
- void (*irq_preinstall)(struct drm_device *dev);
- int (*irq_postinstall)(struct drm_device *dev);
- void (*irq_uninstall)(struct drm_device *dev);
- u32 (*get_vblank_counter)(struct drm_device *dev, unsigned int pipe);
- int (*enable_vblank)(struct drm_device *dev, unsigned int pipe);
- void (*disable_vblank)(struct drm_device *dev, unsigned int pipe);
- int dev_priv_size;
-#endif
};
void *__devm_drm_dev_alloc(struct device *parent,
@@ -590,4 +571,12 @@ static inline bool drm_firmware_drivers_only(void)
return video_firmware_drivers_only();
}
+#if defined(CONFIG_DEBUG_FS)
+void drm_debugfs_dev_init(struct drm_device *dev, struct dentry *root);
+#else
+static inline void drm_debugfs_dev_init(struct drm_device *dev, struct dentry *root)
+{
+}
+#endif
+
#endif
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 571885d32907..518d1b8106c7 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -61,15 +61,9 @@ struct std_timing {
u8 vfreq_aspect;
} __attribute__((packed));
-#define DRM_EDID_PT_SYNC_MASK (3 << 3)
-# define DRM_EDID_PT_ANALOG_CSYNC (0 << 3)
-# define DRM_EDID_PT_BIPOLAR_ANALOG_CSYNC (1 << 3)
-# define DRM_EDID_PT_DIGITAL_CSYNC (2 << 3)
-# define DRM_EDID_PT_CSYNC_ON_RGB (1 << 1) /* analog csync only */
-# define DRM_EDID_PT_CSYNC_SERRATE (1 << 2)
-# define DRM_EDID_PT_DIGITAL_SEPARATE_SYNC (3 << 3)
-# define DRM_EDID_PT_HSYNC_POSITIVE (1 << 1) /* also digital csync */
-# define DRM_EDID_PT_VSYNC_POSITIVE (1 << 2)
+#define DRM_EDID_PT_HSYNC_POSITIVE (1 << 1)
+#define DRM_EDID_PT_VSYNC_POSITIVE (1 << 2)
+#define DRM_EDID_PT_SEPARATE_SYNC (3 << 3)
#define DRM_EDID_PT_STEREO (1 << 5)
#define DRM_EDID_PT_INTERLACED (1 << 7)
@@ -275,64 +269,6 @@ struct detailed_timing {
#define DRM_EDID_DSC_MAX_SLICES 0xf
#define DRM_EDID_DSC_TOTAL_CHUNK_KBYTES 0x3f
-/* ELD Header Block */
-#define DRM_ELD_HEADER_BLOCK_SIZE 4
-
-#define DRM_ELD_VER 0
-# define DRM_ELD_VER_SHIFT 3
-# define DRM_ELD_VER_MASK (0x1f << 3)
-# define DRM_ELD_VER_CEA861D (2 << 3) /* supports 861D or below */
-# define DRM_ELD_VER_CANNED (0x1f << 3)
-
-#define DRM_ELD_BASELINE_ELD_LEN 2 /* in dwords! */
-
-/* ELD Baseline Block for ELD_Ver == 2 */
-#define DRM_ELD_CEA_EDID_VER_MNL 4
-# define DRM_ELD_CEA_EDID_VER_SHIFT 5
-# define DRM_ELD_CEA_EDID_VER_MASK (7 << 5)
-# define DRM_ELD_CEA_EDID_VER_NONE (0 << 5)
-# define DRM_ELD_CEA_EDID_VER_CEA861 (1 << 5)
-# define DRM_ELD_CEA_EDID_VER_CEA861A (2 << 5)
-# define DRM_ELD_CEA_EDID_VER_CEA861BCD (3 << 5)
-# define DRM_ELD_MNL_SHIFT 0
-# define DRM_ELD_MNL_MASK (0x1f << 0)
-
-#define DRM_ELD_SAD_COUNT_CONN_TYPE 5
-# define DRM_ELD_SAD_COUNT_SHIFT 4
-# define DRM_ELD_SAD_COUNT_MASK (0xf << 4)
-# define DRM_ELD_CONN_TYPE_SHIFT 2
-# define DRM_ELD_CONN_TYPE_MASK (3 << 2)
-# define DRM_ELD_CONN_TYPE_HDMI (0 << 2)
-# define DRM_ELD_CONN_TYPE_DP (1 << 2)
-# define DRM_ELD_SUPPORTS_AI (1 << 1)
-# define DRM_ELD_SUPPORTS_HDCP (1 << 0)
-
-#define DRM_ELD_AUD_SYNCH_DELAY 6 /* in units of 2 ms */
-# define DRM_ELD_AUD_SYNCH_DELAY_MAX 0xfa /* 500 ms */
-
-#define DRM_ELD_SPEAKER 7
-# define DRM_ELD_SPEAKER_MASK 0x7f
-# define DRM_ELD_SPEAKER_RLRC (1 << 6)
-# define DRM_ELD_SPEAKER_FLRC (1 << 5)
-# define DRM_ELD_SPEAKER_RC (1 << 4)
-# define DRM_ELD_SPEAKER_RLR (1 << 3)
-# define DRM_ELD_SPEAKER_FC (1 << 2)
-# define DRM_ELD_SPEAKER_LFE (1 << 1)
-# define DRM_ELD_SPEAKER_FLR (1 << 0)
-
-#define DRM_ELD_PORT_ID 8 /* offsets 8..15 inclusive */
-# define DRM_ELD_PORT_ID_LEN 8
-
-#define DRM_ELD_MANUFACTURER_NAME0 16
-#define DRM_ELD_MANUFACTURER_NAME1 17
-
-#define DRM_ELD_PRODUCT_CODE0 18
-#define DRM_ELD_PRODUCT_CODE1 19
-
-#define DRM_ELD_MONITOR_NAME_STRING 20 /* offsets 20..(20+mnl-1) inclusive */
-
-#define DRM_ELD_CEA_SAD(mnl, sad) (20 + (mnl) + 3 * (sad))
-
struct edid {
u8 header[8];
/* Vendor & product info */
@@ -393,11 +329,6 @@ int drm_edid_to_speaker_allocation(const struct edid *edid, u8 **sadb);
int drm_av_sync_delay(struct drm_connector *connector,
const struct drm_display_mode *mode);
-#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
-int __drm_set_edid_firmware_path(const char *path);
-int __drm_get_edid_firmware_path(char *buf, size_t bufsize);
-#endif
-
bool drm_edid_are_equal(const struct edid *edid1, const struct edid *edid2);
int
@@ -416,96 +347,6 @@ drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
enum hdmi_quantization_range rgb_quant_range);
/**
- * drm_eld_mnl - Get ELD monitor name length in bytes.
- * @eld: pointer to an eld memory structure with mnl set
- */
-static inline int drm_eld_mnl(const uint8_t *eld)
-{
- return (eld[DRM_ELD_CEA_EDID_VER_MNL] & DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT;
-}
-
-/**
- * drm_eld_sad - Get ELD SAD structures.
- * @eld: pointer to an eld memory structure with sad_count set
- */
-static inline const uint8_t *drm_eld_sad(const uint8_t *eld)
-{
- unsigned int ver, mnl;
-
- ver = (eld[DRM_ELD_VER] & DRM_ELD_VER_MASK) >> DRM_ELD_VER_SHIFT;
- if (ver != 2 && ver != 31)
- return NULL;
-
- mnl = drm_eld_mnl(eld);
- if (mnl > 16)
- return NULL;
-
- return eld + DRM_ELD_CEA_SAD(mnl, 0);
-}
-
-/**
- * drm_eld_sad_count - Get ELD SAD count.
- * @eld: pointer to an eld memory structure with sad_count set
- */
-static inline int drm_eld_sad_count(const uint8_t *eld)
-{
- return (eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_SAD_COUNT_MASK) >>
- DRM_ELD_SAD_COUNT_SHIFT;
-}
-
-/**
- * drm_eld_calc_baseline_block_size - Calculate baseline block size in bytes
- * @eld: pointer to an eld memory structure with mnl and sad_count set
- *
- * This is a helper for determining the payload size of the baseline block, in
- * bytes, for e.g. setting the Baseline_ELD_Len field in the ELD header block.
- */
-static inline int drm_eld_calc_baseline_block_size(const uint8_t *eld)
-{
- return DRM_ELD_MONITOR_NAME_STRING - DRM_ELD_HEADER_BLOCK_SIZE +
- drm_eld_mnl(eld) + drm_eld_sad_count(eld) * 3;
-}
-
-/**
- * drm_eld_size - Get ELD size in bytes
- * @eld: pointer to a complete eld memory structure
- *
- * The returned value does not include the vendor block. It's vendor specific,
- * and comprises of the remaining bytes in the ELD memory buffer after
- * drm_eld_size() bytes of header and baseline block.
- *
- * The returned value is guaranteed to be a multiple of 4.
- */
-static inline int drm_eld_size(const uint8_t *eld)
-{
- return DRM_ELD_HEADER_BLOCK_SIZE + eld[DRM_ELD_BASELINE_ELD_LEN] * 4;
-}
-
-/**
- * drm_eld_get_spk_alloc - Get speaker allocation
- * @eld: pointer to an ELD memory structure
- *
- * The returned value is the speakers mask. User has to use %DRM_ELD_SPEAKER
- * field definitions to identify speakers.
- */
-static inline u8 drm_eld_get_spk_alloc(const uint8_t *eld)
-{
- return eld[DRM_ELD_SPEAKER] & DRM_ELD_SPEAKER_MASK;
-}
-
-/**
- * drm_eld_get_conn_type - Get device type hdmi/dp connected
- * @eld: pointer to an ELD memory structure
- *
- * The caller need to use %DRM_ELD_CONN_TYPE_HDMI or %DRM_ELD_CONN_TYPE_DP to
- * identify the display type connected.
- */
-static inline u8 drm_eld_get_conn_type(const uint8_t *eld)
-{
- return eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_CONN_TYPE_MASK;
-}
-
-/**
* drm_edid_decode_mfg_id - Decode the manufacturer ID
* @mfg_id: The manufacturer ID
* @vend: A 4-byte buffer to store the 3-letter vendor string plus a '\0'
@@ -613,9 +454,12 @@ const struct drm_edid *drm_edid_read_ddc(struct drm_connector *connector,
const struct drm_edid *drm_edid_read_custom(struct drm_connector *connector,
int (*read_block)(void *context, u8 *buf, unsigned int block, size_t len),
void *context);
+const struct drm_edid *drm_edid_read_switcheroo(struct drm_connector *connector,
+ struct i2c_adapter *adapter);
int drm_edid_connector_update(struct drm_connector *connector,
const struct drm_edid *edid);
int drm_edid_connector_add_modes(struct drm_connector *connector);
+bool drm_edid_is_digital(const struct drm_edid *drm_edid);
const u8 *drm_find_edid_extension(const struct drm_edid *drm_edid,
int ext_id, int *ext_index);
diff --git a/include/drm/drm_eld.h b/include/drm/drm_eld.h
new file mode 100644
index 000000000000..0a88d10b28b0
--- /dev/null
+++ b/include/drm/drm_eld.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __DRM_ELD_H__
+#define __DRM_ELD_H__
+
+#include <linux/types.h>
+
+struct cea_sad;
+
+/* ELD Header Block */
+#define DRM_ELD_HEADER_BLOCK_SIZE 4
+
+#define DRM_ELD_VER 0
+# define DRM_ELD_VER_SHIFT 3
+# define DRM_ELD_VER_MASK (0x1f << 3)
+# define DRM_ELD_VER_CEA861D (2 << 3) /* supports 861D or below */
+# define DRM_ELD_VER_CANNED (0x1f << 3)
+
+#define DRM_ELD_BASELINE_ELD_LEN 2 /* in dwords! */
+
+/* ELD Baseline Block for ELD_Ver == 2 */
+#define DRM_ELD_CEA_EDID_VER_MNL 4
+# define DRM_ELD_CEA_EDID_VER_SHIFT 5
+# define DRM_ELD_CEA_EDID_VER_MASK (7 << 5)
+# define DRM_ELD_CEA_EDID_VER_NONE (0 << 5)
+# define DRM_ELD_CEA_EDID_VER_CEA861 (1 << 5)
+# define DRM_ELD_CEA_EDID_VER_CEA861A (2 << 5)
+# define DRM_ELD_CEA_EDID_VER_CEA861BCD (3 << 5)
+# define DRM_ELD_MNL_SHIFT 0
+# define DRM_ELD_MNL_MASK (0x1f << 0)
+
+#define DRM_ELD_SAD_COUNT_CONN_TYPE 5
+# define DRM_ELD_SAD_COUNT_SHIFT 4
+# define DRM_ELD_SAD_COUNT_MASK (0xf << 4)
+# define DRM_ELD_CONN_TYPE_SHIFT 2
+# define DRM_ELD_CONN_TYPE_MASK (3 << 2)
+# define DRM_ELD_CONN_TYPE_HDMI (0 << 2)
+# define DRM_ELD_CONN_TYPE_DP (1 << 2)
+# define DRM_ELD_SUPPORTS_AI (1 << 1)
+# define DRM_ELD_SUPPORTS_HDCP (1 << 0)
+
+#define DRM_ELD_AUD_SYNCH_DELAY 6 /* in units of 2 ms */
+# define DRM_ELD_AUD_SYNCH_DELAY_MAX 0xfa /* 500 ms */
+
+#define DRM_ELD_SPEAKER 7
+# define DRM_ELD_SPEAKER_MASK 0x7f
+# define DRM_ELD_SPEAKER_RLRC (1 << 6)
+# define DRM_ELD_SPEAKER_FLRC (1 << 5)
+# define DRM_ELD_SPEAKER_RC (1 << 4)
+# define DRM_ELD_SPEAKER_RLR (1 << 3)
+# define DRM_ELD_SPEAKER_FC (1 << 2)
+# define DRM_ELD_SPEAKER_LFE (1 << 1)
+# define DRM_ELD_SPEAKER_FLR (1 << 0)
+
+#define DRM_ELD_PORT_ID 8 /* offsets 8..15 inclusive */
+# define DRM_ELD_PORT_ID_LEN 8
+
+#define DRM_ELD_MANUFACTURER_NAME0 16
+#define DRM_ELD_MANUFACTURER_NAME1 17
+
+#define DRM_ELD_PRODUCT_CODE0 18
+#define DRM_ELD_PRODUCT_CODE1 19
+
+#define DRM_ELD_MONITOR_NAME_STRING 20 /* offsets 20..(20+mnl-1) inclusive */
+
+#define DRM_ELD_CEA_SAD(mnl, sad) (20 + (mnl) + 3 * (sad))
+
+/**
+ * drm_eld_mnl - Get ELD monitor name length in bytes.
+ * @eld: pointer to an eld memory structure with mnl set
+ */
+static inline int drm_eld_mnl(const u8 *eld)
+{
+ return (eld[DRM_ELD_CEA_EDID_VER_MNL] & DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT;
+}
+
+int drm_eld_sad_get(const u8 *eld, int sad_index, struct cea_sad *cta_sad);
+int drm_eld_sad_set(u8 *eld, int sad_index, const struct cea_sad *cta_sad);
+
+/**
+ * drm_eld_sad - Get ELD SAD structures.
+ * @eld: pointer to an eld memory structure with sad_count set
+ */
+static inline const u8 *drm_eld_sad(const u8 *eld)
+{
+ unsigned int ver, mnl;
+
+ ver = (eld[DRM_ELD_VER] & DRM_ELD_VER_MASK) >> DRM_ELD_VER_SHIFT;
+ if (ver != 2 && ver != 31)
+ return NULL;
+
+ mnl = drm_eld_mnl(eld);
+ if (mnl > 16)
+ return NULL;
+
+ return eld + DRM_ELD_CEA_SAD(mnl, 0);
+}
+
+/**
+ * drm_eld_sad_count - Get ELD SAD count.
+ * @eld: pointer to an eld memory structure with sad_count set
+ */
+static inline int drm_eld_sad_count(const u8 *eld)
+{
+ return (eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_SAD_COUNT_MASK) >>
+ DRM_ELD_SAD_COUNT_SHIFT;
+}
+
+/**
+ * drm_eld_calc_baseline_block_size - Calculate baseline block size in bytes
+ * @eld: pointer to an eld memory structure with mnl and sad_count set
+ *
+ * This is a helper for determining the payload size of the baseline block, in
+ * bytes, for e.g. setting the Baseline_ELD_Len field in the ELD header block.
+ */
+static inline int drm_eld_calc_baseline_block_size(const u8 *eld)
+{
+ return DRM_ELD_MONITOR_NAME_STRING - DRM_ELD_HEADER_BLOCK_SIZE +
+ drm_eld_mnl(eld) + drm_eld_sad_count(eld) * 3;
+}
+
+/**
+ * drm_eld_size - Get ELD size in bytes
+ * @eld: pointer to a complete eld memory structure
+ *
+ * The returned value does not include the vendor block. The vendor block is
+ * vendor specific, and comprises the remaining bytes in the ELD memory buffer
+ * after drm_eld_size() bytes of header and baseline block.
+ *
+ * The returned value is guaranteed to be a multiple of 4.
+ */
+static inline int drm_eld_size(const u8 *eld)
+{
+ return DRM_ELD_HEADER_BLOCK_SIZE + eld[DRM_ELD_BASELINE_ELD_LEN] * 4;
+}
+
+/**
+ * drm_eld_get_spk_alloc - Get speaker allocation
+ * @eld: pointer to an ELD memory structure
+ *
+ * The returned value is the speaker mask. Users have to use the
+ * %DRM_ELD_SPEAKER field definitions to identify speakers.
+ */
+static inline u8 drm_eld_get_spk_alloc(const u8 *eld)
+{
+ return eld[DRM_ELD_SPEAKER] & DRM_ELD_SPEAKER_MASK;
+}
+
+/**
+ * drm_eld_get_conn_type - Get device type hdmi/dp connected
+ * @eld: pointer to an ELD memory structure
+ *
+ * The caller needs to use %DRM_ELD_CONN_TYPE_HDMI or %DRM_ELD_CONN_TYPE_DP to
+ * identify the display type connected.
+ */
+static inline u8 drm_eld_get_conn_type(const u8 *eld)
+{
+ return eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_CONN_TYPE_MASK;
+}
+
+#endif /* __DRM_ELD_H__ */
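
A minimal consumer sketch for the relocated helpers; the buffer is assumed to hold a complete ELD, e.g. a connector's eld array:

#include <linux/printk.h>
#include <linux/string.h>
#include <drm/drm_eld.h>

static void foo_dump_eld(const u8 *eld, char *name, int len)
{
	int mnl = drm_eld_mnl(eld);

	/* monitor name is not NUL-terminated inside the ELD */
	if (mnl >= len)
		mnl = len - 1;
	memcpy(name, &eld[DRM_ELD_MONITOR_NAME_STRING], mnl);
	name[mnl] = '\0';

	pr_info("ELD: %d bytes, %d SADs, monitor '%s'\n",
		drm_eld_size(eld), drm_eld_sad_count(eld), name);
}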
diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h
index 3a09682af685..977a9381c8ba 100644
--- a/include/drm/drm_encoder.h
+++ b/include/drm/drm_encoder.h
@@ -60,7 +60,7 @@ struct drm_encoder_funcs {
* @late_register:
*
* This optional hook can be used to register additional userspace
- * interfaces attached to the encoder like debugfs interfaces.
+ * interfaces attached to the encoder.
* It is called late in the driver load sequence from drm_dev_register().
* Everything added from this callback should be unregistered in
* the early_unregister callback.
@@ -81,6 +81,13 @@ struct drm_encoder_funcs {
* before data structures are torndown.
*/
void (*early_unregister)(struct drm_encoder *encoder);
+
+ /**
+ * @debugfs_init:
+ *
+ * Allows encoders to create encoder-specific debugfs files.
+ */
+ void (*debugfs_init)(struct drm_encoder *encoder, struct dentry *root);
};
/**
@@ -184,6 +191,13 @@ struct drm_encoder {
const struct drm_encoder_funcs *funcs;
const struct drm_encoder_helper_funcs *helper_private;
+
+ /**
+ * @debugfs_entry:
+ *
+ * Debugfs directory for this encoder.
+ */
+ struct dentry *debugfs_entry;
};
#define obj_to_encoder(x) container_of(x, struct drm_encoder, base)
diff --git a/include/drm/drm_exec.h b/include/drm/drm_exec.h
new file mode 100644
index 000000000000..f1a66c048721
--- /dev/null
+++ b/include/drm/drm_exec.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+#ifndef __DRM_EXEC_H__
+#define __DRM_EXEC_H__
+
+#include <linux/compiler.h>
+#include <linux/ww_mutex.h>
+
+#define DRM_EXEC_INTERRUPTIBLE_WAIT BIT(0)
+#define DRM_EXEC_IGNORE_DUPLICATES BIT(1)
+
+struct drm_gem_object;
+
+/**
+ * struct drm_exec - Execution context
+ */
+struct drm_exec {
+ /**
+ * @flags: Flags to control locking behavior
+ */
+ uint32_t flags;
+
+ /**
+ * @ticket: WW ticket used for acquiring locks
+ */
+ struct ww_acquire_ctx ticket;
+
+ /**
+ * @num_objects: number of objects locked
+ */
+ unsigned int num_objects;
+
+ /**
+ * @max_objects: maximum objects in array
+ */
+ unsigned int max_objects;
+
+ /**
+ * @objects: array of the locked objects
+ */
+ struct drm_gem_object **objects;
+
+ /**
+ * @contended: contended GEM object we backed off for
+ */
+ struct drm_gem_object *contended;
+
+ /**
+ * @prelocked: already locked GEM object due to contention
+ */
+ struct drm_gem_object *prelocked;
+};
+
+/**
+ * drm_exec_obj() - Return the object for a given drm_exec index
+ * @exec: Pointer to the drm_exec context
+ * @index: The index.
+ *
+ * Return: Pointer to the locked object corresponding to @index if
+ * index is within the number of locked objects. NULL otherwise.
+ */
+static inline struct drm_gem_object *
+drm_exec_obj(struct drm_exec *exec, unsigned long index)
+{
+ return index < exec->num_objects ? exec->objects[index] : NULL;
+}
+
+/**
+ * drm_exec_for_each_locked_object - iterate over all the locked objects
+ * @exec: drm_exec object
+ * @index: unsigned long index for the iteration
+ * @obj: the current GEM object
+ *
+ * Iterate over all the locked GEM objects inside the drm_exec object.
+ */
+#define drm_exec_for_each_locked_object(exec, index, obj) \
+ for ((index) = 0; ((obj) = drm_exec_obj(exec, index)); ++(index))
+
+/**
+ * drm_exec_for_each_locked_object_reverse - iterate over all the locked
+ * objects in reverse locking order
+ * @exec: drm_exec object
+ * @index: unsigned long index for the iteration
+ * @obj: the current GEM object
+ *
+ * Iterate over all the locked GEM objects inside the drm_exec object in
+ * reverse locking order. Note that @index may go below zero and wrap,
+ * but that will be caught by drm_exec_obj(), returning a NULL object.
+ */
+#define drm_exec_for_each_locked_object_reverse(exec, index, obj) \
+ for ((index) = (exec)->num_objects - 1; \
+ ((obj) = drm_exec_obj(exec, index)); --(index))
+
+/**
+ * drm_exec_until_all_locked - loop until all GEM objects are locked
+ * @exec: drm_exec object
+ *
+ * Core functionality of the drm_exec object. Loops until all GEM objects are
+ * locked and no more contention exists. At the beginning of the loop it is
+ * guaranteed that no GEM object is locked.
+ *
+ * Since labels can't be defined local to the loop's body, we use a jump
+ * pointer to make sure that the retry is only used from within the loop's body.
+ */
+#define drm_exec_until_all_locked(exec) \
+__PASTE(__drm_exec_, __LINE__): \
+ for (void *__drm_exec_retry_ptr; ({ \
+ __drm_exec_retry_ptr = &&__PASTE(__drm_exec_, __LINE__);\
+ (void)__drm_exec_retry_ptr; \
+ drm_exec_cleanup(exec); \
+ });)
+
+/**
+ * drm_exec_retry_on_contention - restart the loop to grab all locks
+ * @exec: drm_exec object
+ *
+ * Control flow helper to continue when a contention was detected and we need to
+ * clean up and re-start the loop to prepare all GEM objects.
+ */
+#define drm_exec_retry_on_contention(exec) \
+ do { \
+ if (unlikely(drm_exec_is_contended(exec))) \
+ goto *__drm_exec_retry_ptr; \
+ } while (0)
+
+/**
+ * drm_exec_is_contended - check for contention
+ * @exec: drm_exec object
+ *
+ * Returns true if the drm_exec object has run into some contention while
+ * locking a GEM object and needs to clean up.
+ */
+static inline bool drm_exec_is_contended(struct drm_exec *exec)
+{
+ return !!exec->contended;
+}
+
+void drm_exec_init(struct drm_exec *exec, uint32_t flags, unsigned nr);
+void drm_exec_fini(struct drm_exec *exec);
+bool drm_exec_cleanup(struct drm_exec *exec);
+int drm_exec_lock_obj(struct drm_exec *exec, struct drm_gem_object *obj);
+void drm_exec_unlock_obj(struct drm_exec *exec, struct drm_gem_object *obj);
+int drm_exec_prepare_obj(struct drm_exec *exec, struct drm_gem_object *obj,
+ unsigned int num_fences);
+int drm_exec_prepare_array(struct drm_exec *exec,
+ struct drm_gem_object **objects,
+ unsigned int num_objects,
+ unsigned int num_fences);
+
+#endif
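
The intended locking pattern, mirroring the drm_exec kerneldoc; the object array is hypothetical:

#include <drm/drm_exec.h>

static int foo_lock_objs(struct drm_gem_object **objs, unsigned int n)
{
	struct drm_exec exec;
	unsigned int i;
	int ret = 0;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, n);
	drm_exec_until_all_locked(&exec) {
		for (i = 0; i < n; i++) {
			/* reserve one fence slot per object */
			ret = drm_exec_prepare_obj(&exec, objs[i], 1);
			drm_exec_retry_on_contention(&exec);
			if (ret)
				goto out;
		}
	}

	/* all objects locked: submit work, add fences, ... */
out:
	drm_exec_fini(&exec);
	return ret;
}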
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 72032c354a30..375737fd6c36 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -253,31 +253,10 @@ void drm_fb_helper_fill_info(struct fb_info *info,
struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes);
-void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagereflist);
+void drm_fb_helper_damage_range(struct fb_info *info, off_t off, size_t len);
+void drm_fb_helper_damage_area(struct fb_info *info, u32 x, u32 y, u32 width, u32 height);
-ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf,
- size_t count, loff_t *ppos);
-ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf,
- size_t count, loff_t *ppos);
-
-void drm_fb_helper_sys_fillrect(struct fb_info *info,
- const struct fb_fillrect *rect);
-void drm_fb_helper_sys_copyarea(struct fb_info *info,
- const struct fb_copyarea *area);
-void drm_fb_helper_sys_imageblit(struct fb_info *info,
- const struct fb_image *image);
-
-ssize_t drm_fb_helper_cfb_read(struct fb_info *info, char __user *buf,
- size_t count, loff_t *ppos);
-ssize_t drm_fb_helper_cfb_write(struct fb_info *info, const char __user *buf,
- size_t count, loff_t *ppos);
-
-void drm_fb_helper_cfb_fillrect(struct fb_info *info,
- const struct fb_fillrect *rect);
-void drm_fb_helper_cfb_copyarea(struct fb_info *info,
- const struct fb_copyarea *area);
-void drm_fb_helper_cfb_imageblit(struct fb_info *info,
- const struct fb_image *image);
+void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagereflist);
void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, bool suspend);
void drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper,
@@ -389,67 +368,6 @@ static inline void drm_fb_helper_deferred_io(struct fb_info *info,
{
}
-static inline int drm_fb_helper_defio_init(struct drm_fb_helper *fb_helper)
-{
- return -ENODEV;
-}
-
-static inline ssize_t drm_fb_helper_sys_read(struct fb_info *info,
- char __user *buf, size_t count,
- loff_t *ppos)
-{
- return -ENODEV;
-}
-
-static inline ssize_t drm_fb_helper_sys_write(struct fb_info *info,
- const char __user *buf,
- size_t count, loff_t *ppos)
-{
- return -ENODEV;
-}
-
-static inline void drm_fb_helper_sys_fillrect(struct fb_info *info,
- const struct fb_fillrect *rect)
-{
-}
-
-static inline void drm_fb_helper_sys_copyarea(struct fb_info *info,
- const struct fb_copyarea *area)
-{
-}
-
-static inline void drm_fb_helper_sys_imageblit(struct fb_info *info,
- const struct fb_image *image)
-{
-}
-
-static inline ssize_t drm_fb_helper_cfb_read(struct fb_info *info, char __user *buf,
- size_t count, loff_t *ppos)
-{
- return -ENODEV;
-}
-
-static inline ssize_t drm_fb_helper_cfb_write(struct fb_info *info, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- return -ENODEV;
-}
-
-static inline void drm_fb_helper_cfb_fillrect(struct fb_info *info,
- const struct fb_fillrect *rect)
-{
-}
-
-static inline void drm_fb_helper_cfb_copyarea(struct fb_info *info,
- const struct fb_copyarea *area)
-{
-}
-
-static inline void drm_fb_helper_cfb_imageblit(struct fb_info *info,
- const struct fb_image *image)
-{
-}
-
static inline void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper,
bool suspend)
{
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index ecffe24e2b1b..ab230d3af138 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -41,6 +41,7 @@
struct dma_fence;
struct drm_file;
struct drm_device;
+struct drm_printer;
struct device;
struct file;
@@ -49,16 +50,16 @@ struct file;
* header include loops we need it here for now.
*/
-/* Note that the order of this enum is ABI (it determines
+/* Note that the values of this enum are ABI (it determines
* /dev/dri/renderD* numbers).
*
 * Setting DRM_MINOR_ACCEL to 32 gives enough space for more drm minors to
 * be implemented before we hit any future limit.
*/
enum drm_minor_type {
- DRM_MINOR_PRIMARY,
- DRM_MINOR_CONTROL,
- DRM_MINOR_RENDER,
+ DRM_MINOR_PRIMARY = 0,
+ DRM_MINOR_CONTROL = 1,
+ DRM_MINOR_RENDER = 2,
DRM_MINOR_ACCEL = 32,
};
@@ -78,10 +79,8 @@ struct drm_minor {
struct device *kdev; /* Linux device */
struct drm_device *dev;
+ struct dentry *debugfs_symlink;
struct dentry *debugfs_root;
-
- struct list_head debugfs_list;
- struct mutex debugfs_lock; /* Protects debugfs_list. */
};
/**
@@ -228,6 +227,18 @@ struct drm_file {
bool is_master;
/**
+ * @supports_virtualized_cursor_plane:
+ *
+ * This client is capable of handling the cursor plane with the
+ * restrictions imposed on it by the virtualized drivers.
+ *
+ * This implies that the cursor plane has to behave like a cursor
+ * i.e. track cursor movement. It also requires setting of the
+ * hotspot properties by the client on the cursor plane.
+ */
+ bool supports_virtualized_cursor_plane;
+
+ /**
* @master:
*
* Master this node is currently associated with. Protected by struct
@@ -255,8 +266,18 @@ struct drm_file {
/** @master_lookup_lock: Serializes @master. */
spinlock_t master_lookup_lock;
- /** @pid: Process that opened this file. */
- struct pid *pid;
+ /**
+ * @pid: Process that is using this file.
+ *
+ * Must only be dereferenced under an rcu_read_lock or equivalent.
+ *
+ * Updates are guarded with dev->filelist_mutex and the reference must be
+ * dropped after an RCU grace period to accommodate lockless readers.
+ */
+ struct pid __rcu *pid;
+
+ /** @client_id: A unique id for fdinfo */
+ u64 client_id;
/** @magic: Authentication magic, see @authenticated. */
drm_magic_t magic;
@@ -365,11 +386,6 @@ struct drm_file {
* Per-file buffer caches used by the PRIME buffer sharing code.
*/
struct drm_prime_file_private prime;
-
- /* private: */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
- unsigned long lock_count; /* DRI1 legacy lock count */
-#endif
};
/**
@@ -416,6 +432,8 @@ static inline bool drm_is_accel_client(const struct drm_file *file_priv)
return file_priv->minor->type == DRM_MINOR_ACCEL;
}
+void drm_file_update_pid(struct drm_file *);
+
int drm_open(struct inode *inode, struct file *filp);
int drm_open_helper(struct file *filp, struct drm_minor *minor);
ssize_t drm_read(struct file *filp, char __user *buffer,
@@ -439,6 +457,34 @@ void drm_send_event_timestamp_locked(struct drm_device *dev,
struct drm_pending_event *e,
ktime_t timestamp);
+/**
+ * struct drm_memory_stats - GEM object stats associated with a DRM file
+ * @shared: Total size of GEM objects shared between processes
+ * @private: Total size of GEM objects that are not shared
+ * @resident: Total size of the GEM objects' backing pages
+ * @purgeable: Total size of GEM objects that can be purged (resident and not active)
+ * @active: Total size of GEM objects active on one or more engines
+ *
+ * Used by drm_print_memory_stats()
+ */
+struct drm_memory_stats {
+ u64 shared;
+ u64 private;
+ u64 resident;
+ u64 purgeable;
+ u64 active;
+};
+
+enum drm_gem_object_status;
+
+void drm_print_memory_stats(struct drm_printer *p,
+ const struct drm_memory_stats *stats,
+ enum drm_gem_object_status supported_status,
+ const char *region);
+
+void drm_show_memory_stats(struct drm_printer *p, struct drm_file *file);
+void drm_show_fdinfo(struct seq_file *m, struct file *f);
+
struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags);
#endif /* _DRM_FILE_H_ */
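
A plumbing sketch for the fdinfo additions, assuming the DRM_GEM_FOPS convenience macro from drm_gem.h: the core drm_show_fdinfo() hangs off file_operations and calls back into &drm_driver.show_fdinfo.

#include <linux/fs.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>

/* driver hook: emit the standard drm-memory-* fdinfo lines */
static void foo_show_fdinfo(struct drm_printer *p, struct drm_file *file)
{
	drm_show_memory_stats(p, file);
}

static const struct file_operations foo_fops = {
	.owner = THIS_MODULE,
	DRM_GEM_FOPS,
	.show_fdinfo = drm_show_fdinfo,
};

static const struct drm_driver foo_driver = {
	.show_fdinfo = foo_show_fdinfo,
	.fops = &foo_fops,
	/* ... */
};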
diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h
index 255645c1f9a8..6ea339d5de08 100644
--- a/include/drm/drm_fixed.h
+++ b/include/drm/drm_fixed.h
@@ -71,6 +71,7 @@ static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B)
}
#define DRM_FIXED_POINT 32
+#define DRM_FIXED_POINT_HALF 16
#define DRM_FIXED_ONE (1ULL << DRM_FIXED_POINT)
#define DRM_FIXED_DECIMAL_MASK (DRM_FIXED_ONE - 1)
#define DRM_FIXED_DIGITS_MASK (~DRM_FIXED_DECIMAL_MASK)
@@ -87,6 +88,11 @@ static inline int drm_fixp2int(s64 a)
return ((s64)a) >> DRM_FIXED_POINT;
}
+static inline int drm_fixp2int_round(s64 a)
+{
+	return drm_fixp2int(a + DRM_FIXED_ONE / 2);
+}
+
static inline int drm_fixp2int_ceil(s64 a)
{
if (a > 0)
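
A worked example for drm_fixp2int_round(), assuming the half-unit bias above: 2.5 in s31.32 rounds to 3, while plain truncation yields 2.

#include <linux/bug.h>
#include <drm/drm_fixed.h>

static void foo_fixed_round_demo(void)
{
	/* 2.5 in s31.32 fixed point is 5 << 31 */
	s64 two_and_a_half = 5LL << (DRM_FIXED_POINT - 1);

	WARN_ON(drm_fixp2int(two_and_a_half) != 2);	/* truncates */
	WARN_ON(drm_fixp2int_round(two_and_a_half) != 3); /* rounds */
}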
diff --git a/include/drm/drm_flip_work.h b/include/drm/drm_flip_work.h
index 21c3d512d25c..1eef3283a109 100644
--- a/include/drm/drm_flip_work.h
+++ b/include/drm/drm_flip_work.h
@@ -31,11 +31,10 @@
/**
* DOC: flip utils
*
- * Util to queue up work to run from work-queue context after flip/vblank.
+ * Utility to queue up work to run from work-queue context after flip/vblank.
* Typically this can be used to defer unref of framebuffer's, cursor
- * bo's, etc until after vblank. The APIs are all thread-safe.
- * Moreover, drm_flip_work_queue_task and drm_flip_work_queue can be called
- * in atomic context.
+ * bo's, etc until after vblank. The APIs are all thread-safe. Moreover,
+ * drm_flip_work_commit() can be called in atomic context.
*/
struct drm_flip_work;
@@ -52,16 +51,6 @@ struct drm_flip_work;
typedef void (*drm_flip_func_t)(struct drm_flip_work *work, void *val);
/**
- * struct drm_flip_task - flip work task
- * @node: list entry element
- * @data: data to pass to &drm_flip_work.func
- */
-struct drm_flip_task {
- struct list_head node;
- void *data;
-};
-
-/**
* struct drm_flip_work - flip work queue
* @name: debug name
* @func: callback fxn called for each committed item
@@ -79,9 +68,6 @@ struct drm_flip_work {
spinlock_t lock;
};
-struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags);
-void drm_flip_work_queue_task(struct drm_flip_work *work,
- struct drm_flip_task *task);
void drm_flip_work_queue(struct drm_flip_work *work, void *val);
void drm_flip_work_commit(struct drm_flip_work *work,
struct workqueue_struct *wq);
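
A usage sketch with the remaining API: queue from the vblank handler, then commit the batch to a workqueue; struct foo_crtc is hypothetical.

#include <linux/workqueue.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_framebuffer.h>

struct foo_crtc {
	struct drm_flip_work unref_work;
};

/* runs later in workqueue context */
static void foo_unref_fb(struct drm_flip_work *work, void *val)
{
	drm_framebuffer_put(val);
}

static void foo_crtc_init(struct foo_crtc *foo)
{
	drm_flip_work_init(&foo->unref_work, "fb_unref", foo_unref_fb);
}

/* safe from the atomic vblank handler */
static void foo_vblank_irq(struct foo_crtc *foo,
			   struct drm_framebuffer *old_fb)
{
	drm_flip_work_queue(&foo->unref_work, old_fb);
	drm_flip_work_commit(&foo->unref_work, system_wq);
}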
diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h
index 291deb09475b..f13b34e0b752 100644
--- a/include/drm/drm_format_helper.h
+++ b/include/drm/drm_format_helper.h
@@ -15,6 +15,57 @@ struct drm_rect;
struct iosys_map;
+/**
+ * struct drm_format_conv_state - Stores format-conversion state
+ *
+ * DRM helpers for format conversion store temporary state in
+ * struct drm_format_conv_state. The buffer's resources can be reused
+ * among multiple conversion operations.
+ *
+ * All fields are considered private.
+ */
+struct drm_format_conv_state {
+ struct {
+ void *mem;
+ size_t size;
+ bool preallocated;
+ } tmp;
+};
+
+#define __DRM_FORMAT_CONV_STATE_INIT(_mem, _size, _preallocated) { \
+ .tmp = { \
+ .mem = (_mem), \
+ .size = (_size), \
+ .preallocated = (_preallocated), \
+ } \
+ }
+
+/**
+ * DRM_FORMAT_CONV_STATE_INIT - Initializer for struct drm_format_conv_state
+ *
+ * Initializes an instance of struct drm_format_conv_state to default values.
+ */
+#define DRM_FORMAT_CONV_STATE_INIT \
+ __DRM_FORMAT_CONV_STATE_INIT(NULL, 0, false)
+
+/**
+ * DRM_FORMAT_CONV_STATE_INIT_PREALLOCATED - Initializer for struct drm_format_conv_state
+ * @_mem: The preallocated memory area
+ * @_size: The number of bytes in _mem
+ *
+ * Initializes an instance of struct drm_format_conv_state to preallocated
+ * storage. The caller is responsible for releasing the provided memory range.
+ */
+#define DRM_FORMAT_CONV_STATE_INIT_PREALLOCATED(_mem, _size) \
+ __DRM_FORMAT_CONV_STATE_INIT(_mem, _size, true)
+
+void drm_format_conv_state_init(struct drm_format_conv_state *state);
+void drm_format_conv_state_copy(struct drm_format_conv_state *state,
+ const struct drm_format_conv_state *old_state);
+void *drm_format_conv_state_reserve(struct drm_format_conv_state *state,
+ size_t new_size, gfp_t flags);
+void drm_format_conv_state_release(struct drm_format_conv_state *state);
+
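
A one-off blit sketch: the conversion state may live on the stack and be released afterwards, or be embedded in a shadow-plane state for reuse across updates.

#include <drm/drm_format_helper.h>

static void foo_blit_rgb565(struct iosys_map *dst,
			    const unsigned int *dst_pitch,
			    const struct iosys_map *src,
			    const struct drm_framebuffer *fb,
			    const struct drm_rect *clip)
{
	struct drm_format_conv_state state = DRM_FORMAT_CONV_STATE_INIT;

	drm_fb_xrgb8888_to_rgb565(dst, dst_pitch, src, fb, clip,
				  &state, false);
	drm_format_conv_state_release(&state);
}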
unsigned int drm_fb_clip_offset(unsigned int pitch, const struct drm_format_info *format,
const struct drm_rect *clip);
@@ -23,45 +74,49 @@ void drm_fb_memcpy(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct drm_rect *clip);
void drm_fb_swab(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip, bool cached);
+ const struct drm_rect *clip, bool cached,
+ struct drm_format_conv_state *state);
void drm_fb_xrgb8888_to_rgb332(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip);
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
void drm_fb_xrgb8888_to_rgb565(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip, bool swab);
+ const struct drm_rect *clip, struct drm_format_conv_state *state,
+ bool swab);
void drm_fb_xrgb8888_to_xrgb1555(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip);
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
void drm_fb_xrgb8888_to_argb1555(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip);
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
void drm_fb_xrgb8888_to_rgba5551(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip);
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
void drm_fb_xrgb8888_to_rgb888(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip);
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
void drm_fb_xrgb8888_to_argb8888(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip);
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
void drm_fb_xrgb8888_to_xrgb2101010(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip);
+ const struct drm_rect *clip,
+ struct drm_format_conv_state *state);
void drm_fb_xrgb8888_to_argb2101010(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip);
+ const struct drm_rect *clip,
+ struct drm_format_conv_state *state);
void drm_fb_xrgb8888_to_gray8(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip);
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t dst_format,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *rect);
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip);
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
size_t drm_fb_build_fourcc_list(struct drm_device *dev,
const u32 *native_fourccs, size_t native_nfourccs,
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index 532ae78ca747..ccf91daa4307 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -22,6 +22,7 @@
#ifndef __DRM_FOURCC_H__
#define __DRM_FOURCC_H__
+#include <linux/math.h>
#include <linux/types.h>
#include <uapi/drm/drm_fourcc.h>
@@ -279,7 +280,7 @@ int drm_format_info_plane_width(const struct drm_format_info *info, int width,
if (plane == 0)
return width;
- return width / info->hsub;
+ return DIV_ROUND_UP(width, info->hsub);
}
/**
@@ -301,7 +302,7 @@ int drm_format_info_plane_height(const struct drm_format_info *info, int height,
if (plane == 0)
return height;
- return height / info->vsub;
+ return DIV_ROUND_UP(height, info->vsub);
}
const struct drm_format_info *__drm_format_info(u32 format);
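
A worked example of the round-up change for NV12, which subsamples chroma 2x2 (hsub = vsub = 2); odd dimensions now keep the last pixel covered by the chroma plane.

#include <linux/bug.h>
#include <drm/drm_fourcc.h>

static void foo_nv12_plane_dims(void)
{
	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_NV12);

	/* previously 960x540 with truncating division */
	WARN_ON(drm_format_info_plane_width(info, 1921, 1) != 961);
	WARN_ON(drm_format_info_plane_height(info, 1081, 1) != 541);
}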
diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h
index 0dcc07b68654..668077009fce 100644
--- a/include/drm/drm_framebuffer.h
+++ b/include/drm/drm_framebuffer.h
@@ -189,18 +189,6 @@ struct drm_framebuffer {
*/
int flags;
/**
- * @hot_x: X coordinate of the cursor hotspot. Used by the legacy cursor
- * IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR
- * universal plane.
- */
- int hot_x;
- /**
- * @hot_y: Y coordinate of the cursor hotspot. Used by the legacy cursor
- * IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR
- * universal plane.
- */
- int hot_y;
- /**
* @filp_head: Placed on &drm_file.fbs, protected by &drm_file.fbs_lock.
*/
struct list_head filp_head;
@@ -292,11 +280,6 @@ static inline void drm_framebuffer_assign(struct drm_framebuffer **p,
&fb->head != (&(dev)->mode_config.fb_list); \
fb = list_next_entry(fb, head))
-int drm_framebuffer_plane_width(int width,
- const struct drm_framebuffer *fb, int plane);
-int drm_framebuffer_plane_height(int height,
- const struct drm_framebuffer *fb, int plane);
-
/**
* struct drm_afbc_framebuffer - a special afbc frame buffer object
*
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index b8efd836edef..369505447acd 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -36,6 +36,8 @@
#include <linux/kref.h>
#include <linux/dma-resv.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
#include <drm/drm_vma_manager.h>
@@ -43,6 +45,25 @@ struct iosys_map;
struct drm_gem_object;
/**
+ * enum drm_gem_object_status - bitmask of object state for fdinfo reporting
+ * @DRM_GEM_OBJECT_RESIDENT: object is resident in memory (i.e. not unpinned)
+ * @DRM_GEM_OBJECT_PURGEABLE: object marked as purgeable by userspace
+ *
+ * Bitmask of status used for fdinfo memory stats, see &drm_gem_object_funcs.status
+ * and drm_show_fdinfo(). Note that an object can DRM_GEM_OBJECT_PURGEABLE if
+ * it still active or not resident, in which case drm_show_fdinfo() will not
+ * account for it as purgeable. So drivers do not need to check if the buffer
+ * is idle and resident to return this bit. (Ie. userspace can mark a buffer
+ * as purgeable even while it is still busy on the GPU.. it does not _actually_
+ * become puregeable until it becomes idle. The status gem object func does
+ * not need to consider this.)
+ */
+enum drm_gem_object_status {
+ DRM_GEM_OBJECT_RESIDENT = BIT(0),
+ DRM_GEM_OBJECT_PURGEABLE = BIT(1),
+};
+
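A sketch of how a driver might implement the matching &drm_gem_object_funcs.status callback (struct example_bo, to_example_bo() and the madv bookkeeping are hypothetical driver state):

    static enum drm_gem_object_status
    example_gem_status(struct drm_gem_object *obj)
    {
            struct example_bo *bo = to_example_bo(obj);
            enum drm_gem_object_status status = 0;

            if (bo->pages)                          /* backing store present */
                    status |= DRM_GEM_OBJECT_RESIDENT;
            if (bo->madv == EXAMPLE_MADV_DONTNEED)  /* marked by userspace */
                    status |= DRM_GEM_OBJECT_PURGEABLE;

            return status;
    }

Per the comment above, the callback does not have to check for idleness; drm_show_fdinfo() itself declines to count busy or non-resident objects as purgeable.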
+/**
* struct drm_gem_object_funcs - GEM object functions
*/
struct drm_gem_object_funcs {
@@ -175,6 +196,28 @@ struct drm_gem_object_funcs {
int (*evict)(struct drm_gem_object *obj);
/**
+ * @status:
+ *
+ * The optional status callback can return additional object state
+ * which determines which stats the object is counted against. The
+ * callback is called under table_lock. Racing against object status
+ * change is "harmless", and the callback can expect to not race
+ * against object destruction.
+ *
+ * Called by drm_show_memory_stats().
+ */
+ enum drm_gem_object_status (*status)(struct drm_gem_object *obj);
+
+ /**
+ * @rss:
+ *
+ * Return resident size of the object in physical memory.
+ *
+ * Called by drm_show_memory_stats().
+ */
+ size_t (*rss)(struct drm_gem_object *obj);
+
+ /**
* @vm_ops:
*
* Virtual memory operations used with mmap.
@@ -348,6 +391,22 @@ struct drm_gem_object {
struct dma_resv _resv;
/**
+ * @gpuva:
+ *
+ * Provides the list of GPU VAs attached to this GEM object.
+ *
+ * Drivers should lock list accesses with the GEM's &dma_resv lock
+ * (&drm_gem_object.resv) or a custom lock if one is provided.
+ */
+ struct {
+ struct list_head list;
+
+#ifdef CONFIG_LOCKDEP
+ struct lockdep_map *lock_dep_map;
+#endif
+ } gpuva;
+
+ /**
* @funcs:
*
* Optional GEM object functions. If this is set, it will be used instead of the
@@ -494,4 +553,68 @@ unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru,
int drm_gem_evict(struct drm_gem_object *obj);
+#ifdef CONFIG_LOCKDEP
+/**
+ * drm_gem_gpuva_set_lock() - Set the lock protecting accesses to the gpuva list.
+ * @obj: the &drm_gem_object
+ * @lock: the lock used to protect the gpuva list. The locking primitive
+ * must contain a dep_map field.
+ *
+ * Call this if you're not protecting access to the gpuva list with the
+ * dma-resv lock, but with a custom lock.
+ */
+#define drm_gem_gpuva_set_lock(obj, lock) \
+ if (!WARN((obj)->gpuva.lock_dep_map, \
+ "GEM GPUVA lock should be set only once.")) \
+ (obj)->gpuva.lock_dep_map = &(lock)->dep_map
+#define drm_gem_gpuva_assert_lock_held(obj) \
+ lockdep_assert((obj)->gpuva.lock_dep_map ? \
+ lock_is_held((obj)->gpuva.lock_dep_map) : \
+ dma_resv_held((obj)->resv))
+#else
+#define drm_gem_gpuva_set_lock(obj, lock) do {} while (0)
+#define drm_gem_gpuva_assert_lock_held(obj) do {} while (0)
+#endif
+
+/**
+ * drm_gem_gpuva_init() - initialize the gpuva list of a GEM object
+ * @obj: the &drm_gem_object
+ *
+ * This initializes the &drm_gem_object's &drm_gpuvm_bo list.
+ *
+ * Calling this function is only necessary for drivers intending to support the
+ * &drm_driver_feature DRIVER_GEM_GPUVA.
+ *
+ * See also drm_gem_gpuva_set_lock().
+ */
+static inline void drm_gem_gpuva_init(struct drm_gem_object *obj)
+{
+ INIT_LIST_HEAD(&obj->gpuva.list);
+}
+
+/**
+ * drm_gem_for_each_gpuvm_bo() - iterator to walk over a list of &drm_gpuvm_bo
+ * @entry__: &drm_gpuvm_bo structure to assign to in each iteration step
+ * @obj__: the &drm_gem_object the &drm_gpuvm_bo to walk are associated with
+ *
+ * This iterator walks over all &drm_gpuvm_bo structures associated with the
+ * &drm_gem_object.
+ */
+#define drm_gem_for_each_gpuvm_bo(entry__, obj__) \
+ list_for_each_entry(entry__, &(obj__)->gpuva.list, list.entry.gem)
+
+/**
+ * drm_gem_for_each_gpuvm_bo_safe() - iterator to safely walk over a list of
+ * &drm_gpuvm_bo
+ * @entry__: &drm_gpuvm_bo structure to assign to in each iteration step
+ * @next__: another &drm_gpuvm_bo to use as temporary storage
+ * @obj__: the &drm_gem_object the &drm_gpuvm_bo to walk are associated with
+ *
+ * This iterator walks over all &drm_gpuvm_bo structures associated with the
+ * &drm_gem_object. It is implemented with list_for_each_entry_safe(), hence
+ * it is safe against removal of elements.
+ */
+#define drm_gem_for_each_gpuvm_bo_safe(entry__, next__, obj__) \
+ list_for_each_entry_safe(entry__, next__, &(obj__)->gpuva.list, list.entry.gem)
+
#endif /* __DRM_GEM_H__ */
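Putting the gpuva additions together, a driver opting into DRIVER_GEM_GPUVA would initialize the list at object creation and register a custom lock only when the list is not protected by the object's dma-resv lock. A hedged sketch (example_bo and its gpuva_lock mutex are hypothetical):

    static void example_gem_init_gpuva(struct example_bo *bo)
    {
            drm_gem_gpuva_init(&bo->base);

            /* Only needed when a driver mutex, not bo->base.resv,
             * guards the list: */
            drm_gem_gpuva_set_lock(&bo->base, &bo->gpuva_lock);
    }

drm_gem_gpuva_assert_lock_held() then checks against whichever lock was registered.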
diff --git a/include/drm/drm_gem_atomic_helper.h b/include/drm/drm_gem_atomic_helper.h
index 40b8b039518e..3e01c619a25e 100644
--- a/include/drm/drm_gem_atomic_helper.h
+++ b/include/drm/drm_gem_atomic_helper.h
@@ -5,6 +5,7 @@
#include <linux/iosys-map.h>
+#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>
@@ -49,6 +50,15 @@ struct drm_shadow_plane_state {
/** @base: plane state */
struct drm_plane_state base;
+ /**
+ * @fmtcnv_state: Format-conversion state
+ *
+ * Per-plane state for format conversion. It is used when copying
+ * shadow buffers into backend storage and also holds temporary
+ * storage for the format conversion itself.
+ */
+ struct drm_format_conv_state fmtcnv_state;
+
/* Transitional state - do not export or duplicate */
/**
diff --git a/include/drm/drm_gem_dma_helper.h b/include/drm/drm_gem_dma_helper.h
index 8a043235dad8..a827bde494f6 100644
--- a/include/drm/drm_gem_dma_helper.h
+++ b/include/drm/drm_gem_dma_helper.h
@@ -166,11 +166,8 @@ drm_gem_dma_prime_import_sg_table(struct drm_device *dev,
* DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE() instead.
*/
#define DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(dumb_create_func) \
- .dumb_create = (dumb_create_func), \
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \
- .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table, \
- .gem_prime_mmap = drm_gem_prime_mmap
+ .dumb_create = (dumb_create_func), \
+ .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table
/**
* DRM_GEM_DMA_DRIVER_OPS - DMA GEM driver operations
@@ -204,11 +201,8 @@ drm_gem_dma_prime_import_sg_table(struct drm_device *dev,
* DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE() instead.
*/
#define DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(dumb_create_func) \
- .dumb_create = dumb_create_func, \
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \
- .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table_vmap, \
- .gem_prime_mmap = drm_gem_prime_mmap
+ .dumb_create = (dumb_create_func), \
+ .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table_vmap
/**
* DRM_GEM_DMA_DRIVER_OPS_VMAP - DMA GEM driver operations ensuring a virtual
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index 5994fed5e327..bf0c31aa8fbe 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -27,11 +27,6 @@ struct drm_gem_shmem_object {
struct drm_gem_object base;
/**
- * @pages_lock: Protects the page table and use count
- */
- struct mutex pages_lock;
-
- /**
* @pages: Page table
*/
struct page **pages;
@@ -66,11 +61,6 @@ struct drm_gem_shmem_object {
struct sg_table *sgt;
/**
- * @vmap_lock: Protects the vmap address and use count
- */
- struct mutex vmap_lock;
-
- /**
* @vaddr: Kernel virtual address of the backing memory
*/
void *vaddr;
@@ -109,7 +99,6 @@ struct drm_gem_shmem_object {
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size);
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem);
-int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem);
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem);
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem);
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem);
@@ -128,8 +117,7 @@ static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem
!shmem->base.dma_buf && !shmem->base.import_attach;
}
-void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem);
-bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem);
+void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem);
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem);
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem);
@@ -290,10 +278,7 @@ int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
* the &drm_driver structure.
*/
#define DRM_GEM_SHMEM_DRIVER_OPS \
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \
.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table, \
- .gem_prime_mmap = drm_gem_prime_mmap, \
- .dumb_create = drm_gem_shmem_dumb_create
+ .dumb_create = drm_gem_shmem_dumb_create
#endif /* __DRM_GEM_SHMEM_HELPER_H__ */
diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h
index f4aab64411d8..e18429f09e53 100644
--- a/include/drm/drm_gem_vram_helper.h
+++ b/include/drm/drm_gem_vram_helper.h
@@ -157,12 +157,9 @@ void drm_gem_vram_simple_display_pipe_cleanup_fb(
* &struct drm_driver with default functions.
*/
#define DRM_GEM_VRAM_DRIVER \
- .debugfs_init = drm_vram_mm_debugfs_init, \
- .dumb_create = drm_gem_vram_driver_dumb_create, \
- .dumb_map_offset = drm_gem_ttm_dumb_map_offset, \
- .gem_prime_mmap = drm_gem_prime_mmap, \
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle
+ .debugfs_init = drm_vram_mm_debugfs_init, \
+ .dumb_create = drm_gem_vram_driver_dumb_create, \
+ .dumb_map_offset = drm_gem_ttm_dumb_map_offset
/*
* VRAM memory manager
diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h
new file mode 100644
index 000000000000..9060f9fae6f1
--- /dev/null
+++ b/include/drm/drm_gpuvm.h
@@ -0,0 +1,1247 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+
+#ifndef __DRM_GPUVM_H__
+#define __DRM_GPUVM_H__
+
+/*
+ * Copyright (c) 2022 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/dma-resv.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/types.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_exec.h>
+
+struct drm_gpuvm;
+struct drm_gpuvm_bo;
+struct drm_gpuvm_ops;
+
+/**
+ * enum drm_gpuva_flags - flags for struct drm_gpuva
+ */
+enum drm_gpuva_flags {
+ /**
+ * @DRM_GPUVA_INVALIDATED:
+ *
+ * Flag indicating that the &drm_gpuva's backing GEM is invalidated.
+ */
+ DRM_GPUVA_INVALIDATED = (1 << 0),
+
+ /**
+ * @DRM_GPUVA_SPARSE:
+ *
+ * Flag indicating that the &drm_gpuva is a sparse mapping.
+ */
+ DRM_GPUVA_SPARSE = (1 << 1),
+
+ /**
+ * @DRM_GPUVA_USERBITS: user defined bits
+ */
+ DRM_GPUVA_USERBITS = (1 << 2),
+};
+
+/**
+ * struct drm_gpuva - structure to track a GPU VA mapping
+ *
+ * This structure represents a GPU VA mapping and is associated with a
+ * &drm_gpuvm.
+ *
+ * Typically, this structure is embedded in bigger driver structures.
+ */
+struct drm_gpuva {
+ /**
+ * @vm: the &drm_gpuvm this object is associated with
+ */
+ struct drm_gpuvm *vm;
+
+ /**
+ * @vm_bo: the &drm_gpuvm_bo abstraction for the mapped
+ * &drm_gem_object
+ */
+ struct drm_gpuvm_bo *vm_bo;
+
+ /**
+ * @flags: the &drm_gpuva_flags for this mapping
+ */
+ enum drm_gpuva_flags flags;
+
+ /**
+ * @va: structure containing the address and range of the &drm_gpuva
+ */
+ struct {
+ /**
+ * @va.addr: the start address
+ */
+ u64 addr;
+
+ /**
+ * @va.range: the range
+ */
+ u64 range;
+ } va;
+
+ /**
+ * @gem: structure containing the &drm_gem_object and its offset
+ */
+ struct {
+ /**
+ * @gem.offset: the offset within the &drm_gem_object
+ */
+ u64 offset;
+
+ /**
+ * @gem.obj: the mapped &drm_gem_object
+ */
+ struct drm_gem_object *obj;
+
+ /**
+ * @gem.entry: the &list_head to attach this object to a &drm_gpuvm_bo
+ */
+ struct list_head entry;
+ } gem;
+
+ /**
+ * @rb: structure containing data to store &drm_gpuvas in a rb-tree
+ */
+ struct {
+ /**
+ * @rb.node: the rb-tree node
+ */
+ struct rb_node node;
+
+ /**
+ * @rb.entry: The &list_head to additionally connect &drm_gpuvas
+ * in the same order they appear in the interval tree. This is
+ * useful to keep iterating &drm_gpuvas from a start node found
+ * through the rb-tree while doing modifications on the rb-tree
+ * itself.
+ */
+ struct list_head entry;
+
+ /**
+ * @rb.__subtree_last: needed by the interval tree, holding last-in-subtree
+ */
+ u64 __subtree_last;
+ } rb;
+};
+
+int drm_gpuva_insert(struct drm_gpuvm *gpuvm, struct drm_gpuva *va);
+void drm_gpuva_remove(struct drm_gpuva *va);
+
+void drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo);
+void drm_gpuva_unlink(struct drm_gpuva *va);
+
+struct drm_gpuva *drm_gpuva_find(struct drm_gpuvm *gpuvm,
+ u64 addr, u64 range);
+struct drm_gpuva *drm_gpuva_find_first(struct drm_gpuvm *gpuvm,
+ u64 addr, u64 range);
+struct drm_gpuva *drm_gpuva_find_prev(struct drm_gpuvm *gpuvm, u64 start);
+struct drm_gpuva *drm_gpuva_find_next(struct drm_gpuvm *gpuvm, u64 end);
+
+static inline void drm_gpuva_init(struct drm_gpuva *va, u64 addr, u64 range,
+ struct drm_gem_object *obj, u64 offset)
+{
+ va->va.addr = addr;
+ va->va.range = range;
+ va->gem.obj = obj;
+ va->gem.offset = offset;
+}
+
+/**
+ * drm_gpuva_invalidate() - sets whether the backing GEM of this &drm_gpuva is
+ * invalidated
+ * @va: the &drm_gpuva to set the invalidate flag for
+ * @invalidate: indicates whether the &drm_gpuva is invalidated
+ */
+static inline void drm_gpuva_invalidate(struct drm_gpuva *va, bool invalidate)
+{
+ if (invalidate)
+ va->flags |= DRM_GPUVA_INVALIDATED;
+ else
+ va->flags &= ~DRM_GPUVA_INVALIDATED;
+}
+
+/**
+ * drm_gpuva_invalidated() - indicates whether the backing BO of this &drm_gpuva
+ * is invalidated
+ * @va: the &drm_gpuva to check
+ *
+ * Returns: %true if the GPU VA is invalidated, %false otherwise
+ */
+static inline bool drm_gpuva_invalidated(struct drm_gpuva *va)
+{
+ return va->flags & DRM_GPUVA_INVALIDATED;
+}
+
+/**
+ * enum drm_gpuvm_flags - flags for struct drm_gpuvm
+ */
+enum drm_gpuvm_flags {
+ /**
+ * @DRM_GPUVM_RESV_PROTECTED: GPUVM is protected externally by the
+ * GPUVM's &dma_resv lock
+ */
+ DRM_GPUVM_RESV_PROTECTED = BIT(0),
+
+ /**
+ * @DRM_GPUVM_USERBITS: user defined bits
+ */
+ DRM_GPUVM_USERBITS = BIT(1),
+};
+
+/**
+ * struct drm_gpuvm - DRM GPU VA Manager
+ *
+ * The DRM GPU VA Manager keeps track of a GPU's virtual address space
+ * using an interval tree (augmented rb-tree), as the @rb fields below
+ * show. Typically, this structure is embedded in bigger
+ * driver structures.
+ *
+ * Drivers can pass addresses and ranges in an arbitrary unit, e.g. bytes or
+ * pages.
+ *
+ * There should be one manager instance per GPU virtual address space.
+ */
+struct drm_gpuvm {
+ /**
+ * @name: the name of the DRM GPU VA space
+ */
+ const char *name;
+
+ /**
+ * @flags: the &drm_gpuvm_flags of this GPUVM
+ */
+ enum drm_gpuvm_flags flags;
+
+ /**
+ * @drm: the &drm_device this VM lives in
+ */
+ struct drm_device *drm;
+
+ /**
+ * @mm_start: start of the VA space
+ */
+ u64 mm_start;
+
+ /**
+ * @mm_range: length of the VA space
+ */
+ u64 mm_range;
+
+ /**
+ * @rb: structures to track &drm_gpuva entries
+ */
+ struct {
+ /**
+ * @rb.tree: the rb-tree to track GPU VA mappings
+ */
+ struct rb_root_cached tree;
+
+ /**
+ * @rb.list: the &list_head to track GPU VA mappings
+ */
+ struct list_head list;
+ } rb;
+
+ /**
+ * @kref: reference count of this object
+ */
+ struct kref kref;
+
+ /**
+ * @kernel_alloc_node:
+ *
+ * &drm_gpuva representing the address space cutout reserved for
+ * the kernel
+ */
+ struct drm_gpuva kernel_alloc_node;
+
+ /**
+ * @ops: &drm_gpuvm_ops providing the split/merge steps to drivers
+ */
+ const struct drm_gpuvm_ops *ops;
+
+ /**
+ * @r_obj: Resv GEM object representing the GPUVM's common &dma_resv.
+ */
+ struct drm_gem_object *r_obj;
+
+ /**
+ * @extobj: structure holding the extobj list
+ */
+ struct {
+ /**
+ * @extobj.list: &list_head storing &drm_gpuvm_bos serving as
+ * external objects
+ */
+ struct list_head list;
+
+ /**
+ * @extobj.local_list: pointer to the local list temporarily
+ * storing entries from the external object list
+ */
+ struct list_head *local_list;
+
+ /**
+ * @extobj.lock: spinlock to protect the extobj list
+ */
+ spinlock_t lock;
+ } extobj;
+
+ /**
+ * @evict: structure holding the evict list and evict list lock
+ */
+ struct {
+ /**
+ * @evict.list: &list_head storing &drm_gpuvm_bos currently
+ * being evicted
+ */
+ struct list_head list;
+
+ /**
+ * @evict.local_list: pointer to the local list temporarily
+ * storing entries from the evicted object list
+ */
+ struct list_head *local_list;
+
+ /**
+ * @evict.lock: spinlock to protect the evict list
+ */
+ spinlock_t lock;
+ } evict;
+};
+
+void drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
+ enum drm_gpuvm_flags flags,
+ struct drm_device *drm,
+ struct drm_gem_object *r_obj,
+ u64 start_offset, u64 range,
+ u64 reserve_offset, u64 reserve_range,
+ const struct drm_gpuvm_ops *ops);
+
+/**
+ * drm_gpuvm_get() - acquire a struct drm_gpuvm reference
+ * @gpuvm: the &drm_gpuvm to acquire the reference of
+ *
+ * This function acquires an additional reference to @gpuvm. It is illegal to
+ * call this without already holding a reference. No locks required.
+ *
+ * Returns: the &struct drm_gpuvm pointer
+ */
+static inline struct drm_gpuvm *
+drm_gpuvm_get(struct drm_gpuvm *gpuvm)
+{
+ kref_get(&gpuvm->kref);
+
+ return gpuvm;
+}
+
+void drm_gpuvm_put(struct drm_gpuvm *gpuvm);
+
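For orientation, a typical embedding and lifecycle might look as follows (a sketch: struct example_vm, the VA-space bounds, and the assumption that drm_gpuvm_init() takes its own reference on @r_obj are all illustrative, not confirmed by this patch):

    static void example_vm_free(struct drm_gpuvm *gpuvm)
    {
            kfree(container_of(gpuvm, struct example_vm, base));
    }

    static const struct drm_gpuvm_ops example_vm_ops = {
            .vm_free = example_vm_free,     /* mandatory callback */
    };

    struct drm_gem_object *r_obj = drm_gpuvm_resv_object_alloc(drm);

    drm_gpuvm_init(&vm->base, "example", 0, drm, r_obj,
                   0, 1ull << 48,           /* managed VA space */
                   0, 0,                    /* no kernel-reserved node */
                   &example_vm_ops);
    drm_gem_object_put(r_obj);              /* assuming the VM took its own ref */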
+bool drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm, u64 addr, u64 range);
+bool drm_gpuvm_interval_empty(struct drm_gpuvm *gpuvm, u64 addr, u64 range);
+
+struct drm_gem_object *
+drm_gpuvm_resv_object_alloc(struct drm_device *drm);
+
+/**
+ * drm_gpuvm_resv_protected() - indicates whether &DRM_GPUVM_RESV_PROTECTED is
+ * set
+ * @gpuvm: the &drm_gpuvm
+ *
+ * Returns: true if &DRM_GPUVM_RESV_PROTECTED is set, false otherwise.
+ */
+static inline bool
+drm_gpuvm_resv_protected(struct drm_gpuvm *gpuvm)
+{
+ return gpuvm->flags & DRM_GPUVM_RESV_PROTECTED;
+}
+
+/**
+ * drm_gpuvm_resv() - returns the &drm_gpuvm's &dma_resv
+ * @gpuvm__: the &drm_gpuvm
+ *
+ * Returns: a pointer to the &drm_gpuvm's shared &dma_resv
+ */
+#define drm_gpuvm_resv(gpuvm__) ((gpuvm__)->r_obj->resv)
+
+/**
+ * drm_gpuvm_resv_obj() - returns the &drm_gem_object holding the &drm_gpuvm's
+ * &dma_resv
+ * @gpuvm__: the &drm_gpuvm
+ *
+ * Returns: a pointer to the &drm_gem_object holding the &drm_gpuvm's shared
+ * &dma_resv
+ */
+#define drm_gpuvm_resv_obj(gpuvm__) ((gpuvm__)->r_obj)
+
+#define drm_gpuvm_resv_held(gpuvm__) \
+ dma_resv_held(drm_gpuvm_resv(gpuvm__))
+
+#define drm_gpuvm_resv_assert_held(gpuvm__) \
+ dma_resv_assert_held(drm_gpuvm_resv(gpuvm__))
+
+/**
+ * drm_gpuvm_is_extobj() - indicates whether the given &drm_gem_object is an
+ * external object
+ * @gpuvm: the &drm_gpuvm to check
+ * @obj: the &drm_gem_object to check
+ *
+ * Returns: true if the &drm_gem_object &dma_resv differs from the
+ * &drm_gpuvm's &dma_resv, false otherwise
+ */
+static inline bool
+drm_gpuvm_is_extobj(struct drm_gpuvm *gpuvm,
+ struct drm_gem_object *obj)
+{
+ return obj && obj->resv != drm_gpuvm_resv(gpuvm);
+}
+
+static inline struct drm_gpuva *
+__drm_gpuva_next(struct drm_gpuva *va)
+{
+ if (va && !list_is_last(&va->rb.entry, &va->vm->rb.list))
+ return list_next_entry(va, rb.entry);
+
+ return NULL;
+}
+
+/**
+ * drm_gpuvm_for_each_va_range() - iterate over a range of &drm_gpuvas
+ * @va__: &drm_gpuva structure to assign to in each iteration step
+ * @gpuvm__: &drm_gpuvm to walk over
+ * @start__: starting offset, the first gpuva will overlap this
+ * @end__: ending offset, the last gpuva will start before this (but may
+ * overlap)
+ *
+ * This iterator walks over all &drm_gpuvas in the &drm_gpuvm that lie
+ * between @start__ and @end__. It is implemented similarly to list_for_each(),
+ * but is using the &drm_gpuvm's internal interval tree to accelerate
+ * the search for the starting &drm_gpuva, and hence isn't safe against removal
+ * of elements. It assumes that @end__ is within (or is the upper limit of) the
+ * &drm_gpuvm. This iterator does not skip over the &drm_gpuvm's
+ * @kernel_alloc_node.
+ */
+#define drm_gpuvm_for_each_va_range(va__, gpuvm__, start__, end__) \
+ for (va__ = drm_gpuva_find_first((gpuvm__), (start__), (end__) - (start__)); \
+ va__ && (va__->va.addr < (end__)); \
+ va__ = __drm_gpuva_next(va__))
+
+/**
+ * drm_gpuvm_for_each_va_range_safe() - safely iterate over a range of
+ * &drm_gpuvas
+ * @va__: &drm_gpuva to assign to in each iteration step
+ * @next__: another &drm_gpuva to use as temporary storage
+ * @gpuvm__: &drm_gpuvm to walk over
+ * @start__: starting offset, the first gpuva will overlap this
+ * @end__: ending offset, the last gpuva will start before this (but may
+ * overlap)
+ *
+ * This iterator walks over all &drm_gpuvas in the &drm_gpuvm that lie
+ * between @start__ and @end__. It is implemented similarly to
+ * list_for_each_safe(), but is using the &drm_gpuvm's internal interval
+ * tree to accelerate the search for the starting &drm_gpuva, and hence is safe
+ * against removal of elements. It assumes that @end__ is within (or is the
+ * upper limit of) the &drm_gpuvm. This iterator does not skip over the
+ * &drm_gpuvm's @kernel_alloc_node.
+ */
+#define drm_gpuvm_for_each_va_range_safe(va__, next__, gpuvm__, start__, end__) \
+ for (va__ = drm_gpuva_find_first((gpuvm__), (start__), (end__) - (start__)), \
+ next__ = __drm_gpuva_next(va__); \
+ va__ && (va__->va.addr < (end__)); \
+ va__ = next__, next__ = __drm_gpuva_next(va__))
+
+/**
+ * drm_gpuvm_for_each_va() - iterate over all &drm_gpuvas
+ * @va__: &drm_gpuva to assign to in each iteration step
+ * @gpuvm__: &drm_gpuvm to walk over
+ *
+ * This iterator walks over all &drm_gpuva structures associated with the given
+ * &drm_gpuvm.
+ */
+#define drm_gpuvm_for_each_va(va__, gpuvm__) \
+ list_for_each_entry(va__, &(gpuvm__)->rb.list, rb.entry)
+
+/**
+ * drm_gpuvm_for_each_va_safe() - safely iterate over all &drm_gpuvas
+ * @va__: &drm_gpuva to assign to in each iteration step
+ * @next__: another &drm_gpuva to use as temporary storage
+ * @gpuvm__: &drm_gpuvm to walk over
+ *
+ * This iterator walks over all &drm_gpuva structures associated with the given
+ * &drm_gpuvm. It is implemented with list_for_each_entry_safe(), and
+ * hence safe against the removal of elements.
+ */
+#define drm_gpuvm_for_each_va_safe(va__, next__, gpuvm__) \
+ list_for_each_entry_safe(va__, next__, &(gpuvm__)->rb.list, rb.entry)
+
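As a usage sketch, tearing down any remaining mappings with the safe variant (locking elided; drm_gpuva_unlink() requires the GEM's gpuva lock, and example_va_free() stands in for the driver-specific free):

    struct drm_gpuva *va, *next;

    drm_gpuvm_for_each_va_safe(va, next, gpuvm) {
            drm_gpuva_remove(va);
            drm_gpuva_unlink(va);
            example_va_free(va);
    }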
+/**
+ * struct drm_gpuvm_exec - &drm_gpuvm abstraction of &drm_exec
+ *
+ * This structure should be created on the stack as &drm_exec should be.
+ *
+ * Optionally, @extra can be set in order to lock additional &drm_gem_objects.
+ */
+struct drm_gpuvm_exec {
+ /**
+ * @exec: the &drm_exec structure
+ */
+ struct drm_exec exec;
+
+ /**
+ * @flags: the flags for the struct drm_exec
+ */
+ uint32_t flags;
+
+ /**
+ * @vm: the &drm_gpuvm whose DMA reservations are to be locked
+ */
+ struct drm_gpuvm *vm;
+
+ /**
+ * @num_fences: the number of fences to reserve for the &dma_resv of the
+ * locked &drm_gem_objects
+ */
+ unsigned int num_fences;
+
+ /**
+ * @extra: Callback and corresponding private data for the driver to
+ * lock arbitrary additional &drm_gem_objects.
+ */
+ struct {
+ /**
+ * @extra.fn: The driver callback to lock additional
+ * &drm_gem_objects.
+ */
+ int (*fn)(struct drm_gpuvm_exec *vm_exec);
+
+ /**
+ * @extra.priv: driver private data for the @fn callback
+ */
+ void *priv;
+ } extra;
+};
+
+int drm_gpuvm_prepare_vm(struct drm_gpuvm *gpuvm,
+ struct drm_exec *exec,
+ unsigned int num_fences);
+
+int drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm,
+ struct drm_exec *exec,
+ unsigned int num_fences);
+
+int drm_gpuvm_prepare_range(struct drm_gpuvm *gpuvm,
+ struct drm_exec *exec,
+ u64 addr, u64 range,
+ unsigned int num_fences);
+
+int drm_gpuvm_exec_lock(struct drm_gpuvm_exec *vm_exec);
+
+int drm_gpuvm_exec_lock_array(struct drm_gpuvm_exec *vm_exec,
+ struct drm_gem_object **objs,
+ unsigned int num_objs);
+
+int drm_gpuvm_exec_lock_range(struct drm_gpuvm_exec *vm_exec,
+ u64 addr, u64 range);
+
+/**
+ * drm_gpuvm_exec_unlock() - unlock all dma-resv of all associated BOs
+ * @vm_exec: the &drm_gpuvm_exec wrapper
+ *
+ * Releases all dma-resv locks of all &drm_gem_objects previously acquired
+ * through drm_gpuvm_exec_lock() or its variants.
+ */
+static inline void
+drm_gpuvm_exec_unlock(struct drm_gpuvm_exec *vm_exec)
+{
+ drm_exec_fini(&vm_exec->exec);
+}
+
+int drm_gpuvm_validate(struct drm_gpuvm *gpuvm, struct drm_exec *exec);
+void drm_gpuvm_resv_add_fence(struct drm_gpuvm *gpuvm,
+ struct drm_exec *exec,
+ struct dma_fence *fence,
+ enum dma_resv_usage private_usage,
+ enum dma_resv_usage extobj_usage);
+
+/**
+ * drm_gpuvm_exec_resv_add_fence() - add fence to private and all extobj
+ * @vm_exec: the &drm_gpuvm_exec wrapper
+ * @fence: fence to add
+ * @private_usage: private dma-resv usage
+ * @extobj_usage: extobj dma-resv usage
+ *
+ * See drm_gpuvm_resv_add_fence().
+ */
+static inline void
+drm_gpuvm_exec_resv_add_fence(struct drm_gpuvm_exec *vm_exec,
+ struct dma_fence *fence,
+ enum dma_resv_usage private_usage,
+ enum dma_resv_usage extobj_usage)
+{
+ drm_gpuvm_resv_add_fence(vm_exec->vm, &vm_exec->exec, fence,
+ private_usage, extobj_usage);
+}
+
+/**
+ * drm_gpuvm_exec_validate() - validate all BOs marked as evicted
+ * @vm_exec: the &drm_gpuvm_exec wrapper
+ *
+ * See drm_gpuvm_validate().
+ *
+ * Returns: 0 on success, negative error code on failure.
+ */
+static inline int
+drm_gpuvm_exec_validate(struct drm_gpuvm_exec *vm_exec)
+{
+ return drm_gpuvm_validate(vm_exec->vm, &vm_exec->exec);
+}
+
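These pieces compose into the usual lock/validate/submit/unlock sequence. A hedged sketch (job submission and the @fence it produces are driver-specific; DRM_EXEC_INTERRUPTIBLE_WAIT comes from &drm_exec):

    struct drm_gpuvm_exec vm_exec = {
            .vm = vm,
            .flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
            .num_fences = 1,
    };
    int ret;

    ret = drm_gpuvm_exec_lock(&vm_exec);
    if (ret)
            return ret;

    ret = drm_gpuvm_exec_validate(&vm_exec);
    if (ret)
            goto out_unlock;

    /* ... run the job, obtaining @fence ... */

    drm_gpuvm_exec_resv_add_fence(&vm_exec, fence,
                                  DMA_RESV_USAGE_BOOKKEEP,
                                  DMA_RESV_USAGE_BOOKKEEP);
    out_unlock:
    drm_gpuvm_exec_unlock(&vm_exec);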
+/**
+ * struct drm_gpuvm_bo - structure representing a &drm_gpuvm and
+ * &drm_gem_object combination
+ *
+ * This structure is an abstraction representing a &drm_gpuvm and
+ * &drm_gem_object combination. It serves as an indirection to accelerate
+ * iterating all &drm_gpuvas within a &drm_gpuvm backed by the same
+ * &drm_gem_object.
+ *
+ * Furthermore it is used to cache evicted GEM objects for a certain GPU-VM to
+ * accelerate validation.
+ *
+ * Typically, drivers want to create an instance of a struct drm_gpuvm_bo once
+ * a GEM object is first mapped in a GPU-VM and release the instance once the
+ * last mapping of the GEM object in this GPU-VM is unmapped.
+ */
+struct drm_gpuvm_bo {
+ /**
+ * @vm: The &drm_gpuvm the @obj is mapped in. This is a reference
+ * counted pointer.
+ */
+ struct drm_gpuvm *vm;
+
+ /**
+ * @obj: The &drm_gem_object being mapped in @vm. This is a reference
+ * counted pointer.
+ */
+ struct drm_gem_object *obj;
+
+ /**
+ * @evicted: Indicates whether the &drm_gem_object is evicted; field
+ * protected by the &drm_gem_object's dma-resv lock.
+ */
+ bool evicted;
+
+ /**
+ * @kref: The reference count for this &drm_gpuvm_bo.
+ */
+ struct kref kref;
+
+ /**
+ * @list: Structure containing all &list_heads.
+ */
+ struct {
+ /**
+ * @list.gpuva: The list of linked &drm_gpuvas.
+ *
+ * It is safe to access entries from this list as long as the
+ * GEM's gpuva lock is held. See also struct drm_gem_object.
+ */
+ struct list_head gpuva;
+
+ /**
+ * @list.entry: Structure containing all &list_heads serving as
+ * entry.
+ */
+ struct {
+ /**
+ * @list.entry.gem: List entry to attach to the
+ * &drm_gem_objects gpuva list.
+ */
+ struct list_head gem;
+
+ /**
+ * @list.entry.extobj: List entry to attach to the
+ * &drm_gpuvm's extobj list.
+ */
+ struct list_head extobj;
+
+ /**
+ * @list.entry.evict: List entry to attach to the
+ * &drm_gpuvm's evict list.
+ */
+ struct list_head evict;
+ } entry;
+ } list;
+};
+
+struct drm_gpuvm_bo *
+drm_gpuvm_bo_create(struct drm_gpuvm *gpuvm,
+ struct drm_gem_object *obj);
+
+struct drm_gpuvm_bo *
+drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm,
+ struct drm_gem_object *obj);
+struct drm_gpuvm_bo *
+drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *vm_bo);
+
+/**
+ * drm_gpuvm_bo_get() - acquire a struct drm_gpuvm_bo reference
+ * @vm_bo: the &drm_gpuvm_bo to acquire the reference of
+ *
+ * This function acquires an additional reference to @vm_bo. It is illegal to
+ * call this without already holding a reference. No locks required.
+ *
+ * Returns: the &struct drm_gpuvm_bo pointer
+ */
+static inline struct drm_gpuvm_bo *
+drm_gpuvm_bo_get(struct drm_gpuvm_bo *vm_bo)
+{
+ kref_get(&vm_bo->kref);
+ return vm_bo;
+}
+
+bool drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo);
+
+struct drm_gpuvm_bo *
+drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
+ struct drm_gem_object *obj);
+
+void drm_gpuvm_bo_evict(struct drm_gpuvm_bo *vm_bo, bool evict);
+
+/**
+ * drm_gpuvm_bo_gem_evict() - add/remove all &drm_gpuvm_bo's in the list
+ * to/from the &drm_gpuvm's evicted list
+ * @obj: the &drm_gem_object
+ * @evict: indicates whether @obj is evicted
+ *
+ * See drm_gpuvm_bo_evict().
+ */
+static inline void
+drm_gpuvm_bo_gem_evict(struct drm_gem_object *obj, bool evict)
+{
+ struct drm_gpuvm_bo *vm_bo;
+
+ drm_gem_gpuva_assert_lock_held(obj);
+ drm_gem_for_each_gpuvm_bo(vm_bo, obj)
+ drm_gpuvm_bo_evict(vm_bo, evict);
+}
+
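For example, a driver's eviction-notification path can fan the new state out to every VM mapping the object (a sketch; the hook itself is hypothetical, and with DRM_GPUVM_RESV_PROTECTED the held dma-resv lock satisfies the assert):

    static void example_bo_notify_evicted(struct drm_gem_object *obj)
    {
            dma_resv_assert_held(obj->resv);
            drm_gpuvm_bo_gem_evict(obj, true);
    }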
+void drm_gpuvm_bo_extobj_add(struct drm_gpuvm_bo *vm_bo);
+
+/**
+ * drm_gpuvm_bo_for_each_va() - iterator to walk over a list of &drm_gpuva
+ * @va__: &drm_gpuva structure to assign to in each iteration step
+ * @vm_bo__: the &drm_gpuvm_bo the &drm_gpuva to walk are associated with
+ *
+ * This iterator walks over all &drm_gpuva structures associated with the
+ * &drm_gpuvm_bo.
+ *
+ * The caller must hold the GEM's gpuva lock.
+ */
+#define drm_gpuvm_bo_for_each_va(va__, vm_bo__) \
+ list_for_each_entry(va__, &(vm_bo__)->list.gpuva, gem.entry)
+
+/**
+ * drm_gpuvm_bo_for_each_va_safe() - iterator to safely walk over a list of
+ * &drm_gpuva
+ * @va__: &drm_gpuva structure to assign to in each iteration step
+ * @next__: another &drm_gpuva to use as temporary storage
+ * @vm_bo__: the &drm_gpuvm_bo the &drm_gpuva to walk are associated with
+ *
+ * This iterator walks over all &drm_gpuva structures associated with the
+ * &drm_gpuvm_bo. It is implemented with list_for_each_entry_safe(), hence
+ * it is safe against removal of elements.
+ *
+ * The caller must hold the GEM's gpuva lock.
+ */
+#define drm_gpuvm_bo_for_each_va_safe(va__, next__, vm_bo__) \
+ list_for_each_entry_safe(va__, next__, &(vm_bo__)->list.gpuva, gem.entry)
+
+/**
+ * enum drm_gpuva_op_type - GPU VA operation type
+ *
+ * Operations to alter the GPU VA mappings tracked by the &drm_gpuvm.
+ */
+enum drm_gpuva_op_type {
+ /**
+ * @DRM_GPUVA_OP_MAP: the map op type
+ */
+ DRM_GPUVA_OP_MAP,
+
+ /**
+ * @DRM_GPUVA_OP_REMAP: the remap op type
+ */
+ DRM_GPUVA_OP_REMAP,
+
+ /**
+ * @DRM_GPUVA_OP_UNMAP: the unmap op type
+ */
+ DRM_GPUVA_OP_UNMAP,
+
+ /**
+ * @DRM_GPUVA_OP_PREFETCH: the prefetch op type
+ */
+ DRM_GPUVA_OP_PREFETCH,
+};
+
+/**
+ * struct drm_gpuva_op_map - GPU VA map operation
+ *
+ * This structure represents a single map operation generated by the
+ * DRM GPU VA manager.
+ */
+struct drm_gpuva_op_map {
+ /**
+ * @va: structure containing address and range of a map
+ * operation
+ */
+ struct {
+ /**
+ * @va.addr: the base address of the new mapping
+ */
+ u64 addr;
+
+ /**
+ * @va.range: the range of the new mapping
+ */
+ u64 range;
+ } va;
+
+ /**
+ * @gem: structure containing the &drm_gem_object and its offset
+ */
+ struct {
+ /**
+ * @gem.offset: the offset within the &drm_gem_object
+ */
+ u64 offset;
+
+ /**
+ * @gem.obj: the &drm_gem_object to map
+ */
+ struct drm_gem_object *obj;
+ } gem;
+};
+
+/**
+ * struct drm_gpuva_op_unmap - GPU VA unmap operation
+ *
+ * This structure represents a single unmap operation generated by the
+ * DRM GPU VA manager.
+ */
+struct drm_gpuva_op_unmap {
+ /**
+ * @va: the &drm_gpuva to unmap
+ */
+ struct drm_gpuva *va;
+
+ /**
+ * @keep:
+ *
+ * Indicates whether this &drm_gpuva is physically contiguous with the
+ * original mapping request.
+ *
+ * Optionally, if &keep is set, drivers may keep the actual page table
+ * mappings for this &drm_gpuva, adding only the missing page table
+ * entries, and update the &drm_gpuvm accordingly.
+ */
+ bool keep;
+};
+
+/**
+ * struct drm_gpuva_op_remap - GPU VA remap operation
+ *
+ * This represents a single remap operation generated by the DRM GPU VA manager.
+ *
+ * A remap operation is generated when an existing GPU VA mapping is split up
+ * by inserting a new GPU VA mapping or by partially unmapping existent
+ * mapping(s), hence it consists of a maximum of two map and one unmap
+ * operation.
+ *
+ * The @unmap operation takes care of removing the original existing mapping.
+ * @prev is used to remap the preceding part, @next the subsequent part.
+ *
+ * If either a new mapping's start address is aligned with the start address
+ * of the old mapping or the new mapping's end address is aligned with the
+ * end address of the old mapping, either @prev or @next is NULL.
+ *
+ * Note, the reason for a dedicated remap operation, rather than arbitrary
+ * unmap and map operations, is to give drivers the chance of extracting driver
+ * specific data for creating the new mappings from the unmap operation's
+ * &drm_gpuva structure which typically is embedded in larger driver specific
+ * structures.
+ */
+struct drm_gpuva_op_remap {
+ /**
+ * @prev: the preceding part of a split mapping
+ */
+ struct drm_gpuva_op_map *prev;
+
+ /**
+ * @next: the subsequent part of a split mapping
+ */
+ struct drm_gpuva_op_map *next;
+
+ /**
+ * @unmap: the unmap operation for the original existing mapping
+ */
+ struct drm_gpuva_op_unmap *unmap;
+};
+
+/**
+ * struct drm_gpuva_op_prefetch - GPU VA prefetch operation
+ *
+ * This structure represents a single prefetch operation generated by the
+ * DRM GPU VA manager.
+ */
+struct drm_gpuva_op_prefetch {
+ /**
+ * @va: the &drm_gpuva to prefetch
+ */
+ struct drm_gpuva *va;
+};
+
+/**
+ * struct drm_gpuva_op - GPU VA operation
+ *
+ * This structure represents a single generic operation.
+ *
+ * The particular type of the operation is defined by @op.
+ */
+struct drm_gpuva_op {
+ /**
+ * @entry:
+ *
+ * The &list_head used to distribute instances of this struct within
+ * &drm_gpuva_ops.
+ */
+ struct list_head entry;
+
+ /**
+ * @op: the type of the operation
+ */
+ enum drm_gpuva_op_type op;
+
+ union {
+ /**
+ * @map: the map operation
+ */
+ struct drm_gpuva_op_map map;
+
+ /**
+ * @remap: the remap operation
+ */
+ struct drm_gpuva_op_remap remap;
+
+ /**
+ * @unmap: the unmap operation
+ */
+ struct drm_gpuva_op_unmap unmap;
+
+ /**
+ * @prefetch: the prefetch operation
+ */
+ struct drm_gpuva_op_prefetch prefetch;
+ };
+};
+
+/**
+ * struct drm_gpuva_ops - wraps a list of &drm_gpuva_op
+ */
+struct drm_gpuva_ops {
+ /**
+ * @list: the &list_head
+ */
+ struct list_head list;
+};
+
+/**
+ * drm_gpuva_for_each_op() - iterator to walk over &drm_gpuva_ops
+ * @op: &drm_gpuva_op to assign in each iteration step
+ * @ops: &drm_gpuva_ops to walk
+ *
+ * This iterator walks over all ops within a given list of operations.
+ */
+#define drm_gpuva_for_each_op(op, ops) list_for_each_entry(op, &(ops)->list, entry)
+
+/**
+ * drm_gpuva_for_each_op_safe() - iterator to safely walk over &drm_gpuva_ops
+ * @op: &drm_gpuva_op to assign in each iteration step
+ * @next: another &drm_gpuva_op to use as temporary storage
+ * @ops: &drm_gpuva_ops to walk
+ *
+ * This iterator walks over all ops within a given list of operations. It is
+ * implemented with list_for_each_safe(), so it is safe against removal of elements.
+ */
+#define drm_gpuva_for_each_op_safe(op, next, ops) \
+ list_for_each_entry_safe(op, next, &(ops)->list, entry)
+
+/**
+ * drm_gpuva_for_each_op_from_reverse() - iterate backwards from the given point
+ * @op: &drm_gpuva_op to assign in each iteration step
+ * @ops: &drm_gpuva_ops to walk
+ *
+ * This iterator walks over all ops within a given list of operations beginning
+ * from the given operation in reverse order.
+ */
+#define drm_gpuva_for_each_op_from_reverse(op, ops) \
+ list_for_each_entry_from_reverse(op, &(ops)->list, entry)
+
+/**
+ * drm_gpuva_for_each_op_reverse() - iterator to walk over &drm_gpuva_ops in reverse
+ * @op: &drm_gpuva_op to assign in each iteration step
+ * @ops: &drm_gpuva_ops to walk
+ *
+ * This iterator walks over all ops within a given list of operations in reverse order.
+ */
+#define drm_gpuva_for_each_op_reverse(op, ops) \
+ list_for_each_entry_reverse(op, &(ops)->list, entry)
+
+/**
+ * drm_gpuva_first_op() - returns the first &drm_gpuva_op from &drm_gpuva_ops
+ * @ops: the &drm_gpuva_ops to get the first &drm_gpuva_op from
+ */
+#define drm_gpuva_first_op(ops) \
+ list_first_entry(&(ops)->list, struct drm_gpuva_op, entry)
+
+/**
+ * drm_gpuva_last_op() - returns the last &drm_gpuva_op from &drm_gpuva_ops
+ * @ops: the &drm_gpuva_ops to get the last &drm_gpuva_op from
+ */
+#define drm_gpuva_last_op(ops) \
+ list_last_entry(&(ops)->list, struct drm_gpuva_op, entry)
+
+/**
+ * drm_gpuva_prev_op() - previous &drm_gpuva_op in the list
+ * @op: the current &drm_gpuva_op
+ */
+#define drm_gpuva_prev_op(op) list_prev_entry(op, entry)
+
+/**
+ * drm_gpuva_next_op() - next &drm_gpuva_op in the list
+ * @op: the current &drm_gpuva_op
+ */
+#define drm_gpuva_next_op(op) list_next_entry(op, entry)
+
+struct drm_gpuva_ops *
+drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
+ u64 addr, u64 range,
+ struct drm_gem_object *obj, u64 offset);
+struct drm_gpuva_ops *
+drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm,
+ u64 addr, u64 range);
+
+struct drm_gpuva_ops *
+drm_gpuvm_prefetch_ops_create(struct drm_gpuvm *gpuvm,
+ u64 addr, u64 range);
+
+struct drm_gpuva_ops *
+drm_gpuvm_bo_unmap_ops_create(struct drm_gpuvm_bo *vm_bo);
+
+void drm_gpuva_ops_free(struct drm_gpuvm *gpuvm,
+ struct drm_gpuva_ops *ops);
+
+static inline void drm_gpuva_init_from_op(struct drm_gpuva *va,
+ struct drm_gpuva_op_map *op)
+{
+ drm_gpuva_init(va, op->va.addr, op->va.range,
+ op->gem.obj, op->gem.offset);
+}
+
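Tying these together, the precomputed-ops path looks roughly as follows (a sketch: allocation of the driver structure embedding @va, the @vm_bo, the remap case and error unwinding are all elided):

    struct drm_gpuva_ops *ops;
    struct drm_gpuva_op *op;

    ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range, obj, offset);
    if (IS_ERR(ops))
            return PTR_ERR(ops);

    drm_gpuva_for_each_op(op, ops) {
            switch (op->op) {
            case DRM_GPUVA_OP_MAP:
                    /* allocate the driver structure embedding @va, then: */
                    drm_gpuva_init_from_op(va, &op->map);
                    drm_gpuva_map(gpuvm, va, &op->map);
                    drm_gpuva_link(va, vm_bo);
                    break;
            case DRM_GPUVA_OP_UNMAP:
                    drm_gpuva_unmap(&op->unmap);
                    drm_gpuva_unlink(op->unmap.va);
                    break;
            default:        /* DRM_GPUVA_OP_REMAP, _PREFETCH */
                    break;
            }
    }

    drm_gpuva_ops_free(gpuvm, ops);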
+/**
+ * struct drm_gpuvm_ops - callbacks for split/merge steps
+ *
+ * This structure defines the callbacks used by &drm_gpuvm_sm_map and
+ * &drm_gpuvm_sm_unmap to provide the split/merge steps for map and unmap
+ * operations to drivers.
+ */
+struct drm_gpuvm_ops {
+ /**
+ * @vm_free: called when the last reference of a struct drm_gpuvm is
+ * dropped
+ *
+ * This callback is mandatory.
+ */
+ void (*vm_free)(struct drm_gpuvm *gpuvm);
+
+ /**
+ * @op_alloc: called when the &drm_gpuvm allocates
+ * a struct drm_gpuva_op
+ *
+ * Some drivers may want to embed struct drm_gpuva_op into driver
+ * specific structures. By implementing this callback drivers can
+ * allocate memory accordingly.
+ *
+ * This callback is optional.
+ */
+ struct drm_gpuva_op *(*op_alloc)(void);
+
+ /**
+ * @op_free: called when the &drm_gpuvm frees a
+ * struct drm_gpuva_op
+ *
+ * Some drivers may want to embed struct drm_gpuva_op into driver
+ * specific structures. By implementing this callback drivers can
+ * free the previously allocated memory accordingly.
+ *
+ * This callback is optional.
+ */
+ void (*op_free)(struct drm_gpuva_op *op);
+
+ /**
+ * @vm_bo_alloc: called when the &drm_gpuvm allocates
+ * a struct drm_gpuvm_bo
+ *
+ * Some drivers may want to embed struct drm_gpuvm_bo into driver
+ * specific structures. By implementing this callback drivers can
+ * allocate memory accordingly.
+ *
+ * This callback is optional.
+ */
+ struct drm_gpuvm_bo *(*vm_bo_alloc)(void);
+
+ /**
+ * @vm_bo_free: called when the &drm_gpuvm frees a
+ * struct drm_gpuvm_bo
+ *
+ * Some drivers may want to embed struct drm_gpuvm_bo into driver
+ * specific structures. By implementing this callback drivers can
+ * free the previously allocated memory accordingly.
+ *
+ * This callback is optional.
+ */
+ void (*vm_bo_free)(struct drm_gpuvm_bo *vm_bo);
+
+ /**
+ * @vm_bo_validate: called from drm_gpuvm_validate()
+ *
+ * Drivers receive this callback for every evicted &drm_gem_object being
+ * mapped in the corresponding &drm_gpuvm.
+ *
+ * Typically, drivers would call their driver specific variant of
+ * ttm_bo_validate() from within this callback.
+ */
+ int (*vm_bo_validate)(struct drm_gpuvm_bo *vm_bo,
+ struct drm_exec *exec);
+
+ /**
+ * @sm_step_map: called from &drm_gpuvm_sm_map to finally insert the
+ * mapping once all previous steps were completed
+ *
+ * The &priv pointer matches the one the driver passed to
+ * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
+ *
+ * Can be NULL if &drm_gpuvm_sm_map is not used.
+ */
+ int (*sm_step_map)(struct drm_gpuva_op *op, void *priv);
+
+ /**
+ * @sm_step_remap: called from &drm_gpuvm_sm_map and
+ * &drm_gpuvm_sm_unmap to split up an existent mapping
+ *
+ * This callback is called when an existent mapping needs to be split up.
+ * This is the case when either a newly requested mapping overlaps or
+ * is enclosed by an existent mapping or a partial unmap of an existent
+ * mapping is requested.
+ *
+ * The &priv pointer matches the one the driver passed to
+ * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
+ *
+ * Can be NULL if neither &drm_gpuvm_sm_map nor &drm_gpuvm_sm_unmap is
+ * used.
+ */
+ int (*sm_step_remap)(struct drm_gpuva_op *op, void *priv);
+
+ /**
+ * @sm_step_unmap: called from &drm_gpuvm_sm_map and
+ * &drm_gpuvm_sm_unmap to unmap an existent mapping
+ *
+ * This callback is called when an existent mapping needs to be unmapped.
+ * This is the case when either a newly requested mapping encloses an
+ * existent mapping or an unmap of an existent mapping is requested.
+ *
+ * The &priv pointer matches the one the driver passed to
+ * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
+ *
+ * Can be NULL if neither &drm_gpuvm_sm_map nor &drm_gpuvm_sm_unmap is
+ * used.
+ */
+ int (*sm_step_unmap)(struct drm_gpuva_op *op, void *priv);
+};
+
+int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
+ u64 addr, u64 range,
+ struct drm_gem_object *obj, u64 offset);
+
+int drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
+ u64 addr, u64 range);
+
+void drm_gpuva_map(struct drm_gpuvm *gpuvm,
+ struct drm_gpuva *va,
+ struct drm_gpuva_op_map *op);
+
+void drm_gpuva_remap(struct drm_gpuva *prev,
+ struct drm_gpuva *next,
+ struct drm_gpuva_op_remap *op);
+
+void drm_gpuva_unmap(struct drm_gpuva_op_unmap *op);
+
+/**
+ * drm_gpuva_op_remap_to_unmap_range() - Helper to get the start and range of
+ * the unmap stage of a remap op.
+ * @op: Remap op.
+ * @start_addr: Output pointer for the start of the required unmap.
+ * @range: Output pointer for the length of the required unmap.
+ *
+ * The given start address and range will be set such that they represent the
+ * range of the address space that was previously covered by the mapping being
+ * re-mapped, but is now empty.
+ */
+static inline void
+drm_gpuva_op_remap_to_unmap_range(const struct drm_gpuva_op_remap *op,
+ u64 *start_addr, u64 *range)
+{
+ const u64 va_start = op->prev ?
+ op->prev->va.addr + op->prev->va.range :
+ op->unmap->va->va.addr;
+ const u64 va_end = op->next ?
+ op->next->va.addr :
+ op->unmap->va->va.addr + op->unmap->va->va.range;
+
+ if (start_addr)
+ *start_addr = va_start;
+ if (range)
+ *range = va_end - va_start;
+}
+
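A worked example: an old mapping covers [0x00000, 0x10000) and a new map request for [0x04000, 0x08000) splits it, so the remap op carries prev = [0x00000, 0x04000) and next = [0x08000, 0x10000):

    u64 start, range;

    drm_gpuva_op_remap_to_unmap_range(&op->remap, &start, &range);
    /* start == 0x04000, range == 0x04000: exactly the hole that has to
     * be torn down in the page tables. */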
+#endif /* __DRM_GPUVM_H__ */
diff --git a/include/drm/drm_ioctl.h b/include/drm/drm_ioctl.h
index 6ed61c371f6c..171760b6c4a1 100644
--- a/include/drm/drm_ioctl.h
+++ b/include/drm/drm_ioctl.h
@@ -110,17 +110,6 @@ enum drm_ioctl_flags {
*/
DRM_ROOT_ONLY = BIT(2),
/**
- * @DRM_UNLOCKED:
- *
- * Whether &drm_ioctl_desc.func should be called with the DRM BKL held
- * or not. Enforced as the default for all modern drivers, hence there
- * should never be a need to set this flag.
- *
- * Do not use anywhere else than for the VBLANK_WAIT IOCTL, which is the
- * only legacy IOCTL which needs this.
- */
- DRM_UNLOCKED = BIT(4),
- /**
* @DRM_RENDER_ALLOW:
*
* This is used for all ioctl needed for rendering only, for drivers
diff --git a/include/drm/drm_kunit_helpers.h b/include/drm/drm_kunit_helpers.h
index ed013fdcc1ff..ba483c87f0e7 100644
--- a/include/drm/drm_kunit_helpers.h
+++ b/include/drm/drm_kunit_helpers.h
@@ -3,6 +3,8 @@
#ifndef DRM_KUNIT_HELPERS_H_
#define DRM_KUNIT_HELPERS_H_
+#include <linux/device.h>
+
#include <kunit/test.h>
struct drm_device;
@@ -51,7 +53,7 @@ __drm_kunit_helper_alloc_drm_device(struct kunit *test,
{
struct drm_driver *driver;
- driver = kunit_kzalloc(test, sizeof(*driver), GFP_KERNEL);
+ driver = devm_kzalloc(dev, sizeof(*driver), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, driver);
driver->driver_features = features;
@@ -87,5 +89,12 @@ __drm_kunit_helper_alloc_drm_device(struct kunit *test,
sizeof(_type), \
offsetof(_type, _member), \
_feat))
+struct drm_modeset_acquire_ctx *
+drm_kunit_helper_acquire_ctx_alloc(struct kunit *test);
+
+struct drm_atomic_state *
+drm_kunit_helper_atomic_state_alloc(struct kunit *test,
+ struct drm_device *drm,
+ struct drm_modeset_acquire_ctx *ctx);
#endif // DRM_KUNIT_HELPERS_H_
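A sketch of the two new helpers in a test body (the drm pointer is assumed to come from drm_kunit_helper_alloc_drm_device(), and both allocations are cleaned up by kunit):

    static void example_test(struct kunit *test)
    {
            struct drm_modeset_acquire_ctx *ctx;
            struct drm_atomic_state *state;

            ctx = drm_kunit_helper_acquire_ctx_alloc(test);
            KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);

            state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
            KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);

            /* ... build, check and commit the atomic state ... */
    }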
diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h
deleted file mode 100644
index 0fc85418aad8..000000000000
--- a/include/drm/drm_legacy.h
+++ /dev/null
@@ -1,331 +0,0 @@
-#ifndef __DRM_DRM_LEGACY_H__
-#define __DRM_DRM_LEGACY_H__
-/*
- * Legacy driver interfaces for the Direct Rendering Manager
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright (c) 2009-2010, Code Aurora Forum.
- * All rights reserved.
- * Copyright © 2014 Intel Corporation
- * Daniel Vetter <daniel.vetter@ffwll.ch>
- *
- * Author: Rickard E. (Rik) Faith <faith@valinux.com>
- * Author: Gareth Hughes <gareth@valinux.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/agp_backend.h>
-
-#include <drm/drm.h>
-#include <drm/drm_auth.h>
-
-struct drm_device;
-struct drm_driver;
-struct file;
-struct pci_driver;
-
-/*
- * Legacy Support for palateontologic DRM drivers
- *
- * If you add a new driver and it uses any of these functions or structures,
- * you're doing it terribly wrong.
- */
-
-/*
- * Hash-table Support
- */
-
-struct drm_hash_item {
- struct hlist_node head;
- unsigned long key;
-};
-
-struct drm_open_hash {
- struct hlist_head *table;
- u8 order;
-};
-
-/**
- * DMA buffer.
- */
-struct drm_buf {
- int idx; /**< Index into master buflist */
- int total; /**< Buffer size */
- int order; /**< log-base-2(total) */
- int used; /**< Amount of buffer in use (for DMA) */
- unsigned long offset; /**< Byte offset (used internally) */
- void *address; /**< Address of buffer */
- unsigned long bus_address; /**< Bus address of buffer */
- struct drm_buf *next; /**< Kernel-only: used for free list */
- __volatile__ int waiting; /**< On kernel DMA queue */
- __volatile__ int pending; /**< On hardware DMA queue */
- struct drm_file *file_priv; /**< Private of holding file descr */
- int context; /**< Kernel queue for this buffer */
- int while_locked; /**< Dispatch this buffer while locked */
- enum {
- DRM_LIST_NONE = 0,
- DRM_LIST_FREE = 1,
- DRM_LIST_WAIT = 2,
- DRM_LIST_PEND = 3,
- DRM_LIST_PRIO = 4,
- DRM_LIST_RECLAIM = 5
- } list; /**< Which list we're on */
-
- int dev_priv_size; /**< Size of buffer private storage */
- void *dev_private; /**< Per-buffer private storage */
-};
-
-typedef struct drm_dma_handle {
- dma_addr_t busaddr;
- void *vaddr;
- size_t size;
-} drm_dma_handle_t;
-
-/**
- * Buffer entry. There is one of this for each buffer size order.
- */
-struct drm_buf_entry {
- int buf_size; /**< size */
- int buf_count; /**< number of buffers */
- struct drm_buf *buflist; /**< buffer list */
- int seg_count;
- int page_order;
- struct drm_dma_handle **seglist;
-
- int low_mark; /**< Low water mark */
- int high_mark; /**< High water mark */
-};
-
-/**
- * DMA data.
- */
-struct drm_device_dma {
-
- struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */
- int buf_count; /**< total number of buffers */
- struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */
- int seg_count;
- int page_count; /**< number of pages */
- unsigned long *pagelist; /**< page list */
- unsigned long byte_count;
- enum {
- _DRM_DMA_USE_AGP = 0x01,
- _DRM_DMA_USE_SG = 0x02,
- _DRM_DMA_USE_FB = 0x04,
- _DRM_DMA_USE_PCI_RO = 0x08
- } flags;
-
-};
-
-/**
- * Scatter-gather memory.
- */
-struct drm_sg_mem {
- unsigned long handle;
- void *virtual;
- int pages;
- struct page **pagelist;
- dma_addr_t *busaddr;
-};
-
-/**
- * Kernel side of a mapping
- */
-struct drm_local_map {
- dma_addr_t offset; /**< Requested physical address (0 for SAREA)*/
- unsigned long size; /**< Requested physical size (bytes) */
- enum drm_map_type type; /**< Type of memory to map */
- enum drm_map_flags flags; /**< Flags */
- void *handle; /**< User-space: "Handle" to pass to mmap() */
- /**< Kernel-space: kernel-virtual address */
- int mtrr; /**< MTRR slot used */
-};
-
-typedef struct drm_local_map drm_local_map_t;
-
-/**
- * Mappings list
- */
-struct drm_map_list {
- struct list_head head; /**< list head */
- struct drm_hash_item hash;
- struct drm_local_map *map; /**< mapping */
- uint64_t user_token;
- struct drm_master *master;
-};
-
-int drm_legacy_addmap(struct drm_device *d, resource_size_t offset,
- unsigned int size, enum drm_map_type type,
- enum drm_map_flags flags, struct drm_local_map **map_p);
-struct drm_local_map *drm_legacy_findmap(struct drm_device *dev, unsigned int token);
-void drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map);
-int drm_legacy_rmmap_locked(struct drm_device *d, struct drm_local_map *map);
-struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev);
-int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma);
-
-int drm_legacy_addbufs_agp(struct drm_device *d, struct drm_buf_desc *req);
-int drm_legacy_addbufs_pci(struct drm_device *d, struct drm_buf_desc *req);
-
-/**
- * Test that the hardware lock is held by the caller, returning otherwise.
- *
- * \param dev DRM device.
- * \param filp file pointer of the caller.
- */
-#define LOCK_TEST_WITH_RETURN( dev, _file_priv ) \
-do { \
- if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) || \
- _file_priv->master->lock.file_priv != _file_priv) { \
- DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\
- __func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\
- _file_priv->master->lock.file_priv, _file_priv); \
- return -EINVAL; \
- } \
-} while (0)
-
-void drm_legacy_idlelock_take(struct drm_lock_data *lock);
-void drm_legacy_idlelock_release(struct drm_lock_data *lock);
-
-/* drm_irq.c */
-int drm_legacy_irq_uninstall(struct drm_device *dev);
-
-/* drm_pci.c */
-
-#ifdef CONFIG_PCI
-
-int drm_legacy_pci_init(const struct drm_driver *driver,
- struct pci_driver *pdriver);
-void drm_legacy_pci_exit(const struct drm_driver *driver,
- struct pci_driver *pdriver);
-
-#else
-
-static inline struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev,
- size_t size, size_t align)
-{
- return NULL;
-}
-
-static inline void drm_pci_free(struct drm_device *dev,
- struct drm_dma_handle *dmah)
-{
-}
-
-static inline int drm_legacy_pci_init(const struct drm_driver *driver,
- struct pci_driver *pdriver)
-{
- return -EINVAL;
-}
-
-static inline void drm_legacy_pci_exit(const struct drm_driver *driver,
- struct pci_driver *pdriver)
-{
-}
-
-#endif
-
-/*
- * AGP Support
- */
-
-struct drm_agp_head {
- struct agp_kern_info agp_info;
- struct list_head memory;
- unsigned long mode;
- struct agp_bridge_data *bridge;
- int enabled;
- int acquired;
- unsigned long base;
- int agp_mtrr;
- int cant_use_aperture;
- unsigned long page_mask;
-};
-
-#if IS_ENABLED(CONFIG_DRM_LEGACY) && IS_ENABLED(CONFIG_AGP)
-struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev);
-int drm_legacy_agp_acquire(struct drm_device *dev);
-int drm_legacy_agp_release(struct drm_device *dev);
-int drm_legacy_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
-int drm_legacy_agp_info(struct drm_device *dev, struct drm_agp_info *info);
-int drm_legacy_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
-int drm_legacy_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
-int drm_legacy_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
-int drm_legacy_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
-#else
-static inline struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev)
-{
- return NULL;
-}
-
-static inline int drm_legacy_agp_acquire(struct drm_device *dev)
-{
- return -ENODEV;
-}
-
-static inline int drm_legacy_agp_release(struct drm_device *dev)
-{
- return -ENODEV;
-}
-
-static inline int drm_legacy_agp_enable(struct drm_device *dev,
- struct drm_agp_mode mode)
-{
- return -ENODEV;
-}
-
-static inline int drm_legacy_agp_info(struct drm_device *dev,
- struct drm_agp_info *info)
-{
- return -ENODEV;
-}
-
-static inline int drm_legacy_agp_alloc(struct drm_device *dev,
- struct drm_agp_buffer *request)
-{
- return -ENODEV;
-}
-
-static inline int drm_legacy_agp_free(struct drm_device *dev,
- struct drm_agp_buffer *request)
-{
- return -ENODEV;
-}
-
-static inline int drm_legacy_agp_unbind(struct drm_device *dev,
- struct drm_agp_binding *request)
-{
- return -ENODEV;
-}
-
-static inline int drm_legacy_agp_bind(struct drm_device *dev,
- struct drm_agp_binding *request)
-{
- return -ENODEV;
-}
-#endif
-
-/* drm_memory.c */
-void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev);
-void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev);
-void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev);
-
-#endif /* __DRM_DRM_LEGACY_H__ */
diff --git a/include/drm/drm_mipi_dbi.h b/include/drm/drm_mipi_dbi.h
index 816f196b3d4c..e8e0f8d39f3a 100644
--- a/include/drm/drm_mipi_dbi.h
+++ b/include/drm/drm_mipi_dbi.h
@@ -12,6 +12,7 @@
#include <drm/drm_device.h>
#include <drm/drm_simple_kms_helper.h>
+struct drm_format_conv_state;
struct drm_rect;
struct gpio_desc;
struct iosys_map;
@@ -192,7 +193,8 @@ int mipi_dbi_command_buf(struct mipi_dbi *dbi, u8 cmd, u8 *data, size_t len);
int mipi_dbi_command_stackbuf(struct mipi_dbi *dbi, u8 cmd, const u8 *data,
size_t len);
int mipi_dbi_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuffer *fb,
- struct drm_rect *clip, bool swap);
+ struct drm_rect *clip, bool swap,
+ struct drm_format_conv_state *fmtcnv_state);
/**
* mipi_dbi_command - MIPI DCS command with optional parameter(s)
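For callers adapting to the extra parameter, a minimal sketch of threading a conversion-state object through mipi_dbi_buf_copy(); it assumes the drm_format_conv_state_init()/drm_format_conv_state_release() helpers from <drm/drm_format_helper.h> introduced alongside this change, with dst, src, fb and clip as in the prototype above:

	struct drm_format_conv_state fmtcnv_state;
	int ret;

	drm_format_conv_state_init(&fmtcnv_state);
	/* The conversion state owns any temporary buffer the copy allocates. */
	ret = mipi_dbi_buf_copy(dst, src, fb, clip, false, &fmtcnv_state);
	drm_format_conv_state_release(&fmtcnv_state);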
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index c9df0407980c..c0aec0d4d664 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -168,6 +168,7 @@ struct mipi_dsi_device_info {
* struct mipi_dsi_device - DSI peripheral device
* @host: DSI host for this peripheral
* @dev: driver model device node for this peripheral
+ * @attached: the DSI device has been successfully attached
* @name: DSI peripheral chip type
* @channel: virtual channel assigned to the peripheral
* @format: pixel format for video mode
@@ -184,6 +185,7 @@ struct mipi_dsi_device_info {
struct mipi_dsi_device {
struct mipi_dsi_host *host;
struct device dev;
+ bool attached;
char name[DSI_DEV_NAME_SIZE];
unsigned int channel;
diff --git a/include/drm/drm_mode_object.h b/include/drm/drm_mode_object.h
index 912f1e415685..08d7a7f0188f 100644
--- a/include/drm/drm_mode_object.h
+++ b/include/drm/drm_mode_object.h
@@ -60,7 +60,7 @@ struct drm_mode_object {
void (*free_cb)(struct kref *kref);
};
-#define DRM_OBJECT_MAX_PROPERTY 24
+#define DRM_OBJECT_MAX_PROPERTY 64
/**
* struct drm_object_properties - property tracking for &drm_mode_object
*/
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index 965faf082a6d..881b03e4dc28 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -59,8 +59,8 @@ enum mode_set_atomic {
/**
* struct drm_crtc_helper_funcs - helper operations for CRTCs
*
- * These hooks are used by the legacy CRTC helpers, the transitional plane
- * helpers and the new atomic modesetting helpers.
+ * These hooks are used by the legacy CRTC helpers and the new atomic
+ * modesetting helpers.
*/
struct drm_crtc_helper_funcs {
/**
@@ -134,7 +134,7 @@ struct drm_crtc_helper_funcs {
* Since this function is both called from the check phase of an atomic
* commit, and the mode validation in the probe paths it is not allowed
* to look at anything else but the passed-in mode, and validate it
- * against configuration-invariant hardward constraints. Any further
+ * against configuration-invariant hardware constraints. Any further
* limits which depend upon the configuration can only be checked in
* @mode_fixup or @atomic_check.
*
@@ -216,9 +216,7 @@ struct drm_crtc_helper_funcs {
*
* This callback is used to update the display mode of a CRTC without
* changing anything of the primary plane configuration. This fits the
- * requirement of atomic and hence is used by the atomic helpers. It is
- * also used by the transitional plane helpers to implement a
- * @mode_set hook in drm_helper_crtc_mode_set().
+ * requirement of atomic and hence is used by the atomic helpers.
*
* Note that the display pipe is completely off when this function is
* called. Atomic drivers which need hardware to be running before they
@@ -333,8 +331,8 @@ struct drm_crtc_helper_funcs {
* all updated. Again the recommendation is to just call check helpers
* until a maximal configuration is reached.
*
- * This callback is used by the atomic modeset helpers and by the
- * transitional plane helpers, but it is optional.
+ * This callback is used by the atomic modeset helpers, but it is
+ * optional.
*
* NOTE:
*
@@ -373,8 +371,8 @@ struct drm_crtc_helper_funcs {
* has picked. See drm_atomic_helper_commit_planes() for a discussion of
* the tradeoffs and variants of plane commit helpers.
*
- * This callback is used by the atomic modeset helpers and by the
- * transitional plane helpers, but it is optional.
+ * This callback is used by the atomic modeset helpers, but it is
+ * optional.
*/
void (*atomic_begin)(struct drm_crtc *crtc,
struct drm_atomic_state *state);
@@ -397,8 +395,8 @@ struct drm_crtc_helper_funcs {
* has picked. See drm_atomic_helper_commit_planes() for a discussion of
* the tradeoffs and variants of plane commit helpers.
*
- * This callback is used by the atomic modeset helpers and by the
- * transitional plane helpers, but it is optional.
+ * This callback is used by the atomic modeset helpers, but it is
+ * optional.
*/
void (*atomic_flush)(struct drm_crtc *crtc,
struct drm_atomic_state *state);
@@ -507,8 +505,8 @@ static inline void drm_crtc_helper_add(struct drm_crtc *crtc,
/**
* struct drm_encoder_helper_funcs - helper operations for encoders
*
- * These hooks are used by the legacy CRTC helpers, the transitional plane
- * helpers and the new atomic modesetting helpers.
+ * These hooks are used by the legacy CRTC helpers and the new atomic
+ * modesetting helpers.
*/
struct drm_encoder_helper_funcs {
/**
@@ -552,7 +550,7 @@ struct drm_encoder_helper_funcs {
* Since this function is both called from the check phase of an atomic
* commit, and the mode validation in the probe paths it is not allowed
* to look at anything else but the passed-in mode, and validate it
- * against configuration-invariant hardward constraints. Any further
+ * against configuration-invariant hardware constraints. Any further
* limits which depend upon the configuration can only be checked in
* @mode_fixup or @atomic_check.
*
@@ -1156,6 +1154,11 @@ struct drm_connector_helper_funcs {
* This operation is optional.
*
* This callback is used by the drm_kms_helper_poll_enable() helpers.
+ *
+ * This operation does not need to perform any hpd state tracking as
+ * the DRM core handles that maintenance and ensures the calls to enable
+ * and disable hpd are balanced.
+ *
*/
void (*enable_hpd)(struct drm_connector *connector);
@@ -1167,6 +1170,11 @@ struct drm_connector_helper_funcs {
* This operation is optional.
*
* This callback is used by the drm_kms_helper_poll_disable() helpers.
+ *
+ * This operation does not need to perform any hpd state tracking as
+ * the DRM core handles that maintenance and ensures the calls to enable
+ * and disable hpd are balanced.
+ *
*/
void (*disable_hpd)(struct drm_connector *connector);
};
@@ -1185,8 +1193,7 @@ static inline void drm_connector_helper_add(struct drm_connector *connector,
/**
* struct drm_plane_helper_funcs - helper operations for planes
*
- * These functions are used by the atomic helpers and by the transitional plane
- * helpers.
+ * These functions are used by the atomic helpers.
*/
struct drm_plane_helper_funcs {
/**
@@ -1221,9 +1228,8 @@ struct drm_plane_helper_funcs {
* The helpers will call @cleanup_fb with matching arguments for every
* successful call to this hook.
*
- * This callback is used by the atomic modeset helpers and by the
- * transitional plane helpers, but it is optional. See @begin_fb_access
- * for preparing per-commit resources.
+ * This callback is used by the atomic modeset helpers, but it is
+ * optional. See @begin_fb_access for preparing per-commit resources.
*
* RETURNS:
*
@@ -1240,8 +1246,8 @@ struct drm_plane_helper_funcs {
* This hook is called to clean up any resources allocated for the given
* framebuffer and plane configuration in @prepare_fb.
*
- * This callback is used by the atomic modeset helpers and by the
- * transitional plane helpers, but it is optional.
+ * This callback is used by the atomic modeset helpers, but it is
+ * optional.
*/
void (*cleanup_fb)(struct drm_plane *plane,
struct drm_plane_state *old_state);
@@ -1295,8 +1301,8 @@ struct drm_plane_helper_funcs {
* all updated. Again the recommendation is to just call check helpers
* until a maximal configuration is reached.
*
- * This callback is used by the atomic modeset helpers and by the
- * transitional plane helpers, but it is optional.
+ * This callback is used by the atomic modeset helpers, but it is
+ * optional.
*
* NOTE:
*
@@ -1326,8 +1332,7 @@ struct drm_plane_helper_funcs {
* has picked. See drm_atomic_helper_commit_planes() for a discussion of
* the tradeoffs and variants of plane commit helpers.
*
- * This callback is used by the atomic modeset helpers and by the
- * transitional plane helpers, but it is optional.
+ * This callback is used by the atomic modeset helpers, but it is optional.
*/
void (*atomic_update)(struct drm_plane *plane,
struct drm_atomic_state *state);
@@ -1376,9 +1381,8 @@ struct drm_plane_helper_funcs {
* has picked. See drm_atomic_helper_commit_planes() for a discussion of
* the tradeoffs and variants of plane commit helpers.
*
- * This callback is used by the atomic modeset helpers and by the
- * transitional plane helpers, but it is optional. It's intended to
- * reverse the effects of @atomic_enable.
+ * This callback is used by the atomic modeset helpers, but it is
+ * optional. It's intended to reverse the effects of @atomic_enable.
*/
void (*atomic_disable)(struct drm_plane *plane,
struct drm_atomic_state *state);
@@ -1470,7 +1474,7 @@ struct drm_mode_config_helper_funcs {
* swapped into the various state pointers. The passed in state
* therefore contains copies of the old/previous state. This hook should
* commit the new state into hardware. Note that the helpers have
- * already waited for preceeding atomic commits and fences, but drivers
+ * already waited for preceding atomic commits and fences, but drivers
* can add more waiting calls at the start of their implementation, e.g.
* to wait for driver-internal request for implicit syncing, before
* starting to commit the update to the hardware.
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 432fab2347eb..10015891b056 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -27,12 +27,14 @@
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/list.h>
+#include <linux/mutex.h>
struct backlight_device;
struct dentry;
struct device_node;
struct drm_connector;
struct drm_device;
+struct drm_panel_follower;
struct drm_panel;
struct display_timing;
@@ -144,6 +146,45 @@ struct drm_panel_funcs {
void (*debugfs_init)(struct drm_panel *panel, struct dentry *root);
};
+struct drm_panel_follower_funcs {
+ /**
+ * @panel_prepared:
+ *
+ * Called after the panel has been powered on.
+ */
+ int (*panel_prepared)(struct drm_panel_follower *follower);
+
+ /**
+ * @panel_unpreparing:
+ *
+ * Called before the panel is powered off.
+ */
+ int (*panel_unpreparing)(struct drm_panel_follower *follower);
+};
+
+struct drm_panel_follower {
+ /**
+ * @funcs:
+ *
+	 * Dependent device callbacks; should be initialized by the caller.
+ */
+ const struct drm_panel_follower_funcs *funcs;
+
+ /**
+	 * @list:
+ *
+ * Used for linking into panel's list; set by drm_panel_add_follower().
+ */
+ struct list_head list;
+
+ /**
+	 * @panel:
+ *
+ * The panel we're dependent on; set by drm_panel_add_follower().
+ */
+ struct drm_panel *panel;
+};
+
/**
* struct drm_panel - DRM panel object
*/
@@ -190,6 +231,20 @@ struct drm_panel {
struct list_head list;
/**
+ * @followers:
+ *
+ * A list of struct drm_panel_follower dependent on this panel.
+ */
+ struct list_head followers;
+
+ /**
+ * @follower_lock:
+ *
+ * Lock for followers list.
+ */
+ struct mutex follower_lock;
+
+ /**
* @prepare_prev_first:
*
* The previous controller should be prepared first, before the prepare
@@ -198,6 +253,20 @@ struct drm_panel {
* the panel is powered up.
*/
bool prepare_prev_first;
+
+ /**
+ * @prepared:
+ *
+ * If true then the panel has been prepared.
+ */
+ bool prepared;
+
+ /**
+ * @enabled:
+ *
+ * If true then the panel has been enabled.
+ */
+ bool enabled;
};
void drm_panel_init(struct drm_panel *panel, struct device *dev,
@@ -232,6 +301,33 @@ static inline int of_drm_get_panel_orientation(const struct device_node *np,
}
#endif
+#if defined(CONFIG_DRM_PANEL)
+bool drm_is_panel_follower(struct device *dev);
+int drm_panel_add_follower(struct device *follower_dev,
+ struct drm_panel_follower *follower);
+void drm_panel_remove_follower(struct drm_panel_follower *follower);
+int devm_drm_panel_add_follower(struct device *follower_dev,
+ struct drm_panel_follower *follower);
+#else
+static inline bool drm_is_panel_follower(struct device *dev)
+{
+ return false;
+}
+
+static inline int drm_panel_add_follower(struct device *follower_dev,
+ struct drm_panel_follower *follower)
+{
+ return -ENODEV;
+}
+
+static inline void drm_panel_remove_follower(struct drm_panel_follower *follower) { }
+static inline int devm_drm_panel_add_follower(struct device *follower_dev,
+ struct drm_panel_follower *follower)
+{
+ return -ENODEV;
+}
+#endif
+
#if IS_ENABLED(CONFIG_DRM_PANEL) && (IS_BUILTIN(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
(IS_MODULE(CONFIG_DRM) && IS_MODULE(CONFIG_BACKLIGHT_CLASS_DEVICE)))
int drm_panel_of_backlight(struct drm_panel *panel);
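A device whose power sequencing must track the panel (a touchscreen sharing the panel's power rail is the typical case) hooks the two callbacks above; a sketch assuming a hypothetical my_ts driver with its own my_ts_power_on()/my_ts_power_off() helpers:

	struct my_ts {
		struct device *dev;
		struct drm_panel_follower follower;
	};

	static int my_ts_panel_prepared(struct drm_panel_follower *follower)
	{
		struct my_ts *ts = container_of(follower, struct my_ts, follower);

		/* Panel power is up; safe to power on the touch controller. */
		return my_ts_power_on(ts);
	}

	static int my_ts_panel_unpreparing(struct drm_panel_follower *follower)
	{
		struct my_ts *ts = container_of(follower, struct my_ts, follower);

		return my_ts_power_off(ts);
	}

	static const struct drm_panel_follower_funcs my_ts_follower_funcs = {
		.panel_prepared = my_ts_panel_prepared,
		.panel_unpreparing = my_ts_panel_unpreparing,
	};

	/* In probe; the devm variant removes the follower on driver detach. */
	ts->follower.funcs = &my_ts_follower_funcs;
	ret = devm_drm_panel_add_follower(ts->dev, &ts->follower);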
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 51291983ea44..641fe298052d 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -56,7 +56,7 @@ struct drm_plane_state {
/**
* @crtc:
*
- * Currently bound CRTC, NULL if disabled. Do not this write directly,
+ * Currently bound CRTC, NULL if disabled. Do not write this directly,
* use drm_atomic_set_crtc_for_plane()
*/
struct drm_crtc *crtc;
@@ -116,6 +116,10 @@ struct drm_plane_state {
/** @src_h: height of visible portion of plane (in 16.16) */
uint32_t src_h, src_w;
+ /** @hotspot_x: x offset to mouse cursor hotspot */
+ /** @hotspot_y: y offset to mouse cursor hotspot */
+ int32_t hotspot_x, hotspot_y;
+
/**
* @alpha:
* Opacity of the plane with 0 as completely transparent and 0xffff as
@@ -191,6 +195,16 @@ struct drm_plane_state {
struct drm_property_blob *fb_damage_clips;
/**
+ * @ignore_damage_clips:
+ *
+	 * Set by drivers to indicate to the drm_atomic_helper_damage_iter_init()
+ * helper that the @fb_damage_clips blob property should be ignored.
+ *
+ * See :ref:`damage_tracking_properties` for more information.
+ */
+ bool ignore_damage_clips;
+
+ /**
* @src:
*
* source coordinates of the plane (in 16.16).
@@ -237,6 +251,13 @@ struct drm_plane_state {
/** @state: backpointer to global drm_atomic_state */
struct drm_atomic_state *state;
+
+ /**
+ * @color_mgmt_changed: Color management properties have changed. Used
+ * by the atomic helpers and drivers to steer the atomic commit control
+ * flow.
+ */
+ bool color_mgmt_changed : 1;
};
static inline struct drm_rect
@@ -748,6 +769,16 @@ struct drm_plane {
* scaling.
*/
struct drm_property *scaling_filter_property;
+
+ /**
+ * @hotspot_x_property: property to set mouse hotspot x offset.
+ */
+ struct drm_property *hotspot_x_property;
+
+ /**
+ * @hotspot_y_property: property to set mouse hotspot y offset.
+ */
+ struct drm_property *hotspot_y_property;
};
#define obj_to_plane(x) container_of(x, struct drm_plane, base)
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index 3a574e8cd22f..75f9c4830564 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -26,7 +26,6 @@
#include <linux/types.h>
-struct drm_atomic_state;
struct drm_crtc;
struct drm_framebuffer;
struct drm_modeset_acquire_ctx;
@@ -42,7 +41,6 @@ int drm_plane_helper_update_primary(struct drm_plane *plane, struct drm_crtc *cr
int drm_plane_helper_disable_primary(struct drm_plane *plane,
struct drm_modeset_acquire_ctx *ctx);
void drm_plane_helper_destroy(struct drm_plane *plane);
-int drm_plane_helper_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state);
/**
* DRM_PLANE_NON_ATOMIC_FUNCS - Default plane functions for non-atomic drivers
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index a93a387f8a1a..dd4883df876a 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -453,7 +453,7 @@ void __drm_dev_dbg(struct _ddebug *desc, const struct device *dev,
/* Helper for struct drm_device based logging. */
#define __drm_printk(drm, level, type, fmt, ...) \
- dev_##level##type((drm)->dev, "[drm] " fmt, ##__VA_ARGS__)
+ dev_##level##type((drm) ? (drm)->dev : NULL, "[drm] " fmt, ##__VA_ARGS__)
#define drm_info(drm, fmt, ...) \
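With the NULL check added to __drm_printk(), the drm_device-based logging macros tolerate a NULL drm pointer and fall back to device-less dev_printk() output rather than dereferencing NULL, so early probe paths can log before the device exists, e.g.:

	/* Safe even before a struct drm_device has been allocated: */
	drm_warn(NULL, "bridge not ready, deferring probe\n");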
diff --git a/include/drm/drm_probe_helper.h b/include/drm/drm_probe_helper.h
index 4977e0ab72db..fad3c4003b2b 100644
--- a/include/drm/drm_probe_helper.h
+++ b/include/drm/drm_probe_helper.h
@@ -25,6 +25,7 @@ void drm_kms_helper_connector_hotplug_event(struct drm_connector *connector);
void drm_kms_helper_poll_disable(struct drm_device *dev);
void drm_kms_helper_poll_enable(struct drm_device *dev);
+void drm_kms_helper_poll_reschedule(struct drm_device *dev);
bool drm_kms_helper_is_poll_worker(void);
enum drm_mode_status drm_crtc_helper_mode_valid_fixed(struct drm_crtc *crtc,
diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h
index 65bc9710a470..082f29156b3e 100644
--- a/include/drm/drm_property.h
+++ b/include/drm/drm_property.h
@@ -279,6 +279,12 @@ struct drm_property_blob *drm_property_create_blob(struct drm_device *dev,
const void *data);
struct drm_property_blob *drm_property_lookup_blob(struct drm_device *dev,
uint32_t id);
+int drm_property_replace_blob_from_id(struct drm_device *dev,
+ struct drm_property_blob **blob,
+ uint64_t blob_id,
+ ssize_t expected_size,
+ ssize_t expected_elem_size,
+ bool *replaced);
int drm_property_replace_global_blob(struct drm_device *dev,
struct drm_property_blob **replace,
size_t length,
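A sketch of how a driver's atomic property handler might use the new drm_property_replace_blob_from_id(); example_lut_property and example_state are hypothetical, and passing -1 for a size argument is assumed to skip that check, mirroring drm_atomic_replace_property_blob_from_id():

	bool replaced = false;
	int ret;

	if (property == example_lut_property) {
		ret = drm_property_replace_blob_from_id(plane->dev,
							&example_state->lut_blob,
							val,
							-1, /* any total size */
							sizeof(struct drm_color_lut),
							&replaced);
		state->color_mgmt_changed |= replaced;
		return ret;
	}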
diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h
index 6cf7243a1dc5..b40052132e52 100644
--- a/include/drm/drm_syncobj.h
+++ b/include/drm/drm_syncobj.h
@@ -54,7 +54,11 @@ struct drm_syncobj {
*/
struct list_head cb_list;
/**
- * @lock: Protects &cb_list and write-locks &fence.
+ * @ev_fd_list: List of registered eventfd.
+ */
+ struct list_head ev_fd_list;
+ /**
+ * @lock: Protects &cb_list and &ev_fd_list, and write-locks &fence.
*/
spinlock_t lock;
/**
diff --git a/include/drm/drm_sysfs.h b/include/drm/drm_sysfs.h
index 6273cac44e47..96a5d858404b 100644
--- a/include/drm/drm_sysfs.h
+++ b/include/drm/drm_sysfs.h
@@ -12,6 +12,6 @@ void drm_class_device_unregister(struct device *dev);
void drm_sysfs_hotplug_event(struct drm_device *dev);
void drm_sysfs_connector_hotplug_event(struct drm_connector *connector);
-void drm_sysfs_connector_status_event(struct drm_connector *connector,
- struct drm_property *property);
+void drm_sysfs_connector_property_event(struct drm_connector *connector,
+ struct drm_property *property);
#endif
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index c0586d832260..5acc64954a88 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -63,13 +63,12 @@ struct drm_file;
* to an array, and as such should start at 0.
*/
enum drm_sched_priority {
- DRM_SCHED_PRIORITY_MIN,
- DRM_SCHED_PRIORITY_NORMAL,
- DRM_SCHED_PRIORITY_HIGH,
DRM_SCHED_PRIORITY_KERNEL,
+ DRM_SCHED_PRIORITY_HIGH,
+ DRM_SCHED_PRIORITY_NORMAL,
+ DRM_SCHED_PRIORITY_LOW,
- DRM_SCHED_PRIORITY_COUNT,
- DRM_SCHED_PRIORITY_UNSET = -2
+ DRM_SCHED_PRIORITY_COUNT
};
/* Used to choose between FIFO and RR job scheduling */
@@ -201,7 +200,7 @@ struct drm_sched_entity {
* by the scheduler thread, can be accessed locklessly from
* drm_sched_job_arm() iff the queue is empty.
*/
- struct dma_fence *last_scheduled;
+ struct dma_fence __rcu *last_scheduled;
/**
* @last_user: last group leader pushing a job into the entity.
@@ -321,6 +320,7 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
* @sched: the scheduler instance on which this job is scheduled.
* @s_fence: contains the fences for the scheduling of job.
* @finish_cb: the callback for the finished fence.
+ * @credits: the number of credits this job contributes to the scheduler
 * @work: Helper to reschedule job kill to different context.
* @id: a unique id assigned to each job scheduled on the scheduler.
* @karma: increment on every hang caused by this job. If this exceeds the hang
@@ -340,6 +340,8 @@ struct drm_sched_job {
struct drm_gpu_scheduler *sched;
struct drm_sched_fence *s_fence;
+ u32 credits;
+
/*
* work is used only after finish_cb has been used and will not be
* accessed anymore.
@@ -463,27 +465,42 @@ struct drm_sched_backend_ops {
* and it's time to clean it up.
*/
void (*free_job)(struct drm_sched_job *sched_job);
+
+ /**
+ * @update_job_credits: Called when the scheduler is considering this
+ * job for execution.
+ *
+ * This callback returns the number of credits the job would take if
+ * pushed to the hardware. Drivers may use this to dynamically update
+ * the job's credit count. For instance, deduct the number of credits
+ * for already signalled native fences.
+ *
+ * This callback is optional.
+ */
+ u32 (*update_job_credits)(struct drm_sched_job *sched_job);
};
/**
* struct drm_gpu_scheduler - scheduler instance-specific data
*
* @ops: backend operations provided by the driver.
- * @hw_submission_limit: the max size of the hardware queue.
+ * @credit_limit: the credit limit of this scheduler
+ * @credit_count: the current credit count of this scheduler
* @timeout: the time after which a job is removed from the scheduler.
* @name: name of the ring for which this scheduler is being used.
- * @sched_rq: priority wise array of run queues.
- * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
- * is ready to be scheduled.
+ * @num_rqs: Number of run-queues. This is at most DRM_SCHED_PRIORITY_COUNT,
+ * as there's usually one run-queue per priority, but may be fewer.
+ * @sched_rq: An allocated array of run-queues of size @num_rqs.
* @job_scheduled: once @drm_sched_entity_do_release is called the scheduler
* waits on this wait queue until all the scheduled jobs are
* finished.
- * @hw_rq_count: the number of jobs currently in the hardware queue.
* @job_id_count: used to assign unique id to the each job.
+ * @submit_wq: workqueue used to queue @work_run_job and @work_free_job
* @timeout_wq: workqueue used to queue @work_tdr
+ * @work_run_job: work which calls run_job op of each scheduler.
+ * @work_free_job: work which calls free_job op of each scheduler.
* @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
* timeout interval is over.
- * @thread: the kthread on which the scheduler which run.
* @pending_list: the list of jobs which are currently in the job queue.
* @job_list_lock: lock to protect the pending_list.
* @hang_limit: once the hangs by a job crosses this limit then it is marked
@@ -492,23 +509,27 @@ struct drm_sched_backend_ops {
* @_score: score used when the driver doesn't provide one
* @ready: marks if the underlying HW is ready to work
 * @free_guilty: A hint to the timeout handler to free the guilty job.
+ * @pause_submit: pause queuing of @work_run_job on @submit_wq
+ * @own_submit_wq: scheduler owns allocation of @submit_wq
* @dev: system &struct device
*
* One scheduler is implemented for each hardware ring.
*/
struct drm_gpu_scheduler {
const struct drm_sched_backend_ops *ops;
- uint32_t hw_submission_limit;
+ u32 credit_limit;
+ atomic_t credit_count;
long timeout;
const char *name;
- struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_COUNT];
- wait_queue_head_t wake_up_worker;
+ u32 num_rqs;
+ struct drm_sched_rq **sched_rq;
wait_queue_head_t job_scheduled;
- atomic_t hw_rq_count;
atomic64_t job_id_count;
+ struct workqueue_struct *submit_wq;
struct workqueue_struct *timeout_wq;
+ struct work_struct work_run_job;
+ struct work_struct work_free_job;
struct delayed_work work_tdr;
- struct task_struct *thread;
struct list_head pending_list;
spinlock_t job_list_lock;
int hang_limit;
@@ -516,19 +537,22 @@ struct drm_gpu_scheduler {
atomic_t _score;
bool ready;
bool free_guilty;
+ bool pause_submit;
+ bool own_submit_wq;
struct device *dev;
};
int drm_sched_init(struct drm_gpu_scheduler *sched,
const struct drm_sched_backend_ops *ops,
- uint32_t hw_submission, unsigned hang_limit,
+ struct workqueue_struct *submit_wq,
+ u32 num_rqs, u32 credit_limit, unsigned int hang_limit,
long timeout, struct workqueue_struct *timeout_wq,
atomic_t *score, const char *name, struct device *dev);
void drm_sched_fini(struct drm_gpu_scheduler *sched);
int drm_sched_job_init(struct drm_sched_job *job,
struct drm_sched_entity *entity,
- void *owner);
+ u32 credits, void *owner);
void drm_sched_job_arm(struct drm_sched_job *job);
int drm_sched_job_add_dependency(struct drm_sched_job *job,
struct dma_fence *fence);
@@ -548,8 +572,12 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
struct drm_gpu_scheduler **sched_list,
unsigned int num_sched_list);
+void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched);
void drm_sched_job_cleanup(struct drm_sched_job *job);
-void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
+void drm_sched_wakeup(struct drm_gpu_scheduler *sched, struct drm_sched_entity *entity);
+bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched);
+void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched);
+void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
@@ -581,17 +609,17 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
enum drm_sched_priority priority);
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
+int drm_sched_entity_error(struct drm_sched_entity *entity);
-void drm_sched_fence_set_parent(struct drm_sched_fence *s_fence,
- struct dma_fence *fence);
struct drm_sched_fence *drm_sched_fence_alloc(
struct drm_sched_entity *s_entity, void *owner);
void drm_sched_fence_init(struct drm_sched_fence *fence,
struct drm_sched_entity *entity);
void drm_sched_fence_free(struct drm_sched_fence *fence);
-void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
-void drm_sched_fence_finished(struct drm_sched_fence *fence);
+void drm_sched_fence_scheduled(struct drm_sched_fence *fence,
+ struct dma_fence *parent);
+void drm_sched_fence_finished(struct drm_sched_fence *fence, int result);
unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
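The run-queue, credit and workqueue reworks above change both setup entry points; a sketch of the new calls, where my_sched_ops, ring, ctx and job are hypothetical driver objects:

	ret = drm_sched_init(&ring->sched, &my_sched_ops,
			     NULL,			/* let the scheduler allocate its own submit_wq */
			     DRM_SCHED_PRIORITY_COUNT,	/* one run-queue per priority */
			     64,			/* credit_limit, formerly hw_submission */
			     3,				/* hang_limit */
			     msecs_to_jiffies(5000),	/* timeout */
			     NULL, NULL, "my-ring", dev);
	if (ret)
		return ret;

	/* Each job now declares its credit cost up front. */
	ret = drm_sched_job_init(&job->base, &ctx->entity, 1, ctx);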
diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h
index c1e2a43d2d1e..56a84ee1c64c 100644
--- a/include/drm/i915_component.h
+++ b/include/drm/i915_component.h
@@ -29,7 +29,8 @@
enum i915_component_type {
I915_COMPONENT_AUDIO = 1,
I915_COMPONENT_HDCP,
- I915_COMPONENT_PXP
+ I915_COMPONENT_PXP,
+ I915_COMPONENT_GSC_PROXY,
};
/* MAX_PORT is the number of port
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 7adce327c1c2..adff68538484 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -42,7 +42,7 @@ extern struct resource intel_graphics_stolen_res;
* The Bridge device's PCI config space has information about the
* fb aperture size and the amount of pre-reserved memory.
* This is all handled in the intel-gtt.ko module. i915.ko only
- * cares about the vga bit for the vga rbiter.
+ * cares about the vga bit for the vga arbiter.
*/
#define INTEL_GMCH_CTRL 0x52
#define INTEL_GMCH_VGA_DISABLE (1 << 1)
diff --git a/include/drm/i915_gsc_proxy_mei_interface.h b/include/drm/i915_gsc_proxy_mei_interface.h
new file mode 100644
index 000000000000..9462341d3ae1
--- /dev/null
+++ b/include/drm/i915_gsc_proxy_mei_interface.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (c) 2022-2023 Intel Corporation
+ */
+
+#ifndef _I915_GSC_PROXY_MEI_INTERFACE_H_
+#define _I915_GSC_PROXY_MEI_INTERFACE_H_
+
+#include <linux/types.h>
+
+struct device;
+struct module;
+
+/**
+ * struct i915_gsc_proxy_component_ops - ops for GSC Proxy services.
+ * @owner: Module providing the ops
+ * @send: sends a proxy message from GSC FW to ME FW
+ * @recv: receives a proxy message for GSC FW from ME FW
+ */
+struct i915_gsc_proxy_component_ops {
+ struct module *owner;
+
+ /**
+ * send - Sends a proxy message to ME FW.
+ * @dev: device struct corresponding to the mei device
+ * @buf: message buffer to send
+ * @size: size of the message
+ * Return: bytes sent on success, negative errno value on failure
+ */
+ int (*send)(struct device *dev, const void *buf, size_t size);
+
+ /**
+ * recv - Receives a proxy message from ME FW.
+ * @dev: device struct corresponding to the mei device
+ * @buf: message buffer to contain the received message
+ * @size: size of the buffer
+ * Return: bytes received on success, negative errno value on failure
+ */
+ int (*recv)(struct device *dev, void *buf, size_t size);
+};
+
+/**
+ * struct i915_gsc_proxy_component - Used for communication between i915 and
+ * MEI drivers for GSC proxy services
+ * @mei_dev: device that provides the GSC proxy service.
+ * @ops: Ops implemented by GSC proxy driver, used by i915 driver.
+ */
+struct i915_gsc_proxy_component {
+ struct device *mei_dev;
+ const struct i915_gsc_proxy_component_ops *ops;
+};
+
+#endif /* _I915_GSC_PROXY_MEI_INTERFACE_H_ */
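On the provider side, the MEI driver fills in the ops table before registering itself as a component; a stub sketch with hypothetical example_* names:

	static int example_send(struct device *dev, const void *buf, size_t size)
	{
		/* Forward the proxy message to ME FW over the MEI channel. */
		return size;	/* bytes sent on success, per the ops contract */
	}

	static int example_recv(struct device *dev, void *buf, size_t size)
	{
		/* Copy the next proxy message for GSC FW into buf. */
		return 0;	/* bytes received */
	}

	static const struct i915_gsc_proxy_component_ops example_ops = {
		.owner = THIS_MODULE,
		.send = example_send,
		.recv = example_recv,
	};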
diff --git a/include/drm/i915_hdcp_interface.h b/include/drm/i915_hdcp_interface.h
index 2059b066f8a1..4c9c8167c2d5 100644
--- a/include/drm/i915_hdcp_interface.h
+++ b/include/drm/i915_hdcp_interface.h
@@ -168,12 +168,12 @@ struct i915_hdcp_ops {
};
/**
- * struct i915_hdcp_master - Used for communication between i915
+ * struct i915_hdcp_arbiter - Used for communication between i915
* and hdcp drivers for the HDCP2.2 services
 * @hdcp_dev: device that provides the HDCP2.2 service from MEI Bus.
 * @hdcp_ops: Ops implemented by hdcp driver or intel_hdcp_gsc, used by i915 driver.
*/
-struct i915_hdcp_master {
+struct i915_hdcp_arbiter {
struct device *hdcp_dev;
const struct i915_hdcp_ops *ops;
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index e1e10dfbb661..fcf1849aa47c 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -689,14 +689,18 @@
#define INTEL_RPLU_IDS(info) \
INTEL_VGA_DEVICE(0xA721, info), \
INTEL_VGA_DEVICE(0xA7A1, info), \
- INTEL_VGA_DEVICE(0xA7A9, info)
+ INTEL_VGA_DEVICE(0xA7A9, info), \
+ INTEL_VGA_DEVICE(0xA7AC, info), \
+ INTEL_VGA_DEVICE(0xA7AD, info)
/* RPL-P */
#define INTEL_RPLP_IDS(info) \
INTEL_RPLU_IDS(info), \
INTEL_VGA_DEVICE(0xA720, info), \
INTEL_VGA_DEVICE(0xA7A0, info), \
- INTEL_VGA_DEVICE(0xA7A8, info)
+ INTEL_VGA_DEVICE(0xA7A8, info), \
+ INTEL_VGA_DEVICE(0xA7AA, info), \
+ INTEL_VGA_DEVICE(0xA7AB, info)
/* DG2 */
#define INTEL_DG2_G10_IDS(info) \
@@ -714,7 +718,11 @@
INTEL_VGA_DEVICE(0x56A5, info), \
INTEL_VGA_DEVICE(0x56A6, info), \
INTEL_VGA_DEVICE(0x56B0, info), \
- INTEL_VGA_DEVICE(0x56B1, info)
+ INTEL_VGA_DEVICE(0x56B1, info), \
+ INTEL_VGA_DEVICE(0x56BA, info), \
+ INTEL_VGA_DEVICE(0x56BB, info), \
+ INTEL_VGA_DEVICE(0x56BC, info), \
+ INTEL_VGA_DEVICE(0x56BD, info)
#define INTEL_DG2_G12_IDS(info) \
INTEL_VGA_DEVICE(0x5696, info), \
@@ -730,7 +738,8 @@
INTEL_DG2_G12_IDS(info)
#define INTEL_ATS_M150_IDS(info) \
- INTEL_VGA_DEVICE(0x56C0, info)
+ INTEL_VGA_DEVICE(0x56C0, info), \
+ INTEL_VGA_DEVICE(0x56C2, info)
#define INTEL_ATS_M75_IDS(info) \
INTEL_VGA_DEVICE(0x56C1, info)
@@ -738,18 +747,14 @@
#define INTEL_ATS_M_IDS(info) \
INTEL_ATS_M150_IDS(info), \
INTEL_ATS_M75_IDS(info)
+
/* MTL */
-#define INTEL_MTL_M_IDS(info) \
+#define INTEL_MTL_IDS(info) \
INTEL_VGA_DEVICE(0x7D40, info), \
- INTEL_VGA_DEVICE(0x7D60, info)
-
-#define INTEL_MTL_P_IDS(info) \
INTEL_VGA_DEVICE(0x7D45, info), \
INTEL_VGA_DEVICE(0x7D55, info), \
+ INTEL_VGA_DEVICE(0x7D60, info), \
+ INTEL_VGA_DEVICE(0x7D67, info), \
INTEL_VGA_DEVICE(0x7DD5, info)
-#define INTEL_MTL_IDS(info) \
- INTEL_MTL_M_IDS(info), \
- INTEL_MTL_P_IDS(info)
-
#endif /* _I915_PCIIDS_H */
diff --git a/include/drm/i915_pxp_tee_interface.h b/include/drm/i915_pxp_tee_interface.h
index a702b6ec17f7..7d96985f2d05 100644
--- a/include/drm/i915_pxp_tee_interface.h
+++ b/include/drm/i915_pxp_tee_interface.h
@@ -22,8 +22,10 @@ struct i915_pxp_component_ops {
*/
struct module *owner;
- int (*send)(struct device *dev, const void *message, size_t size);
- int (*recv)(struct device *dev, void *buffer, size_t size);
+ int (*send)(struct device *dev, const void *message, size_t size,
+ unsigned long timeout_ms);
+ int (*recv)(struct device *dev, void *buffer, size_t size,
+ unsigned long timeout_ms);
ssize_t (*gsc_command)(struct device *dev, u8 client_id, u32 fence_id,
struct scatterlist *sg_in, size_t total_in_len,
struct scatterlist *sg_out);
diff --git a/include/drm/task_barrier.h b/include/drm/task_barrier.h
index 087e3f649c52..f6e6ed529681 100644
--- a/include/drm/task_barrier.h
+++ b/include/drm/task_barrier.h
@@ -24,8 +24,8 @@
#include <linux/atomic.h>
/*
- * Reusable 2 PHASE task barrier (randevouz point) implementation for N tasks.
- * Based on the Little book of sempahores - https://greenteapress.com/wp/semaphores/
+ * Reusable 2 PHASE task barrier (rendez-vous point) implementation for N tasks.
+ * Based on the Little book of semaphores - https://greenteapress.com/wp/semaphores/
*/
diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h
index 8b113c384236..0223a41a64b2 100644
--- a/include/drm/ttm/ttm_bo.h
+++ b/include/drm/ttm/ttm_bo.h
@@ -355,8 +355,6 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
void ttm_bo_put(struct ttm_buffer_object *bo);
void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
struct ttm_lru_bulk_move *bulk);
-int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev);
-void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched);
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place);
int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
diff --git a/include/drm/ttm/ttm_device.h b/include/drm/ttm/ttm_device.h
index 56e82ba2d046..c22f30535c84 100644
--- a/include/drm/ttm/ttm_device.h
+++ b/include/drm/ttm/ttm_device.h
@@ -223,7 +223,7 @@ struct ttm_device {
* @funcs: Function table for the device.
* Constant after bo device init
*/
- struct ttm_device_funcs *funcs;
+ const struct ttm_device_funcs *funcs;
/**
* @sysman: Resource manager for the system domain.
@@ -287,7 +287,7 @@ static inline void ttm_set_driver_manager(struct ttm_device *bdev, int type,
bdev->man_drv[type] = manager;
}
-int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs,
+int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *funcs,
struct device *dev, struct address_space *mapping,
struct drm_vma_offset_manager *vma_manager,
bool use_dma_alloc, bool use_dma32);
diff --git a/include/drm/ttm/ttm_pool.h b/include/drm/ttm/ttm_pool.h
index 8ce14f9d202a..4490d43c63e3 100644
--- a/include/drm/ttm/ttm_pool.h
+++ b/include/drm/ttm/ttm_pool.h
@@ -61,18 +61,20 @@ struct ttm_pool_type {
* struct ttm_pool - Pool for all caching and orders
*
* @dev: the device we allocate pages for
+ * @nid: which numa node to use
* @use_dma_alloc: if coherent DMA allocations should be used
* @use_dma32: if GFP_DMA32 should be used
* @caching: pools for each caching/order
*/
struct ttm_pool {
struct device *dev;
+ int nid;
bool use_dma_alloc;
bool use_dma32;
struct {
- struct ttm_pool_type orders[MAX_ORDER + 1];
+ struct ttm_pool_type orders[NR_PAGE_ORDERS];
} caching[TTM_NUM_CACHING_TYPES];
};
@@ -81,7 +83,7 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt);
void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
- bool use_dma_alloc, bool use_dma32);
+ int nid, bool use_dma_alloc, bool use_dma32);
void ttm_pool_fini(struct ttm_pool *pool);
int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m);
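With the new @nid parameter, the NUMA placement is chosen at pool init time; a one-line sketch, where NUMA_NO_NODE (from <linux/numa.h>) keeps the old node-agnostic behaviour and dev_to_node(dev) would request node-local pages instead:

	ttm_pool_init(&bdev->pool, dev, NUMA_NO_NODE,
		      true /* use_dma_alloc */, false /* use_dma32 */);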
diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h
index 977ca195a536..a4eff85b1f44 100644
--- a/include/drm/ttm/ttm_tt.h
+++ b/include/drm/ttm/ttm_tt.h
@@ -222,7 +222,7 @@ void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages);
struct ttm_kmap_iter *ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
struct ttm_tt *tt);
-
+unsigned long ttm_tt_pages_limit(void);
#if IS_ENABLED(CONFIG_AGP)
#include <linux/agp_backend.h>
diff --git a/include/drm/xe_pciids.h b/include/drm/xe_pciids.h
new file mode 100644
index 000000000000..de1a344737bc
--- /dev/null
+++ b/include/drm/xe_pciids.h
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_PCIIDS_H_
+#define _XE_PCIIDS_H_
+
+/*
+ * Lists below can be turned into initializers for a struct pci_device_id
+ * by defining INTEL_VGA_DEVICE:
+ *
+ * #define INTEL_VGA_DEVICE(id, info) { \
+ * 0x8086, id, \
+ * ~0, ~0, \
+ * 0x030000, 0xff0000, \
+ * (unsigned long) info }
+ *
+ * And then calling it like:
+ *
+ * XE_TGL_GT1_IDS(INTEL_VGA_DEVICE, ## __VA_ARGS__)
+ *
+ * To turn them into something else, just provide a different macro passed as
+ * first argument.
+ */
+
+/* TGL */
+#define XE_TGL_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x9A60, ## __VA_ARGS__), \
+ MACRO__(0x9A68, ## __VA_ARGS__), \
+ MACRO__(0x9A70, ## __VA_ARGS__)
+
+#define XE_TGL_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x9A40, ## __VA_ARGS__), \
+ MACRO__(0x9A49, ## __VA_ARGS__), \
+ MACRO__(0x9A59, ## __VA_ARGS__), \
+ MACRO__(0x9A78, ## __VA_ARGS__), \
+ MACRO__(0x9AC0, ## __VA_ARGS__), \
+ MACRO__(0x9AC9, ## __VA_ARGS__), \
+ MACRO__(0x9AD9, ## __VA_ARGS__), \
+ MACRO__(0x9AF8, ## __VA_ARGS__)
+
+#define XE_TGL_IDS(MACRO__, ...) \
+ XE_TGL_GT1_IDS(MACRO__, ## __VA_ARGS__),\
+ XE_TGL_GT2_IDS(MACRO__, ## __VA_ARGS__)
+
+/* RKL */
+#define XE_RKL_IDS(MACRO__, ...) \
+ MACRO__(0x4C80, ## __VA_ARGS__), \
+ MACRO__(0x4C8A, ## __VA_ARGS__), \
+ MACRO__(0x4C8B, ## __VA_ARGS__), \
+ MACRO__(0x4C8C, ## __VA_ARGS__), \
+ MACRO__(0x4C90, ## __VA_ARGS__), \
+ MACRO__(0x4C9A, ## __VA_ARGS__)
+
+/* DG1 */
+#define XE_DG1_IDS(MACRO__, ...) \
+ MACRO__(0x4905, ## __VA_ARGS__), \
+ MACRO__(0x4906, ## __VA_ARGS__), \
+ MACRO__(0x4907, ## __VA_ARGS__), \
+ MACRO__(0x4908, ## __VA_ARGS__), \
+ MACRO__(0x4909, ## __VA_ARGS__)
+
+/* ADL-S */
+#define XE_ADLS_IDS(MACRO__, ...) \
+ MACRO__(0x4680, ## __VA_ARGS__), \
+ MACRO__(0x4682, ## __VA_ARGS__), \
+ MACRO__(0x4688, ## __VA_ARGS__), \
+ MACRO__(0x468A, ## __VA_ARGS__), \
+ MACRO__(0x468B, ## __VA_ARGS__), \
+ MACRO__(0x4690, ## __VA_ARGS__), \
+ MACRO__(0x4692, ## __VA_ARGS__), \
+ MACRO__(0x4693, ## __VA_ARGS__)
+
+/* ADL-P */
+#define XE_ADLP_IDS(MACRO__, ...) \
+ MACRO__(0x46A0, ## __VA_ARGS__), \
+ MACRO__(0x46A1, ## __VA_ARGS__), \
+ MACRO__(0x46A2, ## __VA_ARGS__), \
+ MACRO__(0x46A3, ## __VA_ARGS__), \
+ MACRO__(0x46A6, ## __VA_ARGS__), \
+ MACRO__(0x46A8, ## __VA_ARGS__), \
+ MACRO__(0x46AA, ## __VA_ARGS__), \
+ MACRO__(0x462A, ## __VA_ARGS__), \
+ MACRO__(0x4626, ## __VA_ARGS__), \
+ MACRO__(0x4628, ## __VA_ARGS__), \
+ MACRO__(0x46B0, ## __VA_ARGS__), \
+ MACRO__(0x46B1, ## __VA_ARGS__), \
+ MACRO__(0x46B2, ## __VA_ARGS__), \
+ MACRO__(0x46B3, ## __VA_ARGS__), \
+ MACRO__(0x46C0, ## __VA_ARGS__), \
+ MACRO__(0x46C1, ## __VA_ARGS__), \
+ MACRO__(0x46C2, ## __VA_ARGS__), \
+ MACRO__(0x46C3, ## __VA_ARGS__)
+
+/* ADL-N */
+#define XE_ADLN_IDS(MACRO__, ...) \
+ MACRO__(0x46D0, ## __VA_ARGS__), \
+ MACRO__(0x46D1, ## __VA_ARGS__), \
+ MACRO__(0x46D2, ## __VA_ARGS__)
+
+/* RPL-S */
+#define XE_RPLS_IDS(MACRO__, ...) \
+ MACRO__(0xA780, ## __VA_ARGS__), \
+ MACRO__(0xA781, ## __VA_ARGS__), \
+ MACRO__(0xA782, ## __VA_ARGS__), \
+ MACRO__(0xA783, ## __VA_ARGS__), \
+ MACRO__(0xA788, ## __VA_ARGS__), \
+ MACRO__(0xA789, ## __VA_ARGS__), \
+ MACRO__(0xA78A, ## __VA_ARGS__), \
+ MACRO__(0xA78B, ## __VA_ARGS__)
+
+/* RPL-U */
+#define XE_RPLU_IDS(MACRO__, ...) \
+ MACRO__(0xA721, ## __VA_ARGS__), \
+ MACRO__(0xA7A1, ## __VA_ARGS__), \
+ MACRO__(0xA7A9, ## __VA_ARGS__), \
+ MACRO__(0xA7AC, ## __VA_ARGS__), \
+ MACRO__(0xA7AD, ## __VA_ARGS__)
+
+/* RPL-P */
+#define XE_RPLP_IDS(MACRO__, ...) \
+ XE_RPLU_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0xA720, ## __VA_ARGS__), \
+ MACRO__(0xA7A0, ## __VA_ARGS__), \
+ MACRO__(0xA7A8, ## __VA_ARGS__), \
+ MACRO__(0xA7AA, ## __VA_ARGS__), \
+ MACRO__(0xA7AB, ## __VA_ARGS__)
+
+/* DG2 */
+#define XE_DG2_G10_IDS(MACRO__, ...) \
+ MACRO__(0x5690, ## __VA_ARGS__), \
+ MACRO__(0x5691, ## __VA_ARGS__), \
+ MACRO__(0x5692, ## __VA_ARGS__), \
+ MACRO__(0x56A0, ## __VA_ARGS__), \
+ MACRO__(0x56A1, ## __VA_ARGS__), \
+ MACRO__(0x56A2, ## __VA_ARGS__)
+
+#define XE_DG2_G11_IDS(MACRO__, ...) \
+ MACRO__(0x5693, ## __VA_ARGS__), \
+ MACRO__(0x5694, ## __VA_ARGS__), \
+ MACRO__(0x5695, ## __VA_ARGS__), \
+ MACRO__(0x56A5, ## __VA_ARGS__), \
+ MACRO__(0x56A6, ## __VA_ARGS__), \
+ MACRO__(0x56B0, ## __VA_ARGS__), \
+ MACRO__(0x56B1, ## __VA_ARGS__), \
+ MACRO__(0x56BA, ## __VA_ARGS__), \
+ MACRO__(0x56BB, ## __VA_ARGS__), \
+ MACRO__(0x56BC, ## __VA_ARGS__), \
+ MACRO__(0x56BD, ## __VA_ARGS__)
+
+#define XE_DG2_G12_IDS(MACRO__, ...) \
+ MACRO__(0x5696, ## __VA_ARGS__), \
+ MACRO__(0x5697, ## __VA_ARGS__), \
+ MACRO__(0x56A3, ## __VA_ARGS__), \
+ MACRO__(0x56A4, ## __VA_ARGS__), \
+ MACRO__(0x56B2, ## __VA_ARGS__), \
+ MACRO__(0x56B3, ## __VA_ARGS__)
+
+#define XE_DG2_IDS(MACRO__, ...) \
+ XE_DG2_G10_IDS(MACRO__, ## __VA_ARGS__),\
+ XE_DG2_G11_IDS(MACRO__, ## __VA_ARGS__),\
+ XE_DG2_G12_IDS(MACRO__, ## __VA_ARGS__)
+
+#define XE_ATS_M150_IDS(MACRO__, ...) \
+ MACRO__(0x56C0, ## __VA_ARGS__), \
+ MACRO__(0x56C2, ## __VA_ARGS__)
+
+#define XE_ATS_M75_IDS(MACRO__, ...) \
+ MACRO__(0x56C1, ## __VA_ARGS__)
+
+#define XE_ATS_M_IDS(MACRO__, ...) \
+ XE_ATS_M150_IDS(MACRO__, ## __VA_ARGS__),\
+ XE_ATS_M75_IDS(MACRO__, ## __VA_ARGS__)
+
+/* MTL / ARL */
+#define XE_MTL_IDS(MACRO__, ...) \
+ MACRO__(0x7D40, ## __VA_ARGS__), \
+ MACRO__(0x7D45, ## __VA_ARGS__), \
+ MACRO__(0x7D55, ## __VA_ARGS__), \
+ MACRO__(0x7D60, ## __VA_ARGS__), \
+ MACRO__(0x7D67, ## __VA_ARGS__), \
+ MACRO__(0x7DD5, ## __VA_ARGS__)
+
+#define XE_LNL_IDS(MACRO__, ...) \
+ MACRO__(0x6420, ## __VA_ARGS__), \
+ MACRO__(0x64A0, ## __VA_ARGS__), \
+ MACRO__(0x64B0, ## __VA_ARGS__)
+
+#endif
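Following the recipe in the header's own comment, a sketch of expanding these lists into a PCI ID table; tgl_desc and dg2_desc stand in for hypothetical driver data:

	#define INTEL_VGA_DEVICE(id, info) {		\
		0x8086, id,				\
		~0, ~0,					\
		0x030000, 0xff0000,			\
		(unsigned long) info }

	static const struct pci_device_id example_ids[] = {
		XE_TGL_IDS(INTEL_VGA_DEVICE, &tgl_desc),
		XE_DG2_IDS(INTEL_VGA_DEVICE, &dg2_desc),
		{ }
	};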
diff --git a/include/dt-bindings/arm/qcom,ids.h b/include/dt-bindings/arm/qcom,ids.h
index 802495b20276..51e0f6059410 100644
--- a/include/dt-bindings/arm/qcom,ids.h
+++ b/include/dt-bindings/arm/qcom,ids.h
@@ -193,6 +193,7 @@
#define QCOM_ID_SDA439 363
#define QCOM_ID_SDA429 364
#define QCOM_ID_SM7150 365
+#define QCOM_ID_SM7150P 366
#define QCOM_ID_IPQ8070 375
#define QCOM_ID_IPQ8071 376
#define QCOM_ID_QM215 386
@@ -203,6 +204,9 @@
#define QCOM_ID_SM6125 394
#define QCOM_ID_IPQ8070A 395
#define QCOM_ID_IPQ8071A 396
+#define QCOM_ID_IPQ8172 397
+#define QCOM_ID_IPQ8173 398
+#define QCOM_ID_IPQ8174 399
#define QCOM_ID_IPQ6018 402
#define QCOM_ID_IPQ6028 403
#define QCOM_ID_SDM429W 416
@@ -215,7 +219,11 @@
#define QCOM_ID_SDA429W 437
#define QCOM_ID_SM8350 439
#define QCOM_ID_QCM2290 441
+#define QCOM_ID_SM7125 443
#define QCOM_ID_SM6115 444
+#define QCOM_ID_IPQ5010 446
+#define QCOM_ID_IPQ5018 447
+#define QCOM_ID_IPQ5028 448
#define QCOM_ID_SC8280XP 449
#define QCOM_ID_IPQ6005 453
#define QCOM_ID_QRB5165 455
@@ -229,6 +237,10 @@
#define QCOM_ID_SM8450_3 482
#define QCOM_ID_SC7280 487
#define QCOM_ID_SC7180P 495
+#define QCOM_ID_QCM6490 497
+#define QCOM_ID_IPQ5000 503
+#define QCOM_ID_IPQ0509 504
+#define QCOM_ID_IPQ0518 505
#define QCOM_ID_SM6375 507
#define QCOM_ID_IPQ9514 510
#define QCOM_ID_IPQ9550 511
@@ -236,18 +248,24 @@
#define QCOM_ID_IPQ9570 513
#define QCOM_ID_IPQ9574 514
#define QCOM_ID_SM8550 519
+#define QCOM_ID_IPQ5016 520
#define QCOM_ID_IPQ9510 521
#define QCOM_ID_QRB4210 523
#define QCOM_ID_QRB2210 524
#define QCOM_ID_SA8775P 534
#define QCOM_ID_QRU1000 539
#define QCOM_ID_QDU1000 545
+#define QCOM_ID_SM8650 557
+#define QCOM_ID_SM4450 568
#define QCOM_ID_QDU1010 587
#define QCOM_ID_QRU1032 588
#define QCOM_ID_QRU1052 589
#define QCOM_ID_QRU1062 590
#define QCOM_ID_IPQ5332 592
#define QCOM_ID_IPQ5322 593
+#define QCOM_ID_IPQ5312 594
+#define QCOM_ID_IPQ5302 595
+#define QCOM_ID_IPQ5300 624
/*
* The board type and revision information, used by Qualcomm bootloaders and
diff --git a/include/dt-bindings/ata/ahci.h b/include/dt-bindings/ata/ahci.h
index 77997b35612c..b3f3b7cf9af8 100644
--- a/include/dt-bindings/ata/ahci.h
+++ b/include/dt-bindings/ata/ahci.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-only or BSD-2-Clause */
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
/*
* This header provides constants for most AHCI bindings.
*/
diff --git a/include/dt-bindings/clock/amlogic,a1-peripherals-clkc.h b/include/dt-bindings/clock/amlogic,a1-peripherals-clkc.h
new file mode 100644
index 000000000000..06f198ee7623
--- /dev/null
+++ b/include/dt-bindings/clock/amlogic,a1-peripherals-clkc.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (c) 2019 Amlogic, Inc. All rights reserved.
+ * Author: Jian Hu <jian.hu@amlogic.com>
+ *
+ * Copyright (c) 2023, SberDevices. All Rights Reserved.
+ * Author: Dmitry Rokosov <ddrokosov@sberdevices.ru>
+ */
+
+#ifndef __A1_PERIPHERALS_CLKC_H
+#define __A1_PERIPHERALS_CLKC_H
+
+#define CLKID_XTAL_IN 0
+#define CLKID_FIXPLL_IN 1
+#define CLKID_USB_PHY_IN 2
+#define CLKID_USB_CTRL_IN 3
+#define CLKID_HIFIPLL_IN 4
+#define CLKID_SYSPLL_IN 5
+#define CLKID_DDS_IN 6
+#define CLKID_SYS 7
+#define CLKID_CLKTREE 8
+#define CLKID_RESET_CTRL 9
+#define CLKID_ANALOG_CTRL 10
+#define CLKID_PWR_CTRL 11
+#define CLKID_PAD_CTRL 12
+#define CLKID_SYS_CTRL 13
+#define CLKID_TEMP_SENSOR 14
+#define CLKID_AM2AXI_DIV 15
+#define CLKID_SPICC_B 16
+#define CLKID_SPICC_A 17
+#define CLKID_MSR 18
+#define CLKID_AUDIO 19
+#define CLKID_JTAG_CTRL 20
+#define CLKID_SARADC_EN 21
+#define CLKID_PWM_EF 22
+#define CLKID_PWM_CD 23
+#define CLKID_PWM_AB 24
+#define CLKID_CEC 25
+#define CLKID_I2C_S 26
+#define CLKID_IR_CTRL 27
+#define CLKID_I2C_M_D 28
+#define CLKID_I2C_M_C 29
+#define CLKID_I2C_M_B 30
+#define CLKID_I2C_M_A 31
+#define CLKID_ACODEC 32
+#define CLKID_OTP 33
+#define CLKID_SD_EMMC_A 34
+#define CLKID_USB_PHY 35
+#define CLKID_USB_CTRL 36
+#define CLKID_SYS_DSPB 37
+#define CLKID_SYS_DSPA 38
+#define CLKID_DMA 39
+#define CLKID_IRQ_CTRL 40
+#define CLKID_NIC 41
+#define CLKID_GIC 42
+#define CLKID_UART_C 43
+#define CLKID_UART_B 44
+#define CLKID_UART_A 45
+#define CLKID_SYS_PSRAM 46
+#define CLKID_RSA 47
+#define CLKID_CORESIGHT 48
+#define CLKID_AM2AXI_VAD 49
+#define CLKID_AUDIO_VAD 50
+#define CLKID_AXI_DMC 51
+#define CLKID_AXI_PSRAM 52
+#define CLKID_RAMB 53
+#define CLKID_RAMA 54
+#define CLKID_AXI_SPIFC 55
+#define CLKID_AXI_NIC 56
+#define CLKID_AXI_DMA 57
+#define CLKID_CPU_CTRL 58
+#define CLKID_ROM 59
+#define CLKID_PROC_I2C 60
+#define CLKID_DSPA_SEL 61
+#define CLKID_DSPB_SEL 62
+#define CLKID_DSPA_EN 63
+#define CLKID_DSPA_EN_NIC 64
+#define CLKID_DSPB_EN 65
+#define CLKID_DSPB_EN_NIC 66
+#define CLKID_RTC 67
+#define CLKID_CECA_32K 68
+#define CLKID_CECB_32K 69
+#define CLKID_24M 70
+#define CLKID_12M 71
+#define CLKID_FCLK_DIV2_DIVN 72
+#define CLKID_GEN 73
+#define CLKID_SARADC_SEL 74
+#define CLKID_SARADC 75
+#define CLKID_PWM_A 76
+#define CLKID_PWM_B 77
+#define CLKID_PWM_C 78
+#define CLKID_PWM_D 79
+#define CLKID_PWM_E 80
+#define CLKID_PWM_F 81
+#define CLKID_SPICC 82
+#define CLKID_TS 83
+#define CLKID_SPIFC 84
+#define CLKID_USB_BUS 85
+#define CLKID_SD_EMMC 86
+#define CLKID_PSRAM 87
+#define CLKID_DMC 88
+#define CLKID_SYS_A_SEL 89
+#define CLKID_SYS_A_DIV 90
+#define CLKID_SYS_A 91
+#define CLKID_SYS_B_SEL 92
+#define CLKID_SYS_B_DIV 93
+#define CLKID_SYS_B 94
+#define CLKID_DSPA_A_SEL 95
+#define CLKID_DSPA_A_DIV 96
+#define CLKID_DSPA_A 97
+#define CLKID_DSPA_B_SEL 98
+#define CLKID_DSPA_B_DIV 99
+#define CLKID_DSPA_B 100
+#define CLKID_DSPB_A_SEL 101
+#define CLKID_DSPB_A_DIV 102
+#define CLKID_DSPB_A 103
+#define CLKID_DSPB_B_SEL 104
+#define CLKID_DSPB_B_DIV 105
+#define CLKID_DSPB_B 106
+#define CLKID_RTC_32K_IN 107
+#define CLKID_RTC_32K_DIV 108
+#define CLKID_RTC_32K_XTAL 109
+#define CLKID_RTC_32K_SEL 110
+#define CLKID_CECB_32K_IN 111
+#define CLKID_CECB_32K_DIV 112
+#define CLKID_CECB_32K_SEL_PRE 113
+#define CLKID_CECB_32K_SEL 114
+#define CLKID_CECA_32K_IN 115
+#define CLKID_CECA_32K_DIV 116
+#define CLKID_CECA_32K_SEL_PRE 117
+#define CLKID_CECA_32K_SEL 118
+#define CLKID_DIV2_PRE 119
+#define CLKID_24M_DIV2 120
+#define CLKID_GEN_SEL 121
+#define CLKID_GEN_DIV 122
+#define CLKID_SARADC_DIV 123
+#define CLKID_PWM_A_SEL 124
+#define CLKID_PWM_A_DIV 125
+#define CLKID_PWM_B_SEL 126
+#define CLKID_PWM_B_DIV 127
+#define CLKID_PWM_C_SEL 128
+#define CLKID_PWM_C_DIV 129
+#define CLKID_PWM_D_SEL 130
+#define CLKID_PWM_D_DIV 131
+#define CLKID_PWM_E_SEL 132
+#define CLKID_PWM_E_DIV 133
+#define CLKID_PWM_F_SEL 134
+#define CLKID_PWM_F_DIV 135
+#define CLKID_SPICC_SEL 136
+#define CLKID_SPICC_DIV 137
+#define CLKID_SPICC_SEL2 138
+#define CLKID_TS_DIV 139
+#define CLKID_SPIFC_SEL 140
+#define CLKID_SPIFC_DIV 141
+#define CLKID_SPIFC_SEL2 142
+#define CLKID_USB_BUS_SEL 143
+#define CLKID_USB_BUS_DIV 144
+#define CLKID_SD_EMMC_SEL 145
+#define CLKID_SD_EMMC_DIV 146
+#define CLKID_SD_EMMC_SEL2 147
+#define CLKID_PSRAM_SEL 148
+#define CLKID_PSRAM_DIV 149
+#define CLKID_PSRAM_SEL2 150
+#define CLKID_DMC_SEL 151
+#define CLKID_DMC_DIV 152
+#define CLKID_DMC_SEL2 153
+
+#endif /* __A1_PERIPHERALS_CLKC_H */
diff --git a/include/dt-bindings/clock/amlogic,a1-pll-clkc.h b/include/dt-bindings/clock/amlogic,a1-pll-clkc.h
new file mode 100644
index 000000000000..2b660c0f2c9f
--- /dev/null
+++ b/include/dt-bindings/clock/amlogic,a1-pll-clkc.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (c) 2019 Amlogic, Inc. All rights reserved.
+ * Author: Jian Hu <jian.hu@amlogic.com>
+ *
+ * Copyright (c) 2023, SberDevices. All Rights Reserved.
+ * Author: Dmitry Rokosov <ddrokosov@sberdevices.ru>
+ */
+
+#ifndef __A1_PLL_CLKC_H
+#define __A1_PLL_CLKC_H
+
+#define CLKID_FIXED_PLL_DCO 0
+#define CLKID_FIXED_PLL 1
+#define CLKID_FCLK_DIV2_DIV 2
+#define CLKID_FCLK_DIV3_DIV 3
+#define CLKID_FCLK_DIV5_DIV 4
+#define CLKID_FCLK_DIV7_DIV 5
+#define CLKID_FCLK_DIV2 6
+#define CLKID_FCLK_DIV3 7
+#define CLKID_FCLK_DIV5 8
+#define CLKID_FCLK_DIV7 9
+#define CLKID_HIFI_PLL 10
+
+#endif /* __A1_PLL_CLKC_H */
diff --git a/include/dt-bindings/clock/amlogic,s4-peripherals-clkc.h b/include/dt-bindings/clock/amlogic,s4-peripherals-clkc.h
new file mode 100644
index 000000000000..861a331963ac
--- /dev/null
+++ b/include/dt-bindings/clock/amlogic,s4-peripherals-clkc.h
@@ -0,0 +1,236 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2022-2023 Amlogic, Inc. All rights reserved.
+ * Author: Yu Tu <yu.tu@amlogic.com>
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_AMLOGIC_S4_PERIPHERALS_CLKC_H
+#define _DT_BINDINGS_CLOCK_AMLOGIC_S4_PERIPHERALS_CLKC_H
+
+#define CLKID_RTC_32K_CLKIN 0
+#define CLKID_RTC_32K_DIV 1
+#define CLKID_RTC_32K_SEL 2
+#define CLKID_RTC_32K_XATL 3
+#define CLKID_RTC 4
+#define CLKID_SYS_CLK_B_SEL 5
+#define CLKID_SYS_CLK_B_DIV 6
+#define CLKID_SYS_CLK_B 7
+#define CLKID_SYS_CLK_A_SEL 8
+#define CLKID_SYS_CLK_A_DIV 9
+#define CLKID_SYS_CLK_A 10
+#define CLKID_SYS 11
+#define CLKID_CECA_32K_CLKIN 12
+#define CLKID_CECA_32K_DIV 13
+#define CLKID_CECA_32K_SEL_PRE 14
+#define CLKID_CECA_32K_SEL 15
+#define CLKID_CECA_32K_CLKOUT 16
+#define CLKID_CECB_32K_CLKIN 17
+#define CLKID_CECB_32K_DIV 18
+#define CLKID_CECB_32K_SEL_PRE 19
+#define CLKID_CECB_32K_SEL 20
+#define CLKID_CECB_32K_CLKOUT 21
+#define CLKID_SC_CLK_SEL 22
+#define CLKID_SC_CLK_DIV 23
+#define CLKID_SC 24
+#define CLKID_12_24M 25
+#define CLKID_12M_CLK_DIV 26
+#define CLKID_12_24M_CLK_SEL 27
+#define CLKID_VID_PLL_DIV 28
+#define CLKID_VID_PLL_SEL 29
+#define CLKID_VID_PLL 30
+#define CLKID_VCLK_SEL 31
+#define CLKID_VCLK2_SEL 32
+#define CLKID_VCLK_INPUT 33
+#define CLKID_VCLK2_INPUT 34
+#define CLKID_VCLK_DIV 35
+#define CLKID_VCLK2_DIV 36
+#define CLKID_VCLK 37
+#define CLKID_VCLK2 38
+#define CLKID_VCLK_DIV1 39
+#define CLKID_VCLK_DIV2_EN 40
+#define CLKID_VCLK_DIV4_EN 41
+#define CLKID_VCLK_DIV6_EN 42
+#define CLKID_VCLK_DIV12_EN 43
+#define CLKID_VCLK2_DIV1 44
+#define CLKID_VCLK2_DIV2_EN 45
+#define CLKID_VCLK2_DIV4_EN 46
+#define CLKID_VCLK2_DIV6_EN 47
+#define CLKID_VCLK2_DIV12_EN 48
+#define CLKID_VCLK_DIV2 49
+#define CLKID_VCLK_DIV4 50
+#define CLKID_VCLK_DIV6 51
+#define CLKID_VCLK_DIV12 52
+#define CLKID_VCLK2_DIV2 53
+#define CLKID_VCLK2_DIV4 54
+#define CLKID_VCLK2_DIV6 55
+#define CLKID_VCLK2_DIV12 56
+#define CLKID_CTS_ENCI_SEL 57
+#define CLKID_CTS_ENCP_SEL 58
+#define CLKID_CTS_VDAC_SEL 59
+#define CLKID_HDMI_TX_SEL 60
+#define CLKID_CTS_ENCI 61
+#define CLKID_CTS_ENCP 62
+#define CLKID_CTS_VDAC 63
+#define CLKID_HDMI_TX 64
+#define CLKID_HDMI_SEL 65
+#define CLKID_HDMI_DIV 66
+#define CLKID_HDMI 67
+#define CLKID_TS_CLK_DIV 68
+#define CLKID_TS 69
+#define CLKID_MALI_0_SEL 70
+#define CLKID_MALI_0_DIV 71
+#define CLKID_MALI_0 72
+#define CLKID_MALI_1_SEL 73
+#define CLKID_MALI_1_DIV 74
+#define CLKID_MALI_1 75
+#define CLKID_MALI_SEL 76
+#define CLKID_VDEC_P0_SEL 77
+#define CLKID_VDEC_P0_DIV 78
+#define CLKID_VDEC_P0 79
+#define CLKID_VDEC_P1_SEL 80
+#define CLKID_VDEC_P1_DIV 81
+#define CLKID_VDEC_P1 82
+#define CLKID_VDEC_SEL 83
+#define CLKID_HEVCF_P0_SEL 84
+#define CLKID_HEVCF_P0_DIV 85
+#define CLKID_HEVCF_P0 86
+#define CLKID_HEVCF_P1_SEL 87
+#define CLKID_HEVCF_P1_DIV 88
+#define CLKID_HEVCF_P1 89
+#define CLKID_HEVCF_SEL 90
+#define CLKID_VPU_0_SEL 91
+#define CLKID_VPU_0_DIV 92
+#define CLKID_VPU_0 93
+#define CLKID_VPU_1_SEL 94
+#define CLKID_VPU_1_DIV 95
+#define CLKID_VPU_1 96
+#define CLKID_VPU 97
+#define CLKID_VPU_CLKB_TMP_SEL 98
+#define CLKID_VPU_CLKB_TMP_DIV 99
+#define CLKID_VPU_CLKB_TMP 100
+#define CLKID_VPU_CLKB_DIV 101
+#define CLKID_VPU_CLKB 102
+#define CLKID_VPU_CLKC_P0_SEL 103
+#define CLKID_VPU_CLKC_P0_DIV 104
+#define CLKID_VPU_CLKC_P0 105
+#define CLKID_VPU_CLKC_P1_SEL 106
+#define CLKID_VPU_CLKC_P1_DIV 107
+#define CLKID_VPU_CLKC_P1 108
+#define CLKID_VPU_CLKC_SEL 109
+#define CLKID_VAPB_0_SEL 110
+#define CLKID_VAPB_0_DIV 111
+#define CLKID_VAPB_0 112
+#define CLKID_VAPB_1_SEL 113
+#define CLKID_VAPB_1_DIV 114
+#define CLKID_VAPB_1 115
+#define CLKID_VAPB 116
+#define CLKID_GE2D 117
+#define CLKID_VDIN_MEAS_SEL 118
+#define CLKID_VDIN_MEAS_DIV 119
+#define CLKID_VDIN_MEAS 120
+#define CLKID_SD_EMMC_C_CLK_SEL 121
+#define CLKID_SD_EMMC_C_CLK_DIV 122
+#define CLKID_SD_EMMC_C 123
+#define CLKID_SD_EMMC_A_CLK_SEL 124
+#define CLKID_SD_EMMC_A_CLK_DIV 125
+#define CLKID_SD_EMMC_A 126
+#define CLKID_SD_EMMC_B_CLK_SEL 127
+#define CLKID_SD_EMMC_B_CLK_DIV 128
+#define CLKID_SD_EMMC_B 129
+#define CLKID_SPICC0_SEL 130
+#define CLKID_SPICC0_DIV 131
+#define CLKID_SPICC0_EN 132
+#define CLKID_PWM_A_SEL 133
+#define CLKID_PWM_A_DIV 134
+#define CLKID_PWM_A 135
+#define CLKID_PWM_B_SEL 136
+#define CLKID_PWM_B_DIV 137
+#define CLKID_PWM_B 138
+#define CLKID_PWM_C_SEL 139
+#define CLKID_PWM_C_DIV 140
+#define CLKID_PWM_C 141
+#define CLKID_PWM_D_SEL 142
+#define CLKID_PWM_D_DIV 143
+#define CLKID_PWM_D 144
+#define CLKID_PWM_E_SEL 145
+#define CLKID_PWM_E_DIV 146
+#define CLKID_PWM_E 147
+#define CLKID_PWM_F_SEL 148
+#define CLKID_PWM_F_DIV 149
+#define CLKID_PWM_F 150
+#define CLKID_PWM_G_SEL 151
+#define CLKID_PWM_G_DIV 152
+#define CLKID_PWM_G 153
+#define CLKID_PWM_H_SEL 154
+#define CLKID_PWM_H_DIV 155
+#define CLKID_PWM_H 156
+#define CLKID_PWM_I_SEL 157
+#define CLKID_PWM_I_DIV 158
+#define CLKID_PWM_I 159
+#define CLKID_PWM_J_SEL 160
+#define CLKID_PWM_J_DIV 161
+#define CLKID_PWM_J 162
+#define CLKID_SARADC_SEL 163
+#define CLKID_SARADC_DIV 164
+#define CLKID_SARADC 165
+#define CLKID_GEN_SEL 166
+#define CLKID_GEN_DIV 167
+#define CLKID_GEN 168
+#define CLKID_DDR 169
+#define CLKID_DOS 170
+#define CLKID_ETHPHY 171
+#define CLKID_MALI 172
+#define CLKID_AOCPU 173
+#define CLKID_AUCPU 174
+#define CLKID_CEC 175
+#define CLKID_SDEMMC_A 176
+#define CLKID_SDEMMC_B 177
+#define CLKID_NAND 178
+#define CLKID_SMARTCARD 179
+#define CLKID_ACODEC 180
+#define CLKID_SPIFC 181
+#define CLKID_MSR 182
+#define CLKID_IR_CTRL 183
+#define CLKID_AUDIO 184
+#define CLKID_ETH 185
+#define CLKID_UART_A 186
+#define CLKID_UART_B 187
+#define CLKID_UART_C 188
+#define CLKID_UART_D 189
+#define CLKID_UART_E 190
+#define CLKID_AIFIFO 191
+#define CLKID_TS_DDR 192
+#define CLKID_TS_PLL 193
+#define CLKID_G2D 194
+#define CLKID_SPICC0 195
+#define CLKID_SPICC1 196
+#define CLKID_USB 197
+#define CLKID_I2C_M_A 198
+#define CLKID_I2C_M_B 199
+#define CLKID_I2C_M_C 200
+#define CLKID_I2C_M_D 201
+#define CLKID_I2C_M_E 202
+#define CLKID_HDMITX_APB 203
+#define CLKID_I2C_S_A 204
+#define CLKID_USB1_TO_DDR 205
+#define CLKID_HDCP22 206
+#define CLKID_MMC_APB 207
+#define CLKID_RSA 208
+#define CLKID_CPU_DEBUG 209
+#define CLKID_VPU_INTR 210
+#define CLKID_DEMOD 211
+#define CLKID_SAR_ADC 212
+#define CLKID_GIC 213
+#define CLKID_PWM_AB 214
+#define CLKID_PWM_CD 215
+#define CLKID_PWM_EF 216
+#define CLKID_PWM_GH 217
+#define CLKID_PWM_IJ 218
+#define CLKID_HDCP22_ESMCLK_SEL 219
+#define CLKID_HDCP22_ESMCLK_DIV 220
+#define CLKID_HDCP22_ESMCLK 221
+#define CLKID_HDCP22_SKPCLK_SEL 222
+#define CLKID_HDCP22_SKPCLK_DIV 223
+#define CLKID_HDCP22_SKPCLK 224
+
+#endif /* _DT_BINDINGS_CLOCK_AMLOGIC_S4_PERIPHERALS_CLKC_H */
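Aside: IDs from this header are consumed from board device trees through the
controller's #clock-cells. A minimal sketch, assuming a controller labelled
clkc_periphs and a UART consumer node (both illustrative, not part of this
patch; the header path is inferred from the include guard):

    #include <dt-bindings/clock/amlogic,s4-peripherals-clkc.h>

    &uart_a {
            /* CLKID_UART_A (186) is one of the peripheral IDs defined above */
            clocks = <&clkc_periphs CLKID_UART_A>;
    };
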
diff --git a/include/dt-bindings/clock/amlogic,s4-pll-clkc.h b/include/dt-bindings/clock/amlogic,s4-pll-clkc.h
new file mode 100644
index 000000000000..af9f110f8b62
--- /dev/null
+++ b/include/dt-bindings/clock/amlogic,s4-pll-clkc.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2022-2023 Amlogic, Inc. All rights reserved.
+ * Author: Yu Tu <yu.tu@amlogic.com>
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_AMLOGIC_S4_PLL_CLKC_H
+#define _DT_BINDINGS_CLOCK_AMLOGIC_S4_PLL_CLKC_H
+
+#define CLKID_FIXED_PLL_DCO 0
+#define CLKID_FIXED_PLL 1
+#define CLKID_FCLK_DIV2_DIV 2
+#define CLKID_FCLK_DIV2 3
+#define CLKID_FCLK_DIV3_DIV 4
+#define CLKID_FCLK_DIV3 5
+#define CLKID_FCLK_DIV4_DIV 6
+#define CLKID_FCLK_DIV4 7
+#define CLKID_FCLK_DIV5_DIV 8
+#define CLKID_FCLK_DIV5 9
+#define CLKID_FCLK_DIV7_DIV 10
+#define CLKID_FCLK_DIV7 11
+#define CLKID_FCLK_DIV2P5_DIV 12
+#define CLKID_FCLK_DIV2P5 13
+#define CLKID_GP0_PLL_DCO 14
+#define CLKID_GP0_PLL 15
+#define CLKID_HIFI_PLL_DCO 16
+#define CLKID_HIFI_PLL 17
+#define CLKID_HDMI_PLL_DCO 18
+#define CLKID_HDMI_PLL_OD 19
+#define CLKID_HDMI_PLL 20
+#define CLKID_MPLL_50M_DIV 21
+#define CLKID_MPLL_50M 22
+#define CLKID_MPLL_PREDIV 23
+#define CLKID_MPLL0_DIV 24
+#define CLKID_MPLL0 25
+#define CLKID_MPLL1_DIV 26
+#define CLKID_MPLL1 27
+#define CLKID_MPLL2_DIV 28
+#define CLKID_MPLL2 29
+#define CLKID_MPLL3_DIV 30
+#define CLKID_MPLL3 31
+
+#endif /* _DT_BINDINGS_CLOCK_AMLOGIC_S4_PLL_CLKC_H */
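The PLL IDs follow the same convention; for example, a consumer referencing
the fclk_div2 branch might look like this (controller label and consumer node
are illustrative assumptions, not taken from the patch):

    #include <dt-bindings/clock/amlogic,s4-pll-clkc.h>

    &clkc_periphs {
            clocks = <&clkc_pll CLKID_FCLK_DIV2>;
            clock-names = "fclk_div2";
    };
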
diff --git a/include/dt-bindings/clock/ast2600-clock.h b/include/dt-bindings/clock/ast2600-clock.h
index e149eee61588..712782177c90 100644
--- a/include/dt-bindings/clock/ast2600-clock.h
+++ b/include/dt-bindings/clock/ast2600-clock.h
@@ -90,7 +90,19 @@
/* Only list resets here that are not part of a clock gate + reset pair */
#define ASPEED_RESET_ADC 55
#define ASPEED_RESET_JTAG_MASTER2 54
+
+#define ASPEED_RESET_MAC4 53
+#define ASPEED_RESET_MAC3 52
+
+#define ASPEED_RESET_I3C5 45
+#define ASPEED_RESET_I3C4 44
+#define ASPEED_RESET_I3C3 43
+#define ASPEED_RESET_I3C2 42
+#define ASPEED_RESET_I3C1 41
+#define ASPEED_RESET_I3C0 40
+#define ASPEED_RESET_I3C 39
#define ASPEED_RESET_I3C_DMA 39
+
#define ASPEED_RESET_PWM 37
#define ASPEED_RESET_PECI 36
#define ASPEED_RESET_MII 35
diff --git a/include/dt-bindings/clock/axg-audio-clkc.h b/include/dt-bindings/clock/axg-audio-clkc.h
index f561f5c5ef8f..08c82c22fa5f 100644
--- a/include/dt-bindings/clock/axg-audio-clkc.h
+++ b/include/dt-bindings/clock/axg-audio-clkc.h
@@ -37,6 +37,26 @@
#define AUD_CLKID_SPDIFIN_CLK 56
#define AUD_CLKID_PDM_DCLK 57
#define AUD_CLKID_PDM_SYSCLK 58
+#define AUD_CLKID_MST_A_MCLK_SEL 59
+#define AUD_CLKID_MST_B_MCLK_SEL 60
+#define AUD_CLKID_MST_C_MCLK_SEL 61
+#define AUD_CLKID_MST_D_MCLK_SEL 62
+#define AUD_CLKID_MST_E_MCLK_SEL 63
+#define AUD_CLKID_MST_F_MCLK_SEL 64
+#define AUD_CLKID_MST_A_MCLK_DIV 65
+#define AUD_CLKID_MST_B_MCLK_DIV 66
+#define AUD_CLKID_MST_C_MCLK_DIV 67
+#define AUD_CLKID_MST_D_MCLK_DIV 68
+#define AUD_CLKID_MST_E_MCLK_DIV 69
+#define AUD_CLKID_MST_F_MCLK_DIV 70
+#define AUD_CLKID_SPDIFOUT_CLK_SEL 71
+#define AUD_CLKID_SPDIFOUT_CLK_DIV 72
+#define AUD_CLKID_SPDIFIN_CLK_SEL 73
+#define AUD_CLKID_SPDIFIN_CLK_DIV 74
+#define AUD_CLKID_PDM_DCLK_SEL 75
+#define AUD_CLKID_PDM_DCLK_DIV 76
+#define AUD_CLKID_PDM_SYSCLK_SEL 77
+#define AUD_CLKID_PDM_SYSCLK_DIV 78
#define AUD_CLKID_MST_A_SCLK 79
#define AUD_CLKID_MST_B_SCLK 80
#define AUD_CLKID_MST_C_SCLK 81
@@ -49,6 +69,30 @@
#define AUD_CLKID_MST_D_LRCLK 89
#define AUD_CLKID_MST_E_LRCLK 90
#define AUD_CLKID_MST_F_LRCLK 91
+#define AUD_CLKID_MST_A_SCLK_PRE_EN 92
+#define AUD_CLKID_MST_B_SCLK_PRE_EN 93
+#define AUD_CLKID_MST_C_SCLK_PRE_EN 94
+#define AUD_CLKID_MST_D_SCLK_PRE_EN 95
+#define AUD_CLKID_MST_E_SCLK_PRE_EN 96
+#define AUD_CLKID_MST_F_SCLK_PRE_EN 97
+#define AUD_CLKID_MST_A_SCLK_DIV 98
+#define AUD_CLKID_MST_B_SCLK_DIV 99
+#define AUD_CLKID_MST_C_SCLK_DIV 100
+#define AUD_CLKID_MST_D_SCLK_DIV 101
+#define AUD_CLKID_MST_E_SCLK_DIV 102
+#define AUD_CLKID_MST_F_SCLK_DIV 103
+#define AUD_CLKID_MST_A_SCLK_POST_EN 104
+#define AUD_CLKID_MST_B_SCLK_POST_EN 105
+#define AUD_CLKID_MST_C_SCLK_POST_EN 106
+#define AUD_CLKID_MST_D_SCLK_POST_EN 107
+#define AUD_CLKID_MST_E_SCLK_POST_EN 108
+#define AUD_CLKID_MST_F_SCLK_POST_EN 109
+#define AUD_CLKID_MST_A_LRCLK_DIV 110
+#define AUD_CLKID_MST_B_LRCLK_DIV 111
+#define AUD_CLKID_MST_C_LRCLK_DIV 112
+#define AUD_CLKID_MST_D_LRCLK_DIV 113
+#define AUD_CLKID_MST_E_LRCLK_DIV 114
+#define AUD_CLKID_MST_F_LRCLK_DIV 115
#define AUD_CLKID_TDMIN_A_SCLK_SEL 116
#define AUD_CLKID_TDMIN_B_SCLK_SEL 117
#define AUD_CLKID_TDMIN_C_SCLK_SEL 118
@@ -70,8 +114,24 @@
#define AUD_CLKID_TDMOUT_A_LRCLK 134
#define AUD_CLKID_TDMOUT_B_LRCLK 135
#define AUD_CLKID_TDMOUT_C_LRCLK 136
+#define AUD_CLKID_TDMIN_A_SCLK_PRE_EN 137
+#define AUD_CLKID_TDMIN_B_SCLK_PRE_EN 138
+#define AUD_CLKID_TDMIN_C_SCLK_PRE_EN 139
+#define AUD_CLKID_TDMIN_LB_SCLK_PRE_EN 140
+#define AUD_CLKID_TDMOUT_A_SCLK_PRE_EN 141
+#define AUD_CLKID_TDMOUT_B_SCLK_PRE_EN 142
+#define AUD_CLKID_TDMOUT_C_SCLK_PRE_EN 143
+#define AUD_CLKID_TDMIN_A_SCLK_POST_EN 144
+#define AUD_CLKID_TDMIN_B_SCLK_POST_EN 145
+#define AUD_CLKID_TDMIN_C_SCLK_POST_EN 146
+#define AUD_CLKID_TDMIN_LB_SCLK_POST_EN 147
+#define AUD_CLKID_TDMOUT_A_SCLK_POST_EN 148
+#define AUD_CLKID_TDMOUT_B_SCLK_POST_EN 149
+#define AUD_CLKID_TDMOUT_C_SCLK_POST_EN 150
#define AUD_CLKID_SPDIFOUT_B 151
#define AUD_CLKID_SPDIFOUT_B_CLK 152
+#define AUD_CLKID_SPDIFOUT_B_CLK_SEL 153
+#define AUD_CLKID_SPDIFOUT_B_CLK_DIV 154
#define AUD_CLKID_TDM_MCLK_PAD0 155
#define AUD_CLKID_TDM_MCLK_PAD1 156
#define AUD_CLKID_TDM_LRCLK_PAD0 157
@@ -90,5 +150,10 @@
#define AUD_CLKID_FRDDR_D 170
#define AUD_CLKID_TODDR_D 171
#define AUD_CLKID_LOOPBACK_B 172
+#define AUD_CLKID_CLK81_EN 173
+#define AUD_CLKID_SYSCLK_A_DIV 174
+#define AUD_CLKID_SYSCLK_B_DIV 175
+#define AUD_CLKID_SYSCLK_A_EN 176
+#define AUD_CLKID_SYSCLK_B_EN 177
#endif /* __AXG_AUDIO_CLKC_BINDINGS_H */
diff --git a/include/dt-bindings/clock/axg-clkc.h b/include/dt-bindings/clock/axg-clkc.h
index 93752ea107e3..442162822b88 100644
--- a/include/dt-bindings/clock/axg-clkc.h
+++ b/include/dt-bindings/clock/axg-clkc.h
@@ -16,6 +16,8 @@
#define CLKID_FCLK_DIV5 5
#define CLKID_FCLK_DIV7 6
#define CLKID_GP0_PLL 7
+#define CLKID_MPEG_SEL 8
+#define CLKID_MPEG_DIV 9
#define CLKID_CLK81 10
#define CLKID_MPLL0 11
#define CLKID_MPLL1 12
@@ -67,23 +69,66 @@
#define CLKID_AO_I2C 58
#define CLKID_SD_EMMC_B_CLK0 59
#define CLKID_SD_EMMC_C_CLK0 60
+#define CLKID_SD_EMMC_B_CLK0_SEL 61
+#define CLKID_SD_EMMC_B_CLK0_DIV 62
+#define CLKID_SD_EMMC_C_CLK0_SEL 63
+#define CLKID_SD_EMMC_C_CLK0_DIV 64
+#define CLKID_MPLL0_DIV 65
+#define CLKID_MPLL1_DIV 66
+#define CLKID_MPLL2_DIV 67
+#define CLKID_MPLL3_DIV 68
#define CLKID_HIFI_PLL 69
+#define CLKID_MPLL_PREDIV 70
+#define CLKID_FCLK_DIV2_DIV 71
+#define CLKID_FCLK_DIV3_DIV 72
+#define CLKID_FCLK_DIV4_DIV 73
+#define CLKID_FCLK_DIV5_DIV 74
+#define CLKID_FCLK_DIV7_DIV 75
+#define CLKID_PCIE_PLL 76
+#define CLKID_PCIE_MUX 77
+#define CLKID_PCIE_REF 78
#define CLKID_PCIE_CML_EN0 79
#define CLKID_PCIE_CML_EN1 80
+#define CLKID_GEN_CLK_SEL 82
+#define CLKID_GEN_CLK_DIV 83
#define CLKID_GEN_CLK 84
+#define CLKID_SYS_PLL_DCO 85
+#define CLKID_FIXED_PLL_DCO 86
+#define CLKID_GP0_PLL_DCO 87
+#define CLKID_HIFI_PLL_DCO 88
+#define CLKID_PCIE_PLL_DCO 89
+#define CLKID_PCIE_PLL_OD 90
+#define CLKID_VPU_0_DIV 91
#define CLKID_VPU_0_SEL 92
#define CLKID_VPU_0 93
+#define CLKID_VPU_1_DIV 94
#define CLKID_VPU_1_SEL 95
#define CLKID_VPU_1 96
#define CLKID_VPU 97
+#define CLKID_VAPB_0_DIV 98
#define CLKID_VAPB_0_SEL 99
#define CLKID_VAPB_0 100
+#define CLKID_VAPB_1_DIV 101
#define CLKID_VAPB_1_SEL 102
#define CLKID_VAPB_1 103
#define CLKID_VAPB_SEL 104
#define CLKID_VAPB 105
#define CLKID_VCLK 106
#define CLKID_VCLK2 107
+#define CLKID_VCLK_SEL 108
+#define CLKID_VCLK2_SEL 109
+#define CLKID_VCLK_INPUT 110
+#define CLKID_VCLK2_INPUT 111
+#define CLKID_VCLK_DIV 112
+#define CLKID_VCLK2_DIV 113
+#define CLKID_VCLK_DIV2_EN 114
+#define CLKID_VCLK_DIV4_EN 115
+#define CLKID_VCLK_DIV6_EN 116
+#define CLKID_VCLK_DIV12_EN 117
+#define CLKID_VCLK2_DIV2_EN 118
+#define CLKID_VCLK2_DIV4_EN 119
+#define CLKID_VCLK2_DIV6_EN 120
+#define CLKID_VCLK2_DIV12_EN 121
#define CLKID_VCLK_DIV1 122
#define CLKID_VCLK_DIV2 123
#define CLKID_VCLK_DIV4 124
@@ -94,7 +139,10 @@
#define CLKID_VCLK2_DIV4 129
#define CLKID_VCLK2_DIV6 130
#define CLKID_VCLK2_DIV12 131
+#define CLKID_CTS_ENCL_SEL 132
#define CLKID_CTS_ENCL 133
+#define CLKID_VDIN_MEAS_SEL 134
+#define CLKID_VDIN_MEAS_DIV 135
#define CLKID_VDIN_MEAS 136
#endif /* __AXG_CLKC_H */
diff --git a/include/dt-bindings/clock/exynos3250.h b/include/dt-bindings/clock/exynos3250.h
index fe8214017b46..cc7268151843 100644
--- a/include/dt-bindings/clock/exynos3250.h
+++ b/include/dt-bindings/clock/exynos3250.h
@@ -257,12 +257,6 @@
#define CLK_SCLK_MMC2 249
/*
- * Total number of clocks of main CMU.
- * NOTE: Must be equal to last clock ID increased by one.
- */
-#define CLK_NR_CLKS 250
-
-/*
* CMU DMC
*/
@@ -284,12 +278,6 @@
#define CLK_DIV_DMCD 20
/*
- * Total number of clocks of main CMU.
- * NOTE: Must be equal to last clock ID increased by one.
- */
-#define NR_CLKS_DMC 21
-
-/*
* CMU ISP
*/
@@ -344,10 +332,4 @@
#define CLK_ASYNCAXIM 46
#define CLK_SCLK_MPWM_ISP 47
-/*
- * Total number of clocks of CMU_ISP.
- * NOTE: Must be equal to last clock ID increased by one.
- */
-#define NR_CLKS_ISP 48
-
#endif /* _DT_BINDINGS_CLOCK_SAMSUNG_EXYNOS3250_CLOCK_H */
diff --git a/include/dt-bindings/clock/exynos4.h b/include/dt-bindings/clock/exynos4.h
index acbfbab875ec..4ebff79ed9e2 100644
--- a/include/dt-bindings/clock/exynos4.h
+++ b/include/dt-bindings/clock/exynos4.h
@@ -239,9 +239,6 @@
#define CLK_DIV_GDR 460
#define CLK_DIV_CORE2 461
-/* must be greater than maximal clock id */
-#define CLK_NR_CLKS 462
-
/* Exynos4x12 ISP clocks */
#define CLK_ISP_FIMC_ISP 1
#define CLK_ISP_FIMC_DRC 2
@@ -275,6 +272,4 @@
#define CLK_ISP_DIV_MCUISP0 29
#define CLK_ISP_DIV_MCUISP1 30
-#define CLK_NR_ISP_CLKS 31
-
#endif /* _DT_BINDINGS_CLOCK_EXYNOS_4_H */
diff --git a/include/dt-bindings/clock/exynos5250.h b/include/dt-bindings/clock/exynos5250.h
index 4680da7357d3..2337c028bbe1 100644
--- a/include/dt-bindings/clock/exynos5250.h
+++ b/include/dt-bindings/clock/exynos5250.h
@@ -177,7 +177,4 @@
#define CLK_MOUT_MPLL 1029
#define CLK_MOUT_VPLLSRC 1030
-/* must be greater than maximal clock id */
-#define CLK_NR_CLKS 1031
-
#endif /* _DT_BINDINGS_CLOCK_EXYNOS_5250_H */
diff --git a/include/dt-bindings/clock/exynos5260-clk.h b/include/dt-bindings/clock/exynos5260-clk.h
index 98a58cbd81b2..dfde40ea40f0 100644
--- a/include/dt-bindings/clock/exynos5260-clk.h
+++ b/include/dt-bindings/clock/exynos5260-clk.h
@@ -137,8 +137,6 @@
#define PHYCLK_USBHOST20_PHY_CLK48MOHCI 122
#define PHYCLK_USBDRD30_UDRD30_PIPE_PCLK 123
#define PHYCLK_USBDRD30_UDRD30_PHYCLOCK 124
-#define TOP_NR_CLK 125
-
/* List Of Clocks For CMU_EGL */
@@ -153,8 +151,6 @@
#define EGL_DOUT_ACLK_EGL 9
#define EGL_DOUT_EGL2 10
#define EGL_DOUT_EGL1 11
-#define EGL_NR_CLK 12
-
/* List Of Clocks For CMU_KFC */
@@ -168,8 +164,6 @@
#define KFC_DOUT_KFC_ATCLK 8
#define KFC_DOUT_KFC2 9
#define KFC_DOUT_KFC1 10
-#define KFC_NR_CLK 11
-
/* List Of Clocks For CMU_MIF */
@@ -200,8 +194,6 @@
#define MIF_CLK_INTMEM 25
#define MIF_SCLK_LPDDR3PHY_WRAP_U1 26
#define MIF_SCLK_LPDDR3PHY_WRAP_U0 27
-#define MIF_NR_CLK 28
-
/* List Of Clocks For CMU_G3D */
@@ -211,8 +203,6 @@
#define G3D_DOUT_ACLK_G3D 4
#define G3D_CLK_G3D_HPM 5
#define G3D_CLK_G3D 6
-#define G3D_NR_CLK 7
-
/* List Of Clocks For CMU_AUD */
@@ -231,8 +221,6 @@
#define AUD_SCLK_AUD_UART 13
#define AUD_SCLK_PCM 14
#define AUD_SCLK_I2S 15
-#define AUD_NR_CLK 16
-
/* List Of Clocks For CMU_MFC */
@@ -241,8 +229,6 @@
#define MFC_CLK_MFC 3
#define MFC_CLK_SMMU2_MFCM1 4
#define MFC_CLK_SMMU2_MFCM0 5
-#define MFC_NR_CLK 6
-
/* List Of Clocks For CMU_GSCL */
@@ -272,8 +258,6 @@
#define GSCL_CLK_SMMU3_MSCL1 24
#define GSCL_SCLK_CSIS1_WRAP 25
#define GSCL_SCLK_CSIS0_WRAP 26
-#define GSCL_NR_CLK 27
-
/* List Of Clocks For CMU_FSYS */
@@ -295,8 +279,6 @@
#define FSYS_CLK_SMMU_RTIC 16
#define FSYS_PHYCLK_USBDRD30 17
#define FSYS_PHYCLK_USBHOST20 18
-#define FSYS_NR_CLK 19
-
/* List Of Clocks For CMU_PERI */
@@ -366,8 +348,6 @@
#define PERI_SCLK_SPDIF 64
#define PERI_SCLK_I2S 65
#define PERI_SCLK_PCM1 66
-#define PERI_NR_CLK 67
-
/* List Of Clocks For CMU_DISP */
@@ -406,8 +386,6 @@
#define DISP_CLK_DP 33
#define DISP_SCLK_PIXEL 34
#define DISP_MOUT_HDMI_PHY_PIXEL_USER 35
-#define DISP_NR_CLK 36
-
/* List Of Clocks For CMU_G2D */
@@ -423,8 +401,6 @@
#define G2D_CLK_SMMU_SSS 10
#define G2D_CLK_SMMU_MDMA 11
#define G2D_CLK_SMMU3_G2D 12
-#define G2D_NR_CLK 13
-
/* List Of Clocks For CMU_ISP */
@@ -461,6 +437,5 @@
#define ISP_SCLK_SPI0_EXT 31
#define ISP_SCLK_SPI1_EXT 32
#define ISP_SCLK_UART_EXT 33
-#define ISP_NR_CLK 34
#endif
diff --git a/include/dt-bindings/clock/exynos5410.h b/include/dt-bindings/clock/exynos5410.h
index 86c2ad56c5ef..7a1a93f8df6c 100644
--- a/include/dt-bindings/clock/exynos5410.h
+++ b/include/dt-bindings/clock/exynos5410.h
@@ -61,6 +61,4 @@
#define CLK_USBD301 367
#define CLK_SSS 471
-#define CLK_NR_CLKS 512
-
#endif /* _DT_BINDINGS_CLOCK_EXYNOS_5410_H */
diff --git a/include/dt-bindings/clock/exynos5420.h b/include/dt-bindings/clock/exynos5420.h
index 9fffc6ceaadd..73e82527a9e9 100644
--- a/include/dt-bindings/clock/exynos5420.h
+++ b/include/dt-bindings/clock/exynos5420.h
@@ -271,7 +271,4 @@
#define CLK_DOUT_PCLK_DREX0 798
#define CLK_DOUT_PCLK_DREX1 799
-/* must be greater than maximal clock id */
-#define CLK_NR_CLKS 800
-
#endif /* _DT_BINDINGS_CLOCK_EXYNOS_5420_H */
diff --git a/include/dt-bindings/clock/exynos5433.h b/include/dt-bindings/clock/exynos5433.h
index 25ffa53573a5..d12c1a963fa1 100644
--- a/include/dt-bindings/clock/exynos5433.h
+++ b/include/dt-bindings/clock/exynos5433.h
@@ -188,8 +188,6 @@
#define CLK_SCLK_ISP_SPI0_CAM1 252
#define CLK_SCLK_HDMI_SPDIF_DISP 253
-#define TOP_NR_CLK 254
-
/* CMU_CPIF */
#define CLK_FOUT_MPHY_PLL 1
@@ -200,8 +198,6 @@
#define CLK_SCLK_MPHY_PLL 11
#define CLK_SCLK_UFS_MPHY 11
-#define CPIF_NR_CLK 12
-
/* CMU_MIF */
#define CLK_FOUT_MEM0_PLL 1
#define CLK_FOUT_MEM1_PLL 2
@@ -396,8 +392,6 @@
#define CLK_SCLK_BUS_PLL_APOLLO 199
#define CLK_SCLK_BUS_PLL_ATLAS 200
-#define MIF_NR_CLK 201
-
/* CMU_PERIC */
#define CLK_PCLK_SPI2 1
#define CLK_PCLK_SPI1 2
@@ -468,8 +462,6 @@
#define CLK_DIV_SCLK_SCI 70
#define CLK_DIV_SCLK_SC_IN 71
-#define PERIC_NR_CLK 72
-
/* CMU_PERIS */
#define CLK_PCLK_HPM_APBIF 1
#define CLK_PCLK_TMU1_APBIF 2
@@ -513,8 +505,6 @@
#define CLK_SCLK_ANTIRBK_CNT 40
#define CLK_SCLK_OTP_CON 41
-#define PERIS_NR_CLK 42
-
/* CMU_FSYS */
#define CLK_MOUT_ACLK_FSYS_200_USER 1
#define CLK_MOUT_SCLK_MMC2_USER 2
@@ -621,8 +611,6 @@
#define CLK_SCLK_USBDRD30 114
#define CLK_PCIE 115
-#define FSYS_NR_CLK 116
-
/* CMU_G2D */
#define CLK_MUX_ACLK_G2D_266_USER 1
#define CLK_MUX_ACLK_G2D_400_USER 2
@@ -653,8 +641,6 @@
#define CLK_PCLK_G2D 25
#define CLK_PCLK_SMMU_G2D 26
-#define G2D_NR_CLK 27
-
/* CMU_DISP */
#define CLK_FOUT_DISP_PLL 1
@@ -771,8 +757,6 @@
#define CLK_PHYCLK_MIPIDPHY0_BITCLKDIV8_PHY 114
#define CLK_PHYCLK_MIPIDPHY0_RXCLKESC0_PHY 115
-#define DISP_NR_CLK 116
-
/* CMU_AUD */
#define CLK_MOUT_AUD_PLL_USER 1
#define CLK_MOUT_SCLK_AUD_PCM 2
@@ -824,8 +808,6 @@
#define CLK_SCLK_I2S_BCLK 46
#define CLK_SCLK_AUD_I2S 47
-#define AUD_NR_CLK 48
-
/* CMU_BUS{0|1|2} */
#define CLK_DIV_PCLK_BUS_133 1
@@ -840,8 +822,6 @@
#define CLK_ACLK_BUS2BEND_400 9 /* Only CMU_BUS2 */
#define CLK_ACLK_BUS2RTND_400 10 /* Only CMU_BUS2 */
-#define BUSx_NR_CLK 11
-
/* CMU_G3D */
#define CLK_FOUT_G3D_PLL 1
@@ -865,8 +845,6 @@
#define CLK_PCLK_SYSREG_G3D 18
#define CLK_SCLK_HPM_G3D 19
-#define G3D_NR_CLK 20
-
/* CMU_GSCL */
#define CLK_MOUT_ACLK_GSCL_111_USER 1
#define CLK_MOUT_ACLK_GSCL_333_USER 2
@@ -898,8 +876,6 @@
#define CLK_PCLK_SMMU_GSCL1 27
#define CLK_PCLK_SMMU_GSCL2 28
-#define GSCL_NR_CLK 29
-
/* CMU_APOLLO */
#define CLK_FOUT_APOLLO_PLL 1
@@ -935,8 +911,6 @@
#define CLK_SCLK_HPM_APOLLO 29
#define CLK_SCLK_APOLLO 30
-#define APOLLO_NR_CLK 31
-
/* CMU_ATLAS */
#define CLK_FOUT_ATLAS_PLL 1
@@ -981,8 +955,6 @@
#define CLK_ATCLK 38
#define CLK_SCLK_ATLAS 39
-#define ATLAS_NR_CLK 40
-
/* CMU_MSCL */
#define CLK_MOUT_SCLK_JPEG_USER 1
#define CLK_MOUT_ACLK_MSCL_400_USER 2
@@ -1016,8 +988,6 @@
#define CLK_PCLK_SMMU_JPEG 28
#define CLK_SCLK_JPEG 29
-#define MSCL_NR_CLK 30
-
/* CMU_MFC */
#define CLK_MOUT_ACLK_MFC_400_USER 1
@@ -1040,8 +1010,6 @@
#define CLK_PCLK_SMMU_MFC_1 17
#define CLK_PCLK_SMMU_MFC_0 18
-#define MFC_NR_CLK 19
-
/* CMU_HEVC */
#define CLK_MOUT_ACLK_HEVC_400_USER 1
@@ -1064,8 +1032,6 @@
#define CLK_PCLK_SMMU_HEVC_1 17
#define CLK_PCLK_SMMU_HEVC_0 18
-#define HEVC_NR_CLK 19
-
/* CMU_ISP */
#define CLK_MOUT_ACLK_ISP_DIS_400_USER 1
#define CLK_MOUT_ACLK_ISP_400_USER 2
@@ -1147,8 +1113,6 @@
#define CLK_SCLK_PIXELASYNCS_ISPC 76
#define CLK_SCLK_PIXELASYNCM_ISPC 77
-#define ISP_NR_CLK 78
-
/* CMU_CAM0 */
#define CLK_PHYCLK_RXBYTEECLKHS0_S4_PHY 1
#define CLK_PHYCLK_RXBYTEECLKHS0_S2A_PHY 2
@@ -1285,8 +1249,6 @@
#define CLK_SCLK_PIXELASYNCM_LITE_C_INIT 132
#define CLK_SCLK_PIXELASYNCS_LITE_C_INIT 133
-#define CAM0_NR_CLK 134
-
/* CMU_CAM1 */
#define CLK_PHYCLK_RXBYTEECLKHS0_S2B 1
@@ -1404,12 +1366,8 @@
#define CLK_ATCLK_ISP 111
#define CLK_SCLK_ISP_CA5 112
-#define CAM1_NR_CLK 113
-
/* CMU_IMEM */
#define CLK_ACLK_SLIMSSS 2
#define CLK_PCLK_SLIMSSS 35
-#define IMEM_NR_CLK 36
-
#endif /* _DT_BINDINGS_CLOCK_EXYNOS5433_H */
diff --git a/include/dt-bindings/clock/exynos7885.h b/include/dt-bindings/clock/exynos7885.h
index 8256e7430b63..255e3aa94323 100644
--- a/include/dt-bindings/clock/exynos7885.h
+++ b/include/dt-bindings/clock/exynos7885.h
@@ -69,7 +69,6 @@
#define CLK_GOUT_FSYS_MMC_EMBD 58
#define CLK_GOUT_FSYS_MMC_SDIO 59
#define CLK_GOUT_FSYS_USB30DRD 60
-#define TOP_NR_CLK 61
/* CMU_CORE */
#define CLK_MOUT_CORE_BUS_USER 1
@@ -86,7 +85,6 @@
#define CLK_GOUT_TREX_P_CORE_CCLK_P_CORE 12
#define CLK_GOUT_TREX_P_CORE_PCLK 13
#define CLK_GOUT_TREX_P_CORE_PCLK_P_CORE 14
-#define CORE_NR_CLK 15
/* CMU_PERI */
#define CLK_MOUT_PERI_BUS_USER 1
@@ -132,7 +130,6 @@
#define CLK_GOUT_SYSREG_PERI_PCLK 41
#define CLK_GOUT_WDT0_PCLK 42
#define CLK_GOUT_WDT1_PCLK 43
-#define PERI_NR_CLK 44
/* CMU_FSYS */
#define CLK_MOUT_FSYS_BUS_USER 1
@@ -146,6 +143,5 @@
#define CLK_GOUT_MMC_EMBD_SDCLKIN 8
#define CLK_GOUT_MMC_SDIO_ACLK 9
#define CLK_GOUT_MMC_SDIO_SDCLKIN 10
-#define FSYS_NR_CLK 11
#endif /* _DT_BINDINGS_CLOCK_EXYNOS_7885_H */
diff --git a/include/dt-bindings/clock/exynos850.h b/include/dt-bindings/clock/exynos850.h
index afacba338c91..3090e09c9a55 100644
--- a/include/dt-bindings/clock/exynos850.h
+++ b/include/dt-bindings/clock/exynos850.h
@@ -88,7 +88,6 @@
#define CLK_MOUT_G3D_SWITCH 76
#define CLK_GOUT_G3D_SWITCH 77
#define CLK_DOUT_G3D_SWITCH 78
-#define TOP_NR_CLK 79
/* CMU_APM */
#define CLK_RCO_I3C_PMIC 1
@@ -115,7 +114,6 @@
#define CLK_GOUT_GPIO_ALIVE_PCLK 22
#define CLK_GOUT_PMU_ALIVE_PCLK 23
#define CLK_GOUT_SYSREG_APM_PCLK 24
-#define APM_NR_CLK 25
/* CMU_AUD */
#define CLK_DOUT_AUD_AUDIF 1
@@ -179,7 +177,6 @@
#define IOCLK_AUDIOCDCLK6 59
#define TICK_USB 60
#define CLK_GOUT_AUD_CMU_AUD_PCLK 61
-#define AUD_NR_CLK 62
/* CMU_CMGP */
#define CLK_RCO_CMGP 1
@@ -197,7 +194,6 @@
#define CLK_GOUT_CMGP_USI1_IPCLK 13
#define CLK_GOUT_CMGP_USI1_PCLK 14
#define CLK_GOUT_SYSREG_CMGP_PCLK 15
-#define CMGP_NR_CLK 16
/* CMU_G3D */
#define CLK_FOUT_G3D_PLL 1
@@ -212,7 +208,6 @@
#define CLK_GOUT_G3D_BUSD_CLK 10
#define CLK_GOUT_G3D_BUSP_CLK 11
#define CLK_GOUT_G3D_SYSREG_PCLK 12
-#define G3D_NR_CLK 13
/* CMU_HSI */
#define CLK_MOUT_HSI_BUS_USER 1
@@ -231,7 +226,6 @@
#define CLK_GOUT_HSI_PPMU_ACLK 14
#define CLK_GOUT_HSI_PPMU_PCLK 15
#define CLK_GOUT_HSI_CMU_HSI_PCLK 16
-#define HSI_NR_CLK 17
/* CMU_IS */
#define CLK_MOUT_IS_BUS_USER 1
@@ -257,7 +251,6 @@
#define CLK_GOUT_IS_SYSMMU_IS0_CLK 21
#define CLK_GOUT_IS_SYSMMU_IS1_CLK 22
#define CLK_GOUT_IS_SYSREG_PCLK 23
-#define IS_NR_CLK 24
/* CMU_MFCMSCL */
#define CLK_MOUT_MFCMSCL_MFC_USER 1
@@ -275,7 +268,6 @@
#define CLK_GOUT_MFCMSCL_PPMU_PCLK 13
#define CLK_GOUT_MFCMSCL_SYSMMU_CLK 14
#define CLK_GOUT_MFCMSCL_SYSREG_PCLK 15
-#define MFCMSCL_NR_CLK 16
/* CMU_PERI */
#define CLK_MOUT_PERI_BUS_USER 1
@@ -312,7 +304,6 @@
#define CLK_GOUT_UART_PCLK 32
#define CLK_GOUT_WDT0_PCLK 33
#define CLK_GOUT_WDT1_PCLK 34
-#define PERI_NR_CLK 35
/* CMU_CORE */
#define CLK_MOUT_CORE_BUS_USER 1
@@ -329,7 +320,6 @@
#define CLK_GOUT_SSS_PCLK 12
#define CLK_GOUT_GPIO_CORE_PCLK 13
#define CLK_GOUT_SYSREG_CORE_PCLK 14
-#define CORE_NR_CLK 15
/* CMU_DPU */
#define CLK_MOUT_DPU_USER 1
diff --git a/include/dt-bindings/clock/g12a-aoclkc.h b/include/dt-bindings/clock/g12a-aoclkc.h
index e916e49ff288..8fe7712fb12d 100644
--- a/include/dt-bindings/clock/g12a-aoclkc.h
+++ b/include/dt-bindings/clock/g12a-aoclkc.h
@@ -26,10 +26,17 @@
#define CLKID_AO_M4_FCLK 13
#define CLKID_AO_M4_HCLK 14
#define CLKID_AO_CLK81 15
#define CLKID_AO_SAR_ADC_SEL 16
+#define CLKID_AO_SAR_ADC_DIV 17
#define CLKID_AO_SAR_ADC_CLK 18
#define CLKID_AO_CTS_OSCIN 19
+#define CLKID_AO_32K_PRE 20
+#define CLKID_AO_32K_DIV 21
+#define CLKID_AO_32K_SEL 22
#define CLKID_AO_32K 23
+#define CLKID_AO_CEC_PRE 24
+#define CLKID_AO_CEC_DIV 25
+#define CLKID_AO_CEC_SEL 26
#define CLKID_AO_CEC 27
#define CLKID_AO_CTS_RTC_OSCIN 28
diff --git a/include/dt-bindings/clock/g12a-clkc.h b/include/dt-bindings/clock/g12a-clkc.h
index a93b58c5e18e..fd09819da2ec 100644
--- a/include/dt-bindings/clock/g12a-clkc.h
+++ b/include/dt-bindings/clock/g12a-clkc.h
@@ -16,6 +16,8 @@
#define CLKID_FCLK_DIV5 5
#define CLKID_FCLK_DIV7 6
#define CLKID_GP0_PLL 7
+#define CLKID_MPEG_SEL 8
+#define CLKID_MPEG_DIV 9
#define CLKID_CLK81 10
#define CLKID_MPLL0 11
#define CLKID_MPLL1 12
@@ -69,7 +71,23 @@
#define CLKID_SD_EMMC_A_CLK0 60
#define CLKID_SD_EMMC_B_CLK0 61
#define CLKID_SD_EMMC_C_CLK0 62
+#define CLKID_SD_EMMC_A_CLK0_SEL 63
+#define CLKID_SD_EMMC_A_CLK0_DIV 64
+#define CLKID_SD_EMMC_B_CLK0_SEL 65
+#define CLKID_SD_EMMC_B_CLK0_DIV 66
+#define CLKID_SD_EMMC_C_CLK0_SEL 67
+#define CLKID_SD_EMMC_C_CLK0_DIV 68
+#define CLKID_MPLL0_DIV 69
+#define CLKID_MPLL1_DIV 70
+#define CLKID_MPLL2_DIV 71
+#define CLKID_MPLL3_DIV 72
+#define CLKID_MPLL_PREDIV 73
#define CLKID_HIFI_PLL 74
+#define CLKID_FCLK_DIV2_DIV 75
+#define CLKID_FCLK_DIV3_DIV 76
+#define CLKID_FCLK_DIV4_DIV 77
+#define CLKID_FCLK_DIV5_DIV 78
+#define CLKID_FCLK_DIV7_DIV 79
#define CLKID_VCLK2_VENCI0 80
#define CLKID_VCLK2_VENCI1 81
#define CLKID_VCLK2_VENCP0 82
@@ -90,26 +108,54 @@
#define CLKID_VCLK2_VENCL 97
#define CLKID_VCLK2_OTHER1 98
#define CLKID_FCLK_DIV2P5 99
+#define CLKID_FCLK_DIV2P5_DIV 100
+#define CLKID_FIXED_PLL_DCO 101
+#define CLKID_SYS_PLL_DCO 102
+#define CLKID_GP0_PLL_DCO 103
+#define CLKID_HIFI_PLL_DCO 104
#define CLKID_DMA 105
#define CLKID_EFUSE 106
#define CLKID_ROM_BOOT 107
#define CLKID_RESET_SEC 108
#define CLKID_SEC_AHB_APB3 109
#define CLKID_VPU_0_SEL 110
+#define CLKID_VPU_0_DIV 111
#define CLKID_VPU_0 112
#define CLKID_VPU_1_SEL 113
+#define CLKID_VPU_1_DIV 114
#define CLKID_VPU_1 115
#define CLKID_VPU 116
#define CLKID_VAPB_0_SEL 117
+#define CLKID_VAPB_0_DIV 118
#define CLKID_VAPB_0 119
#define CLKID_VAPB_1_SEL 120
+#define CLKID_VAPB_1_DIV 121
#define CLKID_VAPB_1 122
#define CLKID_VAPB_SEL 123
#define CLKID_VAPB 124
+#define CLKID_HDMI_PLL_DCO 125
+#define CLKID_HDMI_PLL_OD 126
+#define CLKID_HDMI_PLL_OD2 127
#define CLKID_HDMI_PLL 128
#define CLKID_VID_PLL 129
+#define CLKID_VID_PLL_SEL 130
+#define CLKID_VID_PLL_DIV 131
+#define CLKID_VCLK_SEL 132
+#define CLKID_VCLK2_SEL 133
+#define CLKID_VCLK_INPUT 134
+#define CLKID_VCLK2_INPUT 135
+#define CLKID_VCLK_DIV 136
+#define CLKID_VCLK2_DIV 137
#define CLKID_VCLK 138
#define CLKID_VCLK2 139
+#define CLKID_VCLK_DIV2_EN 140
+#define CLKID_VCLK_DIV4_EN 141
+#define CLKID_VCLK_DIV6_EN 142
+#define CLKID_VCLK_DIV12_EN 143
+#define CLKID_VCLK2_DIV2_EN 144
+#define CLKID_VCLK2_DIV4_EN 145
+#define CLKID_VCLK2_DIV6_EN 146
+#define CLKID_VCLK2_DIV12_EN 147
#define CLKID_VCLK_DIV1 148
#define CLKID_VCLK_DIV2 149
#define CLKID_VCLK_DIV4 150
@@ -120,34 +166,126 @@
#define CLKID_VCLK2_DIV4 155
#define CLKID_VCLK2_DIV6 156
#define CLKID_VCLK2_DIV12 157
+#define CLKID_CTS_ENCI_SEL 158
+#define CLKID_CTS_ENCP_SEL 159
+#define CLKID_CTS_VDAC_SEL 160
+#define CLKID_HDMI_TX_SEL 161
#define CLKID_CTS_ENCI 162
#define CLKID_CTS_ENCP 163
#define CLKID_CTS_VDAC 164
#define CLKID_HDMI_TX 165
+#define CLKID_HDMI_SEL 166
+#define CLKID_HDMI_DIV 167
#define CLKID_HDMI 168
#define CLKID_MALI_0_SEL 169
+#define CLKID_MALI_0_DIV 170
#define CLKID_MALI_0 171
#define CLKID_MALI_1_SEL 172
+#define CLKID_MALI_1_DIV 173
#define CLKID_MALI_1 174
#define CLKID_MALI 175
+#define CLKID_MPLL_50M_DIV 176
#define CLKID_MPLL_50M 177
+#define CLKID_SYS_PLL_DIV16_EN 178
+#define CLKID_SYS_PLL_DIV16 179
+#define CLKID_CPU_CLK_DYN0_SEL 180
+#define CLKID_CPU_CLK_DYN0_DIV 181
+#define CLKID_CPU_CLK_DYN0 182
+#define CLKID_CPU_CLK_DYN1_SEL 183
+#define CLKID_CPU_CLK_DYN1_DIV 184
+#define CLKID_CPU_CLK_DYN1 185
+#define CLKID_CPU_CLK_DYN 186
#define CLKID_CPU_CLK 187
+#define CLKID_CPU_CLK_DIV16_EN 188
+#define CLKID_CPU_CLK_DIV16 189
+#define CLKID_CPU_CLK_APB_DIV 190
+#define CLKID_CPU_CLK_APB 191
+#define CLKID_CPU_CLK_ATB_DIV 192
+#define CLKID_CPU_CLK_ATB 193
+#define CLKID_CPU_CLK_AXI_DIV 194
+#define CLKID_CPU_CLK_AXI 195
+#define CLKID_CPU_CLK_TRACE_DIV 196
+#define CLKID_CPU_CLK_TRACE 197
+#define CLKID_PCIE_PLL_DCO 198
+#define CLKID_PCIE_PLL_DCO_DIV2 199
+#define CLKID_PCIE_PLL_OD 200
#define CLKID_PCIE_PLL 201
+#define CLKID_VDEC_1_SEL 202
+#define CLKID_VDEC_1_DIV 203
#define CLKID_VDEC_1 204
+#define CLKID_VDEC_HEVC_SEL 205
+#define CLKID_VDEC_HEVC_DIV 206
#define CLKID_VDEC_HEVC 207
+#define CLKID_VDEC_HEVCF_SEL 208
+#define CLKID_VDEC_HEVCF_DIV 209
#define CLKID_VDEC_HEVCF 210
+#define CLKID_TS_DIV 211
#define CLKID_TS 212
+#define CLKID_SYS1_PLL_DCO 213
+#define CLKID_SYS1_PLL 214
+#define CLKID_SYS1_PLL_DIV16_EN 215
+#define CLKID_SYS1_PLL_DIV16 216
+#define CLKID_CPUB_CLK_DYN0_SEL 217
+#define CLKID_CPUB_CLK_DYN0_DIV 218
+#define CLKID_CPUB_CLK_DYN0 219
+#define CLKID_CPUB_CLK_DYN1_SEL 220
+#define CLKID_CPUB_CLK_DYN1_DIV 221
+#define CLKID_CPUB_CLK_DYN1 222
+#define CLKID_CPUB_CLK_DYN 223
#define CLKID_CPUB_CLK 224
+#define CLKID_CPUB_CLK_DIV16_EN 225
+#define CLKID_CPUB_CLK_DIV16 226
+#define CLKID_CPUB_CLK_DIV2 227
+#define CLKID_CPUB_CLK_DIV3 228
+#define CLKID_CPUB_CLK_DIV4 229
+#define CLKID_CPUB_CLK_DIV5 230
+#define CLKID_CPUB_CLK_DIV6 231
+#define CLKID_CPUB_CLK_DIV7 232
+#define CLKID_CPUB_CLK_DIV8 233
+#define CLKID_CPUB_CLK_APB_SEL 234
+#define CLKID_CPUB_CLK_APB 235
+#define CLKID_CPUB_CLK_ATB_SEL 236
+#define CLKID_CPUB_CLK_ATB 237
+#define CLKID_CPUB_CLK_AXI_SEL 238
+#define CLKID_CPUB_CLK_AXI 239
+#define CLKID_CPUB_CLK_TRACE_SEL 240
+#define CLKID_CPUB_CLK_TRACE 241
+#define CLKID_GP1_PLL_DCO 242
#define CLKID_GP1_PLL 243
+#define CLKID_DSU_CLK_DYN0_SEL 244
+#define CLKID_DSU_CLK_DYN0_DIV 245
+#define CLKID_DSU_CLK_DYN0 246
+#define CLKID_DSU_CLK_DYN1_SEL 247
+#define CLKID_DSU_CLK_DYN1_DIV 248
+#define CLKID_DSU_CLK_DYN1 249
+#define CLKID_DSU_CLK_DYN 250
+#define CLKID_DSU_CLK_FINAL 251
#define CLKID_DSU_CLK 252
#define CLKID_CPU1_CLK 253
#define CLKID_CPU2_CLK 254
#define CLKID_CPU3_CLK 255
+#define CLKID_SPICC0_SCLK_SEL 256
+#define CLKID_SPICC0_SCLK_DIV 257
#define CLKID_SPICC0_SCLK 258
+#define CLKID_SPICC1_SCLK_SEL 259
+#define CLKID_SPICC1_SCLK_DIV 260
#define CLKID_SPICC1_SCLK 261
+#define CLKID_NNA_AXI_CLK_SEL 262
+#define CLKID_NNA_AXI_CLK_DIV 263
#define CLKID_NNA_AXI_CLK 264
+#define CLKID_NNA_CORE_CLK_SEL 265
+#define CLKID_NNA_CORE_CLK_DIV 266
#define CLKID_NNA_CORE_CLK 267
+#define CLKID_MIPI_DSI_PXCLK_DIV 268
#define CLKID_MIPI_DSI_PXCLK_SEL 269
#define CLKID_MIPI_DSI_PXCLK 270
+#define CLKID_CTS_ENCL 271
+#define CLKID_CTS_ENCL_SEL 272
+#define CLKID_MIPI_ISP_DIV 273
+#define CLKID_MIPI_ISP_SEL 274
+#define CLKID_MIPI_ISP 275
+#define CLKID_MIPI_ISP_GATE 276
+#define CLKID_MIPI_ISP_CSI_PHY0 277
+#define CLKID_MIPI_ISP_CSI_PHY1 278
#endif /* __G12A_CLKC_H */
diff --git a/include/dt-bindings/clock/google,gs101.h b/include/dt-bindings/clock/google,gs101.h
new file mode 100644
index 000000000000..21adec22387c
--- /dev/null
+++ b/include/dt-bindings/clock/google,gs101.h
@@ -0,0 +1,392 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2023 Linaro Ltd.
+ * Author: Peter Griffin <peter.griffin@linaro.org>
+ *
+ * Device Tree binding constants for Google gs101 clock controller.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_GOOGLE_GS101_H
+#define _DT_BINDINGS_CLOCK_GOOGLE_GS101_H
+
+/* CMU_TOP PLL */
+#define CLK_FOUT_SHARED0_PLL 1
+#define CLK_FOUT_SHARED1_PLL 2
+#define CLK_FOUT_SHARED2_PLL 3
+#define CLK_FOUT_SHARED3_PLL 4
+#define CLK_FOUT_SPARE_PLL 5
+
+/* CMU_TOP MUX */
+#define CLK_MOUT_PLL_SHARED0 6
+#define CLK_MOUT_PLL_SHARED1 7
+#define CLK_MOUT_PLL_SHARED2 8
+#define CLK_MOUT_PLL_SHARED3 9
+#define CLK_MOUT_PLL_SPARE 10
+#define CLK_MOUT_CMU_BO_BUS 11
+#define CLK_MOUT_CMU_BUS0_BUS 12
+#define CLK_MOUT_CMU_BUS1_BUS 13
+#define CLK_MOUT_CMU_BUS2_BUS 14
+#define CLK_MOUT_CMU_CIS_CLK0 15
+#define CLK_MOUT_CMU_CIS_CLK1 16
+#define CLK_MOUT_CMU_CIS_CLK2 17
+#define CLK_MOUT_CMU_CIS_CLK3 18
+#define CLK_MOUT_CMU_CIS_CLK4 19
+#define CLK_MOUT_CMU_CIS_CLK5 20
+#define CLK_MOUT_CMU_CIS_CLK6 21
+#define CLK_MOUT_CMU_CIS_CLK7 22
+#define CLK_MOUT_CMU_CMU_BOOST 23
+#define CLK_MOUT_CMU_BOOST_OPTION1 24
+#define CLK_MOUT_CMU_CORE_BUS 25
+#define CLK_MOUT_CMU_CPUCL0_DBG 26
+#define CLK_MOUT_CMU_CPUCL0_SWITCH 27
+#define CLK_MOUT_CMU_CPUCL1_SWITCH 28
+#define CLK_MOUT_CMU_CPUCL2_SWITCH 29
+#define CLK_MOUT_CMU_CSIS_BUS 30
+#define CLK_MOUT_CMU_DISP_BUS 31
+#define CLK_MOUT_CMU_DNS_BUS 32
+#define CLK_MOUT_CMU_DPU_BUS 33
+#define CLK_MOUT_CMU_EH_BUS 34
+#define CLK_MOUT_CMU_G2D_G2D 35
+#define CLK_MOUT_CMU_G2D_MSCL 36
+#define CLK_MOUT_CMU_G3AA_G3AA 37
+#define CLK_MOUT_CMU_G3D_BUSD 38
+#define CLK_MOUT_CMU_G3D_GLB 39
+#define CLK_MOUT_CMU_G3D_SWITCH 40
+#define CLK_MOUT_CMU_GDC_GDC0 41
+#define CLK_MOUT_CMU_GDC_GDC1 42
+#define CLK_MOUT_CMU_GDC_SCSC 43
+#define CLK_MOUT_CMU_HPM 44
+#define CLK_MOUT_CMU_HSI0_BUS 45
+#define CLK_MOUT_CMU_HSI0_DPGTC 46
+#define CLK_MOUT_CMU_HSI0_USB31DRD 47
+#define CLK_MOUT_CMU_HSI0_USBDPDBG 48
+#define CLK_MOUT_CMU_HSI1_BUS 49
+#define CLK_MOUT_CMU_HSI1_PCIE 50
+#define CLK_MOUT_CMU_HSI2_BUS 51
+#define CLK_MOUT_CMU_HSI2_MMC_CARD 52
+#define CLK_MOUT_CMU_HSI2_PCIE 53
+#define CLK_MOUT_CMU_HSI2_UFS_EMBD 54
+#define CLK_MOUT_CMU_IPP_BUS 55
+#define CLK_MOUT_CMU_ITP_BUS 56
+#define CLK_MOUT_CMU_MCSC_ITSC 57
+#define CLK_MOUT_CMU_MCSC_MCSC 58
+#define CLK_MOUT_CMU_MFC_MFC 59
+#define CLK_MOUT_CMU_MIF_BUSP 60
+#define CLK_MOUT_CMU_MIF_SWITCH 61
+#define CLK_MOUT_CMU_MISC_BUS 62
+#define CLK_MOUT_CMU_MISC_SSS 63
+#define CLK_MOUT_CMU_PDP_BUS 64
+#define CLK_MOUT_CMU_PDP_VRA 65
+#define CLK_MOUT_CMU_PERIC0_BUS 66
+#define CLK_MOUT_CMU_PERIC0_IP 67
+#define CLK_MOUT_CMU_PERIC1_BUS 68
+#define CLK_MOUT_CMU_PERIC1_IP 69
+#define CLK_MOUT_CMU_TNR_BUS 70
+#define CLK_MOUT_CMU_TOP_BOOST_OPTION1 71
+#define CLK_MOUT_CMU_TOP_CMUREF 72
+#define CLK_MOUT_CMU_TPU_BUS 73
+#define CLK_MOUT_CMU_TPU_TPU 74
+#define CLK_MOUT_CMU_TPU_TPUCTL 75
+#define CLK_MOUT_CMU_TPU_UART 76
+#define CLK_MOUT_CMU_CMUREF 77
+
+/* CMU_TOP Dividers */
+#define CLK_DOUT_CMU_BO_BUS 78
+#define CLK_DOUT_CMU_BUS0_BUS 79
+#define CLK_DOUT_CMU_BUS1_BUS 80
+#define CLK_DOUT_CMU_BUS2_BUS 81
+#define CLK_DOUT_CMU_CIS_CLK0 82
+#define CLK_DOUT_CMU_CIS_CLK1 83
+#define CLK_DOUT_CMU_CIS_CLK2 84
+#define CLK_DOUT_CMU_CIS_CLK3 85
+#define CLK_DOUT_CMU_CIS_CLK4 86
+#define CLK_DOUT_CMU_CIS_CLK5 87
+#define CLK_DOUT_CMU_CIS_CLK6 88
+#define CLK_DOUT_CMU_CIS_CLK7 89
+#define CLK_DOUT_CMU_CORE_BUS 90
+#define CLK_DOUT_CMU_CPUCL0_DBG 91
+#define CLK_DOUT_CMU_CPUCL0_SWITCH 92
+#define CLK_DOUT_CMU_CPUCL1_SWITCH 93
+#define CLK_DOUT_CMU_CPUCL2_SWITCH 94
+#define CLK_DOUT_CMU_CSIS_BUS 95
+#define CLK_DOUT_CMU_DISP_BUS 96
+#define CLK_DOUT_CMU_DNS_BUS 97
+#define CLK_DOUT_CMU_DPU_BUS 98
+#define CLK_DOUT_CMU_EH_BUS 99
+#define CLK_DOUT_CMU_G2D_G2D 100
+#define CLK_DOUT_CMU_G2D_MSCL 101
+#define CLK_DOUT_CMU_G3AA_G3AA 102
+#define CLK_DOUT_CMU_G3D_BUSD 103
+#define CLK_DOUT_CMU_G3D_GLB 104
+#define CLK_DOUT_CMU_G3D_SWITCH 105
+#define CLK_DOUT_CMU_GDC_GDC0 106
+#define CLK_DOUT_CMU_GDC_GDC1 107
+#define CLK_DOUT_CMU_GDC_SCSC 108
+#define CLK_DOUT_CMU_CMU_HPM 109
+#define CLK_DOUT_CMU_HSI0_BUS 110
+#define CLK_DOUT_CMU_HSI0_DPGTC 111
+#define CLK_DOUT_CMU_HSI0_USB31DRD 112
+#define CLK_DOUT_CMU_HSI0_USBDPDBG 113
+#define CLK_DOUT_CMU_HSI1_BUS 114
+#define CLK_DOUT_CMU_HSI1_PCIE 115
+#define CLK_DOUT_CMU_HSI2_BUS 116
+#define CLK_DOUT_CMU_HSI2_MMC_CARD 117
+#define CLK_DOUT_CMU_HSI2_PCIE 118
+#define CLK_DOUT_CMU_HSI2_UFS_EMBD 119
+#define CLK_DOUT_CMU_IPP_BUS 120
+#define CLK_DOUT_CMU_ITP_BUS 121
+#define CLK_DOUT_CMU_MCSC_ITSC 122
+#define CLK_DOUT_CMU_MCSC_MCSC 123
+#define CLK_DOUT_CMU_MFC_MFC 124
+#define CLK_DOUT_CMU_MIF_BUSP 125
+#define CLK_DOUT_CMU_MISC_BUS 126
+#define CLK_DOUT_CMU_MISC_SSS 127
+#define CLK_DOUT_CMU_OTP 128
+#define CLK_DOUT_CMU_PDP_BUS 129
+#define CLK_DOUT_CMU_PDP_VRA 130
+#define CLK_DOUT_CMU_PERIC0_BUS 131
+#define CLK_DOUT_CMU_PERIC0_IP 132
+#define CLK_DOUT_CMU_PERIC1_BUS 133
+#define CLK_DOUT_CMU_PERIC1_IP 134
+#define CLK_DOUT_CMU_TNR_BUS 135
+#define CLK_DOUT_CMU_TPU_BUS 136
+#define CLK_DOUT_CMU_TPU_TPU 137
+#define CLK_DOUT_CMU_TPU_TPUCTL 138
+#define CLK_DOUT_CMU_TPU_UART 139
+#define CLK_DOUT_CMU_CMU_BOOST 140
+#define CLK_DOUT_CMU_CMU_CMUREF 141
+#define CLK_DOUT_CMU_SHARED0_DIV2 142
+#define CLK_DOUT_CMU_SHARED0_DIV3 143
+#define CLK_DOUT_CMU_SHARED0_DIV4 144
+#define CLK_DOUT_CMU_SHARED0_DIV5 145
+#define CLK_DOUT_CMU_SHARED1_DIV2 146
+#define CLK_DOUT_CMU_SHARED1_DIV3 147
+#define CLK_DOUT_CMU_SHARED1_DIV4 148
+#define CLK_DOUT_CMU_SHARED2_DIV2 149
+#define CLK_DOUT_CMU_SHARED3_DIV2 150
+
+/* CMU_TOP Gates */
+#define CLK_GOUT_CMU_BUS0_BOOST 151
+#define CLK_GOUT_CMU_BUS1_BOOST 152
+#define CLK_GOUT_CMU_BUS2_BOOST 153
+#define CLK_GOUT_CMU_CORE_BOOST 154
+#define CLK_GOUT_CMU_CPUCL0_BOOST 155
+#define CLK_GOUT_CMU_CPUCL1_BOOST 156
+#define CLK_GOUT_CMU_CPUCL2_BOOST 157
+#define CLK_GOUT_CMU_MIF_BOOST 158
+#define CLK_GOUT_CMU_MIF_SWITCH 159
+#define CLK_GOUT_CMU_BO_BUS 160
+#define CLK_GOUT_CMU_BUS0_BUS 161
+#define CLK_GOUT_CMU_BUS1_BUS 162
+#define CLK_GOUT_CMU_BUS2_BUS 163
+#define CLK_GOUT_CMU_CIS_CLK0 164
+#define CLK_GOUT_CMU_CIS_CLK1 165
+#define CLK_GOUT_CMU_CIS_CLK2 166
+#define CLK_GOUT_CMU_CIS_CLK3 167
+#define CLK_GOUT_CMU_CIS_CLK4 168
+#define CLK_GOUT_CMU_CIS_CLK5 169
+#define CLK_GOUT_CMU_CIS_CLK6 170
+#define CLK_GOUT_CMU_CIS_CLK7 171
+#define CLK_GOUT_CMU_CMU_BOOST 172
+#define CLK_GOUT_CMU_CORE_BUS 173
+#define CLK_GOUT_CMU_CPUCL0_DBG 174
+#define CLK_GOUT_CMU_CPUCL0_SWITCH 175
+#define CLK_GOUT_CMU_CPUCL1_SWITCH 176
+#define CLK_GOUT_CMU_CPUCL2_SWITCH 177
+#define CLK_GOUT_CMU_CSIS_BUS 178
+#define CLK_GOUT_CMU_DISP_BUS 179
+#define CLK_GOUT_CMU_DNS_BUS 180
+#define CLK_GOUT_CMU_DPU_BUS 181
+#define CLK_GOUT_CMU_EH_BUS 182
+#define CLK_GOUT_CMU_G2D_G2D 183
+#define CLK_GOUT_CMU_G2D_MSCL 184
+#define CLK_GOUT_CMU_G3AA_G3AA 185
+#define CLK_GOUT_CMU_G3D_BUSD 186
+#define CLK_GOUT_CMU_G3D_GLB 187
+#define CLK_GOUT_CMU_G3D_SWITCH 188
+#define CLK_GOUT_CMU_GDC_GDC0 189
+#define CLK_GOUT_CMU_GDC_GDC1 190
+#define CLK_GOUT_CMU_GDC_SCSC 191
+#define CLK_GOUT_CMU_HPM 192
+#define CLK_GOUT_CMU_HSI0_BUS 193
+#define CLK_GOUT_CMU_HSI0_DPGTC 194
+#define CLK_GOUT_CMU_HSI0_USB31DRD 195
+#define CLK_GOUT_CMU_HSI0_USBDPDBG 196
+#define CLK_GOUT_CMU_HSI1_BUS 197
+#define CLK_GOUT_CMU_HSI1_PCIE 198
+#define CLK_GOUT_CMU_HSI2_BUS 199
+#define CLK_GOUT_CMU_HSI2_MMC_CARD 200
+#define CLK_GOUT_CMU_HSI2_PCIE 201
+#define CLK_GOUT_CMU_HSI2_UFS_EMBD 202
+#define CLK_GOUT_CMU_IPP_BUS 203
+#define CLK_GOUT_CMU_ITP_BUS 204
+#define CLK_GOUT_CMU_MCSC_ITSC 205
+#define CLK_GOUT_CMU_MCSC_MCSC 206
+#define CLK_GOUT_CMU_MFC_MFC 207
+#define CLK_GOUT_CMU_MIF_BUSP 208
+#define CLK_GOUT_CMU_MISC_BUS 209
+#define CLK_GOUT_CMU_MISC_SSS 210
+#define CLK_GOUT_CMU_PDP_BUS 211
+#define CLK_GOUT_CMU_PDP_VRA 212
+#define CLK_GOUT_CMU_G3AA 213
+#define CLK_GOUT_CMU_PERIC0_BUS 214
+#define CLK_GOUT_CMU_PERIC0_IP 215
+#define CLK_GOUT_CMU_PERIC1_BUS 216
+#define CLK_GOUT_CMU_PERIC1_IP 217
+#define CLK_GOUT_CMU_TNR_BUS 218
+#define CLK_GOUT_CMU_TOP_CMUREF 219
+#define CLK_GOUT_CMU_TPU_BUS 220
+#define CLK_GOUT_CMU_TPU_TPU 221
+#define CLK_GOUT_CMU_TPU_TPUCTL 222
+#define CLK_GOUT_CMU_TPU_UART 223
+
+/* CMU_APM */
+#define CLK_MOUT_APM_FUNC 1
+#define CLK_MOUT_APM_FUNCSRC 2
+#define CLK_DOUT_APM_BOOST 3
+#define CLK_DOUT_APM_USI0_UART 4
+#define CLK_DOUT_APM_USI0_USI 5
+#define CLK_DOUT_APM_USI1_UART 6
+#define CLK_GOUT_APM_APM_CMU_APM_PCLK 7
+#define CLK_GOUT_BUS0_BOOST_OPTION1 8
+#define CLK_GOUT_CMU_BOOST_OPTION1 9
+#define CLK_GOUT_CORE_BOOST_OPTION1 10
+#define CLK_GOUT_APM_FUNC 11
+#define CLK_GOUT_APM_APBIF_GPIO_ALIVE_PCLK 12
+#define CLK_GOUT_APM_APBIF_GPIO_FAR_ALIVE_PCLK 13
+#define CLK_GOUT_APM_APBIF_PMU_ALIVE_PCLK 14
+#define CLK_GOUT_APM_APBIF_RTC_PCLK 15
+#define CLK_GOUT_APM_APBIF_TRTC_PCLK 16
+#define CLK_GOUT_APM_APM_USI0_UART_IPCLK 17
+#define CLK_GOUT_APM_APM_USI0_UART_PCLK 18
+#define CLK_GOUT_APM_APM_USI0_USI_IPCLK 19
+#define CLK_GOUT_APM_APM_USI0_USI_PCLK 20
+#define CLK_GOUT_APM_APM_USI1_UART_IPCLK 21
+#define CLK_GOUT_APM_APM_USI1_UART_PCLK 22
+#define CLK_GOUT_APM_D_TZPC_APM_PCLK 23
+#define CLK_GOUT_APM_GPC_APM_PCLK 24
+#define CLK_GOUT_APM_GREBEINTEGRATION_HCLK 25
+#define CLK_GOUT_APM_INTMEM_ACLK 26
+#define CLK_GOUT_APM_INTMEM_PCLK 27
+#define CLK_GOUT_APM_LHM_AXI_G_SWD_I_CLK 28
+#define CLK_GOUT_APM_LHM_AXI_P_AOCAPM_I_CLK 29
+#define CLK_GOUT_APM_LHM_AXI_P_APM_I_CLK 30
+#define CLK_GOUT_APM_LHS_AXI_D_APM_I_CLK 31
+#define CLK_GOUT_APM_LHS_AXI_G_DBGCORE_I_CLK 32
+#define CLK_GOUT_APM_LHS_AXI_G_SCAN2DRAM_I_CLK 33
+#define CLK_GOUT_APM_MAILBOX_APM_AOC_PCLK 34
+#define CLK_GOUT_APM_MAILBOX_APM_AP_PCLK 35
+#define CLK_GOUT_APM_MAILBOX_APM_GSA_PCLK 36
+#define CLK_GOUT_APM_MAILBOX_APM_SWD_PCLK 37
+#define CLK_GOUT_APM_MAILBOX_APM_TPU_PCLK 38
+#define CLK_GOUT_APM_MAILBOX_AP_AOC_PCLK 39
+#define CLK_GOUT_APM_MAILBOX_AP_DBGCORE_PCLK 40
+#define CLK_GOUT_APM_PMU_INTR_GEN_PCLK 41
+#define CLK_GOUT_APM_ROM_CRC32_HOST_ACLK 42
+#define CLK_GOUT_APM_ROM_CRC32_HOST_PCLK 43
+#define CLK_GOUT_APM_CLK_APM_BUS_CLK 44
+#define CLK_GOUT_APM_CLK_APM_USI0_UART_CLK 45
+#define CLK_GOUT_APM_CLK_APM_USI0_USI_CLK 46
+#define CLK_GOUT_APM_CLK_APM_USI1_UART_CLK 47
+#define CLK_GOUT_APM_SPEEDY_APM_PCLK 48
+#define CLK_GOUT_APM_SPEEDY_SUB_APM_PCLK 49
+#define CLK_GOUT_APM_SSMT_D_APM_ACLK 50
+#define CLK_GOUT_APM_SSMT_D_APM_PCLK 51
+#define CLK_GOUT_APM_SSMT_G_DBGCORE_ACLK 52
+#define CLK_GOUT_APM_SSMT_G_DBGCORE_PCLK 53
+#define CLK_GOUT_APM_SS_DBGCORE_SS_DBGCORE_HCLK 54
+#define CLK_GOUT_APM_SYSMMU_D_APM_CLK_S2 55
+#define CLK_GOUT_APM_SYSREG_APM_PCLK 56
+#define CLK_GOUT_APM_UASC_APM_ACLK 57
+#define CLK_GOUT_APM_UASC_APM_PCLK 58
+#define CLK_GOUT_APM_UASC_DBGCORE_ACLK 59
+#define CLK_GOUT_APM_UASC_DBGCORE_PCLK 60
+#define CLK_GOUT_APM_UASC_G_SWD_ACLK 61
+#define CLK_GOUT_APM_UASC_G_SWD_PCLK 62
+#define CLK_GOUT_APM_UASC_P_AOCAPM_ACLK 63
+#define CLK_GOUT_APM_UASC_P_AOCAPM_PCLK 64
+#define CLK_GOUT_APM_UASC_P_APM_ACLK 65
+#define CLK_GOUT_APM_UASC_P_APM_PCLK 66
+#define CLK_GOUT_APM_WDT_APM_PCLK 67
+#define CLK_GOUT_APM_XIU_DP_APM_ACLK 68
+#define CLK_APM_PLL_DIV2_APM 69
+#define CLK_APM_PLL_DIV4_APM 70
+#define CLK_APM_PLL_DIV16_APM 71
+
+/* CMU_MISC */
+#define CLK_MOUT_MISC_BUS_USER 1
+#define CLK_MOUT_MISC_SSS_USER 2
+#define CLK_MOUT_MISC_GIC 3
+#define CLK_DOUT_MISC_BUSP 4
+#define CLK_DOUT_MISC_GIC 5
+#define CLK_GOUT_MISC_MISC_CMU_MISC_PCLK 6
+#define CLK_GOUT_MISC_OTP_CON_BIRA_I_OSCCLK 7
+#define CLK_GOUT_MISC_OTP_CON_BISR_I_OSCCLK 8
+#define CLK_GOUT_MISC_OTP_CON_TOP_I_OSCCLK 9
+#define CLK_GOUT_MISC_CLK_MISC_OSCCLK_CLK 10
+#define CLK_GOUT_MISC_ADM_AHB_SSS_HCLKM 11
+#define CLK_GOUT_MISC_AD_APB_DIT_PCLKM 12
+#define CLK_GOUT_MISC_AD_APB_PUF_PCLKM 13
+#define CLK_GOUT_MISC_DIT_ICLKL2A 14
+#define CLK_GOUT_MISC_D_TZPC_MISC_PCLK 15
+#define CLK_GOUT_MISC_GIC_GICCLK 16
+#define CLK_GOUT_MISC_GPC_MISC_PCLK 17
+#define CLK_GOUT_MISC_LHM_AST_ICC_CPUGIC_I_CLK 18
+#define CLK_GOUT_MISC_LHM_AXI_D_SSS_I_CLK 19
+#define CLK_GOUT_MISC_LHM_AXI_P_GIC_I_CLK 20
+#define CLK_GOUT_MISC_LHM_AXI_P_MISC_I_CLK 21
+#define CLK_GOUT_MISC_LHS_ACEL_D_MISC_I_CLK 22
+#define CLK_GOUT_MISC_LHS_AST_IRI_GICCPU_I_CLK 23
+#define CLK_GOUT_MISC_LHS_AXI_D_SSS_I_CLK 24
+#define CLK_GOUT_MISC_MCT_PCLK 25
+#define CLK_GOUT_MISC_OTP_CON_BIRA_PCLK 26
+#define CLK_GOUT_MISC_OTP_CON_BISR_PCLK 27
+#define CLK_GOUT_MISC_OTP_CON_TOP_PCLK 28
+#define CLK_GOUT_MISC_PDMA_ACLK 29
+#define CLK_GOUT_MISC_PPMU_DMA_ACLK 30
+#define CLK_GOUT_MISC_PPMU_MISC_ACLK 31
+#define CLK_GOUT_MISC_PPMU_MISC_PCLK 32
+#define CLK_GOUT_MISC_PUF_I_CLK 33
+#define CLK_GOUT_MISC_QE_DIT_ACLK 34
+#define CLK_GOUT_MISC_QE_DIT_PCLK 35
+#define CLK_GOUT_MISC_QE_PDMA_ACLK 36
+#define CLK_GOUT_MISC_QE_PDMA_PCLK 37
+#define CLK_GOUT_MISC_QE_PPMU_DMA_ACLK 38
+#define CLK_GOUT_MISC_QE_PPMU_DMA_PCLK 39
+#define CLK_GOUT_MISC_QE_RTIC_ACLK 40
+#define CLK_GOUT_MISC_QE_RTIC_PCLK 41
+#define CLK_GOUT_MISC_QE_SPDMA_ACLK 42
+#define CLK_GOUT_MISC_QE_SPDMA_PCLK 43
+#define CLK_GOUT_MISC_QE_SSS_ACLK 44
+#define CLK_GOUT_MISC_QE_SSS_PCLK 45
+#define CLK_GOUT_MISC_CLK_MISC_BUSD_CLK 46
+#define CLK_GOUT_MISC_CLK_MISC_BUSP_CLK 47
+#define CLK_GOUT_MISC_CLK_MISC_GIC_CLK 48
+#define CLK_GOUT_MISC_CLK_MISC_SSS_CLK 49
+#define CLK_GOUT_MISC_RTIC_I_ACLK 50
+#define CLK_GOUT_MISC_RTIC_I_PCLK 51
+#define CLK_GOUT_MISC_SPDMA_ACLK 52
+#define CLK_GOUT_MISC_SSMT_DIT_ACLK 53
+#define CLK_GOUT_MISC_SSMT_DIT_PCLK 54
+#define CLK_GOUT_MISC_SSMT_PDMA_ACLK 55
+#define CLK_GOUT_MISC_SSMT_PDMA_PCLK 56
+#define CLK_GOUT_MISC_SSMT_PPMU_DMA_ACLK 57
+#define CLK_GOUT_MISC_SSMT_PPMU_DMA_PCLK 58
+#define CLK_GOUT_MISC_SSMT_RTIC_ACLK 59
+#define CLK_GOUT_MISC_SSMT_RTIC_PCLK 60
+#define CLK_GOUT_MISC_SSMT_SPDMA_ACLK 61
+#define CLK_GOUT_MISC_SSMT_SPDMA_PCLK 62
+#define CLK_GOUT_MISC_SSMT_SSS_ACLK 63
+#define CLK_GOUT_MISC_SSMT_SSS_PCLK 64
+#define CLK_GOUT_MISC_SSS_I_ACLK 65
+#define CLK_GOUT_MISC_SSS_I_PCLK 66
+#define CLK_GOUT_MISC_SYSMMU_MISC_CLK_S2 67
+#define CLK_GOUT_MISC_SYSMMU_SSS_CLK_S1 68
+#define CLK_GOUT_MISC_SYSREG_MISC_PCLK 69
+#define CLK_GOUT_MISC_TMU_SUB_PCLK 70
+#define CLK_GOUT_MISC_TMU_TOP_PCLK 71
+#define CLK_GOUT_MISC_WDT_CLUSTER0_PCLK 72
+#define CLK_GOUT_MISC_WDT_CLUSTER1_PCLK 73
+#define CLK_GOUT_MISC_XIU_D_MISC_ACLK 74
+
+#endif /* _DT_BINDINGS_CLOCK_GOOGLE_GS101_H */
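As elsewhere in this series, each constant selects one output of the given
CMU. An illustrative consumer of the CMU_MISC MCT gate (node label,
controller label and clock-names are assumptions, not taken from the patch):

    #include <dt-bindings/clock/google,gs101.h>

    &mct {
            clocks = <&cmu_misc CLK_GOUT_MISC_MCT_PCLK>;
            clock-names = "pclk";
    };
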
diff --git a/include/dt-bindings/clock/gxbb-clkc.h b/include/dt-bindings/clock/gxbb-clkc.h
index 4073eb7a9da1..c0ce5e9c4151 100644
--- a/include/dt-bindings/clock/gxbb-clkc.h
+++ b/include/dt-bindings/clock/gxbb-clkc.h
@@ -15,6 +15,8 @@
#define CLKID_FCLK_DIV5 7
#define CLKID_FCLK_DIV7 8
#define CLKID_GP0_PLL 9
+#define CLKID_MPEG_SEL 10
+#define CLKID_MPEG_DIV 11
#define CLKID_CLK81 12
#define CLKID_MPLL0 13
#define CLKID_MPLL1 14
@@ -102,35 +104,92 @@
#define CLKID_SD_EMMC_C 96
#define CLKID_SAR_ADC_CLK 97
#define CLKID_SAR_ADC_SEL 98
+#define CLKID_SAR_ADC_DIV 99
#define CLKID_MALI_0_SEL 100
+#define CLKID_MALI_0_DIV 101
#define CLKID_MALI_0 102
#define CLKID_MALI_1_SEL 103
+#define CLKID_MALI_1_DIV 104
#define CLKID_MALI_1 105
#define CLKID_MALI 106
#define CLKID_CTS_AMCLK 107
+#define CLKID_CTS_AMCLK_SEL 108
+#define CLKID_CTS_AMCLK_DIV 109
#define CLKID_CTS_MCLK_I958 110
+#define CLKID_CTS_MCLK_I958_SEL 111
+#define CLKID_CTS_MCLK_I958_DIV 112
#define CLKID_CTS_I958 113
#define CLKID_32K_CLK 114
+#define CLKID_32K_CLK_SEL 115
+#define CLKID_32K_CLK_DIV 116
+#define CLKID_SD_EMMC_A_CLK0_SEL 117
+#define CLKID_SD_EMMC_A_CLK0_DIV 118
#define CLKID_SD_EMMC_A_CLK0 119
+#define CLKID_SD_EMMC_B_CLK0_SEL 120
+#define CLKID_SD_EMMC_B_CLK0_DIV 121
#define CLKID_SD_EMMC_B_CLK0 122
+#define CLKID_SD_EMMC_C_CLK0_SEL 123
+#define CLKID_SD_EMMC_C_CLK0_DIV 124
#define CLKID_SD_EMMC_C_CLK0 125
#define CLKID_VPU_0_SEL 126
+#define CLKID_VPU_0_DIV 127
#define CLKID_VPU_0 128
#define CLKID_VPU_1_SEL 129
+#define CLKID_VPU_1_DIV 130
#define CLKID_VPU_1 131
#define CLKID_VPU 132
#define CLKID_VAPB_0_SEL 133
+#define CLKID_VAPB_0_DIV 134
#define CLKID_VAPB_0 135
#define CLKID_VAPB_1_SEL 136
+#define CLKID_VAPB_1_DIV 137
#define CLKID_VAPB_1 138
#define CLKID_VAPB_SEL 139
#define CLKID_VAPB 140
+#define CLKID_HDMI_PLL_PRE_MULT 141
+#define CLKID_MPLL0_DIV 142
+#define CLKID_MPLL1_DIV 143
+#define CLKID_MPLL2_DIV 144
+#define CLKID_MPLL_PREDIV 145
+#define CLKID_FCLK_DIV2_DIV 146
+#define CLKID_FCLK_DIV3_DIV 147
+#define CLKID_FCLK_DIV4_DIV 148
+#define CLKID_FCLK_DIV5_DIV 149
+#define CLKID_FCLK_DIV7_DIV 150
+#define CLKID_VDEC_1_SEL 151
+#define CLKID_VDEC_1_DIV 152
#define CLKID_VDEC_1 153
+#define CLKID_VDEC_HEVC_SEL 154
+#define CLKID_VDEC_HEVC_DIV 155
#define CLKID_VDEC_HEVC 156
+#define CLKID_GEN_CLK_SEL 157
+#define CLKID_GEN_CLK_DIV 158
#define CLKID_GEN_CLK 159
+#define CLKID_FIXED_PLL_DCO 160
+#define CLKID_HDMI_PLL_DCO 161
+#define CLKID_HDMI_PLL_OD 162
+#define CLKID_HDMI_PLL_OD2 163
+#define CLKID_SYS_PLL_DCO 164
+#define CLKID_GP0_PLL_DCO 165
#define CLKID_VID_PLL 166
+#define CLKID_VID_PLL_SEL 167
+#define CLKID_VID_PLL_DIV 168
+#define CLKID_VCLK_SEL 169
+#define CLKID_VCLK2_SEL 170
+#define CLKID_VCLK_INPUT 171
+#define CLKID_VCLK2_INPUT 172
+#define CLKID_VCLK_DIV 173
+#define CLKID_VCLK2_DIV 174
#define CLKID_VCLK 175
#define CLKID_VCLK2 176
+#define CLKID_VCLK_DIV2_EN 177
+#define CLKID_VCLK_DIV4_EN 178
+#define CLKID_VCLK_DIV6_EN 179
+#define CLKID_VCLK_DIV12_EN 180
+#define CLKID_VCLK2_DIV2_EN 181
+#define CLKID_VCLK2_DIV4_EN 182
+#define CLKID_VCLK2_DIV6_EN 183
+#define CLKID_VCLK2_DIV12_EN 184
#define CLKID_VCLK_DIV1 185
#define CLKID_VCLK_DIV2 186
#define CLKID_VCLK_DIV4 187
@@ -141,10 +200,16 @@
#define CLKID_VCLK2_DIV4 192
#define CLKID_VCLK2_DIV6 193
#define CLKID_VCLK2_DIV12 194
+#define CLKID_CTS_ENCI_SEL 195
+#define CLKID_CTS_ENCP_SEL 196
+#define CLKID_CTS_VDAC_SEL 197
+#define CLKID_HDMI_TX_SEL 198
#define CLKID_CTS_ENCI 199
#define CLKID_CTS_ENCP 200
#define CLKID_CTS_VDAC 201
#define CLKID_HDMI_TX 202
+#define CLKID_HDMI_SEL 203
+#define CLKID_HDMI_DIV 204
#define CLKID_HDMI 205
#define CLKID_ACODEC 206
diff --git a/include/dt-bindings/clock/hi3559av100-clock.h b/include/dt-bindings/clock/hi3559av100-clock.h
index 5fe7689010a0..a4f0e997546c 100644
--- a/include/dt-bindings/clock/hi3559av100-clock.h
+++ b/include/dt-bindings/clock/hi3559av100-clock.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later or BSD-2-Clause */
+/* SPDX-License-Identifier: GPL-2.0-or-later OR BSD-2-Clause */
/*
* Copyright (c) 2019-2020, Huawei Tech. Co., Ltd.
*
diff --git a/include/dt-bindings/clock/imx8-clock.h b/include/dt-bindings/clock/imx8-clock.h
index 2e60ce4d2622..2242ff54fc5e 100644
--- a/include/dt-bindings/clock/imx8-clock.h
+++ b/include/dt-bindings/clock/imx8-clock.h
@@ -164,4 +164,32 @@
#define IMX_ADMA_LPCG_CLK_END 45
+#define IMX_ADMA_ACM_AUD_CLK0_SEL 0
+#define IMX_ADMA_ACM_AUD_CLK1_SEL 1
+#define IMX_ADMA_ACM_MCLKOUT0_SEL 2
+#define IMX_ADMA_ACM_MCLKOUT1_SEL 3
+#define IMX_ADMA_ACM_ESAI0_MCLK_SEL 4
+#define IMX_ADMA_ACM_ESAI1_MCLK_SEL 5
+#define IMX_ADMA_ACM_GPT0_MUX_CLK_SEL 6
+#define IMX_ADMA_ACM_GPT1_MUX_CLK_SEL 7
+#define IMX_ADMA_ACM_GPT2_MUX_CLK_SEL 8
+#define IMX_ADMA_ACM_GPT3_MUX_CLK_SEL 9
+#define IMX_ADMA_ACM_GPT4_MUX_CLK_SEL 10
+#define IMX_ADMA_ACM_GPT5_MUX_CLK_SEL 11
+#define IMX_ADMA_ACM_SAI0_MCLK_SEL 12
+#define IMX_ADMA_ACM_SAI1_MCLK_SEL 13
+#define IMX_ADMA_ACM_SAI2_MCLK_SEL 14
+#define IMX_ADMA_ACM_SAI3_MCLK_SEL 15
+#define IMX_ADMA_ACM_SAI4_MCLK_SEL 16
+#define IMX_ADMA_ACM_SAI5_MCLK_SEL 17
+#define IMX_ADMA_ACM_SAI6_MCLK_SEL 18
+#define IMX_ADMA_ACM_SAI7_MCLK_SEL 19
+#define IMX_ADMA_ACM_SPDIF0_TX_CLK_SEL 20
+#define IMX_ADMA_ACM_SPDIF1_TX_CLK_SEL 21
+#define IMX_ADMA_ACM_MQS_TX_CLK_SEL 22
+#define IMX_ADMA_ACM_ASRC0_MUX_CLK_SEL 23
+#define IMX_ADMA_ACM_ASRC1_MUX_CLK_SEL 24
+
+#define IMX_ADMA_ACM_CLK_END 25
+
#endif /* __DT_BINDINGS_CLOCK_IMX_H */
diff --git a/include/dt-bindings/clock/imx8mp-clock.h b/include/dt-bindings/clock/imx8mp-clock.h
index 3f28ce685f41..7da4243984b2 100644
--- a/include/dt-bindings/clock/imx8mp-clock.h
+++ b/include/dt-bindings/clock/imx8mp-clock.h
@@ -130,7 +130,7 @@
#define IMX8MP_CLK_SAI1 123
#define IMX8MP_CLK_SAI2 124
#define IMX8MP_CLK_SAI3 125
-#define IMX8MP_CLK_SAI4 126
+/* #define IMX8MP_CLK_SAI4 126 */
#define IMX8MP_CLK_SAI5 127
#define IMX8MP_CLK_SAI6 128
#define IMX8MP_CLK_ENET_QOS 129
@@ -376,7 +376,6 @@
#define IMX8MP_CLK_AUDIOMIX_MU2_ROOT 36
#define IMX8MP_CLK_AUDIOMIX_MU3_ROOT 37
#define IMX8MP_CLK_AUDIOMIX_EARC_PHY 38
-#define IMX8MP_CLK_AUDIOMIX_PDM_ROOT 39
#define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK1_SEL 40
#define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK2_SEL 41
#define IMX8MP_CLK_AUDIOMIX_SAI2_MCLK1_SEL 42
diff --git a/include/dt-bindings/clock/imx93-clock.h b/include/dt-bindings/clock/imx93-clock.h
index 35a1f62053a5..787c9e74dc96 100644
--- a/include/dt-bindings/clock/imx93-clock.h
+++ b/include/dt-bindings/clock/imx93-clock.h
@@ -203,6 +203,7 @@
#define IMX93_CLK_ARM_PLL 198
#define IMX93_CLK_A55_SEL 199
#define IMX93_CLK_A55_CORE 200
-#define IMX93_CLK_END 201
+#define IMX93_CLK_PDM_IPG 201
+#define IMX93_CLK_END 202
#endif
diff --git a/include/dt-bindings/clock/intel,agilex5-clkmgr.h b/include/dt-bindings/clock/intel,agilex5-clkmgr.h
new file mode 100644
index 000000000000..2f3a23b31c5c
--- /dev/null
+++ b/include/dt-bindings/clock/intel,agilex5-clkmgr.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) 2023, Intel Corporation
+ */
+
+#ifndef __DT_BINDINGS_INTEL_AGILEX5_CLKMGR_H
+#define __DT_BINDINGS_INTEL_AGILEX5_CLKMGR_H
+
+/* fixed rate clocks */
+#define AGILEX5_OSC1 0
+#define AGILEX5_CB_INTOSC_HS_DIV2_CLK 1
+#define AGILEX5_CB_INTOSC_LS_CLK 2
+#define AGILEX5_F2S_FREE_CLK 3
+
+/* PLL clocks */
+#define AGILEX5_MAIN_PLL_CLK 4
+#define AGILEX5_MAIN_PLL_C0_CLK 5
+#define AGILEX5_MAIN_PLL_C1_CLK 6
+#define AGILEX5_MAIN_PLL_C2_CLK 7
+#define AGILEX5_MAIN_PLL_C3_CLK 8
+#define AGILEX5_PERIPH_PLL_CLK 9
+#define AGILEX5_PERIPH_PLL_C0_CLK 10
+#define AGILEX5_PERIPH_PLL_C1_CLK 11
+#define AGILEX5_PERIPH_PLL_C2_CLK 12
+#define AGILEX5_PERIPH_PLL_C3_CLK 13
+#define AGILEX5_CORE0_FREE_CLK 14
+#define AGILEX5_CORE1_FREE_CLK 15
+#define AGILEX5_CORE2_FREE_CLK 16
+#define AGILEX5_CORE3_FREE_CLK 17
+#define AGILEX5_DSU_FREE_CLK 18
+#define AGILEX5_BOOT_CLK 19
+
+/* fixed factor clocks */
+#define AGILEX5_L3_MAIN_FREE_CLK 20
+#define AGILEX5_NOC_FREE_CLK 21
+#define AGILEX5_S2F_USR0_CLK 22
+#define AGILEX5_NOC_CLK 23
+#define AGILEX5_EMAC_A_FREE_CLK 24
+#define AGILEX5_EMAC_B_FREE_CLK 25
+#define AGILEX5_EMAC_PTP_FREE_CLK 26
+#define AGILEX5_GPIO_DB_FREE_CLK 27
+#define AGILEX5_S2F_USER0_FREE_CLK 28
+#define AGILEX5_S2F_USER1_FREE_CLK 29
+#define AGILEX5_PSI_REF_FREE_CLK 30
+#define AGILEX5_USB31_FREE_CLK 31
+
+/* Gate clocks */
+#define AGILEX5_CORE0_CLK 32
+#define AGILEX5_CORE1_CLK 33
+#define AGILEX5_CORE2_CLK 34
+#define AGILEX5_CORE3_CLK 35
+#define AGILEX5_MPU_CLK 36
+#define AGILEX5_MPU_PERIPH_CLK 37
+#define AGILEX5_MPU_CCU_CLK 38
+#define AGILEX5_L4_MAIN_CLK 39
+#define AGILEX5_L4_MP_CLK 40
+#define AGILEX5_L4_SYS_FREE_CLK 41
+#define AGILEX5_L4_SP_CLK 42
+#define AGILEX5_CS_AT_CLK 43
+#define AGILEX5_CS_TRACE_CLK 44
+#define AGILEX5_CS_PDBG_CLK 45
+#define AGILEX5_EMAC0_CLK 46
+#define AGILEX5_EMAC1_CLK 47
+#define AGILEX5_EMAC2_CLK 48
+#define AGILEX5_EMAC_PTP_CLK 49
+#define AGILEX5_GPIO_DB_CLK 50
+#define AGILEX5_S2F_USER0_CLK 51
+#define AGILEX5_S2F_USER1_CLK 52
+#define AGILEX5_PSI_REF_CLK 53
+#define AGILEX5_USB31_SUSPEND_CLK 54
+#define AGILEX5_USB31_BUS_CLK_EARLY 55
+#define AGILEX5_USB2OTG_HCLK 56
+#define AGILEX5_SPIM_0_CLK 57
+#define AGILEX5_SPIM_1_CLK 58
+#define AGILEX5_SPIS_0_CLK 59
+#define AGILEX5_SPIS_1_CLK 60
+#define AGILEX5_DMA_CORE_CLK 61
+#define AGILEX5_DMA_HS_CLK 62
+#define AGILEX5_I3C_0_CORE_CLK 63
+#define AGILEX5_I3C_1_CORE_CLK 64
+#define AGILEX5_I2C_0_PCLK 65
+#define AGILEX5_I2C_1_PCLK 66
+#define AGILEX5_I2C_EMAC0_PCLK 67
+#define AGILEX5_I2C_EMAC1_PCLK 68
+#define AGILEX5_I2C_EMAC2_PCLK 69
+#define AGILEX5_UART_0_PCLK 70
+#define AGILEX5_UART_1_PCLK 71
+#define AGILEX5_SPTIMER_0_PCLK 72
+#define AGILEX5_SPTIMER_1_PCLK 73
+#define AGILEX5_DFI_CLK 74
+#define AGILEX5_NAND_NF_CLK 75
+#define AGILEX5_NAND_BCH_CLK 76
+#define AGILEX5_SDMMC_SDPHY_REG_CLK 77
+#define AGILEX5_SDMCLK 78
+#define AGILEX5_SOFTPHY_REG_PCLK 79
+#define AGILEX5_SOFTPHY_PHY_CLK 80
+#define AGILEX5_SOFTPHY_CTRL_CLK 81
+#define AGILEX5_NUM_CLKS 82
+
+#endif /* __DT_BINDINGS_INTEL_AGILEX5_CLKMGR_H */
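Usage mirrors the other clock-manager bindings; a hypothetical UART consumer
of the l4_sp gate (node and controller labels are illustrative):

    #include <dt-bindings/clock/intel,agilex5-clkmgr.h>

    &uart0 {
            clocks = <&clkmgr AGILEX5_L4_SP_CLK>;
    };
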
diff --git a/include/dt-bindings/clock/marvell,mmp2-audio.h b/include/dt-bindings/clock/marvell,mmp2-audio.h
index 20664776f497..9653e04dedc3 100644
--- a/include/dt-bindings/clock/marvell,mmp2-audio.h
+++ b/include/dt-bindings/clock/marvell,mmp2-audio.h
@@ -6,5 +6,4 @@
#define MMP2_CLK_AUDIO_SSPA0 1
#define MMP2_CLK_AUDIO_SSPA1 2
-#define MMP2_CLK_AUDIO_NR_CLKS 3
#endif
diff --git a/include/dt-bindings/clock/marvell,mmp2.h b/include/dt-bindings/clock/marvell,mmp2.h
index f0819d66b230..88c2d716476f 100644
--- a/include/dt-bindings/clock/marvell,mmp2.h
+++ b/include/dt-bindings/clock/marvell,mmp2.h
@@ -91,5 +91,4 @@
#define MMP3_CLK_SDH4 126
#define MMP2_CLK_AUDIO 127
-#define MMP2_NR_CLKS 200
#endif
diff --git a/include/dt-bindings/clock/marvell,pxa168.h b/include/dt-bindings/clock/marvell,pxa168.h
index c92d969ae941..d1bb59187e1d 100644
--- a/include/dt-bindings/clock/marvell,pxa168.h
+++ b/include/dt-bindings/clock/marvell,pxa168.h
@@ -63,5 +63,4 @@
#define PXA168_CLK_SDH01_AXI 111
#define PXA168_CLK_SDH23_AXI 112
-#define PXA168_NR_CLKS 200
#endif
diff --git a/include/dt-bindings/clock/marvell,pxa1928.h b/include/dt-bindings/clock/marvell,pxa1928.h
index 5dca4820297f..0c708d3d3314 100644
--- a/include/dt-bindings/clock/marvell,pxa1928.h
+++ b/include/dt-bindings/clock/marvell,pxa1928.h
@@ -36,7 +36,6 @@
#define PXA1928_CLK_THSENS_CPU 0x26
#define PXA1928_CLK_THSENS_VPU 0x27
#define PXA1928_CLK_THSENS_GC 0x28
-#define PXA1928_APBC_NR_CLKS 0x30
/* axi peripherals */
@@ -53,6 +52,4 @@
#define PXA1928_CLK_GC3D 0x5d
#define PXA1928_CLK_GC2D 0x5f
-#define PXA1928_APMU_NR_CLKS 0x60
-
#endif
diff --git a/include/dt-bindings/clock/marvell,pxa910.h b/include/dt-bindings/clock/marvell,pxa910.h
index c9018ab354d0..6caa231de0c1 100644
--- a/include/dt-bindings/clock/marvell,pxa910.h
+++ b/include/dt-bindings/clock/marvell,pxa910.h
@@ -55,5 +55,4 @@
#define PXA910_CLK_CCIC0_PHY 108
#define PXA910_CLK_CCIC0_SPHY 109
-#define PXA910_NR_CLKS 200
#endif
diff --git a/include/dt-bindings/clock/mediatek,mt7988-clk.h b/include/dt-bindings/clock/mediatek,mt7988-clk.h
new file mode 100644
index 000000000000..63376e40f14d
--- /dev/null
+++ b/include/dt-bindings/clock/mediatek,mt7988-clk.h
@@ -0,0 +1,280 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023 MediaTek Inc.
+ * Author: Sam Shih <sam.shih@mediatek.com>
+ * Author: Xiufeng Li <Xiufeng.Li@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT7988_H
+#define _DT_BINDINGS_CLK_MT7988_H
+
+/* APMIXEDSYS */
+
+#define CLK_APMIXED_NETSYSPLL 0
+#define CLK_APMIXED_MPLL 1
+#define CLK_APMIXED_MMPLL 2
+#define CLK_APMIXED_APLL2 3
+#define CLK_APMIXED_NET1PLL 4
+#define CLK_APMIXED_NET2PLL 5
+#define CLK_APMIXED_WEDMCUPLL 6
+#define CLK_APMIXED_SGMPLL 7
+#define CLK_APMIXED_ARM_B 8
+#define CLK_APMIXED_CCIPLL2_B 9
+#define CLK_APMIXED_USXGMIIPLL 10
+#define CLK_APMIXED_MSDCPLL 11
+
+/* TOPCKGEN */
+
+#define CLK_TOP_XTAL 0
+#define CLK_TOP_XTAL_D2 1
+#define CLK_TOP_RTC_32K 2
+#define CLK_TOP_RTC_32P7K 3
+#define CLK_TOP_MPLL_D2 4
+#define CLK_TOP_MPLL_D3_D2 5
+#define CLK_TOP_MPLL_D4 6
+#define CLK_TOP_MPLL_D8 7
+#define CLK_TOP_MPLL_D8_D2 8
+#define CLK_TOP_MMPLL_D2 9
+#define CLK_TOP_MMPLL_D3_D5 10
+#define CLK_TOP_MMPLL_D4 11
+#define CLK_TOP_MMPLL_D6_D2 12
+#define CLK_TOP_MMPLL_D8 13
+#define CLK_TOP_APLL2_D4 14
+#define CLK_TOP_NET1PLL_D4 15
+#define CLK_TOP_NET1PLL_D5 16
+#define CLK_TOP_NET1PLL_D5_D2 17
+#define CLK_TOP_NET1PLL_D5_D4 18
+#define CLK_TOP_NET1PLL_D8 19
+#define CLK_TOP_NET1PLL_D8_D2 20
+#define CLK_TOP_NET1PLL_D8_D4 21
+#define CLK_TOP_NET1PLL_D8_D8 22
+#define CLK_TOP_NET1PLL_D8_D16 23
+#define CLK_TOP_NET2PLL_D2 24
+#define CLK_TOP_NET2PLL_D4 25
+#define CLK_TOP_NET2PLL_D4_D4 26
+#define CLK_TOP_NET2PLL_D4_D8 27
+#define CLK_TOP_NET2PLL_D6 28
+#define CLK_TOP_NET2PLL_D8 29
+#define CLK_TOP_NETSYS_SEL 30
+#define CLK_TOP_NETSYS_500M_SEL 31
+#define CLK_TOP_NETSYS_2X_SEL 32
+#define CLK_TOP_NETSYS_GSW_SEL 33
+#define CLK_TOP_ETH_GMII_SEL 34
+#define CLK_TOP_NETSYS_MCU_SEL 35
+#define CLK_TOP_NETSYS_PAO_2X_SEL 36
+#define CLK_TOP_EIP197_SEL 37
+#define CLK_TOP_AXI_INFRA_SEL 38
+#define CLK_TOP_UART_SEL 39
+#define CLK_TOP_EMMC_250M_SEL 40
+#define CLK_TOP_EMMC_400M_SEL 41
+#define CLK_TOP_SPI_SEL 42
+#define CLK_TOP_SPIM_MST_SEL 43
+#define CLK_TOP_NFI1X_SEL 44
+#define CLK_TOP_SPINFI_SEL 45
+#define CLK_TOP_PWM_SEL 46
+#define CLK_TOP_I2C_SEL 47
+#define CLK_TOP_PCIE_MBIST_250M_SEL 48
+#define CLK_TOP_PEXTP_TL_SEL 49
+#define CLK_TOP_PEXTP_TL_P1_SEL 50
+#define CLK_TOP_PEXTP_TL_P2_SEL 51
+#define CLK_TOP_PEXTP_TL_P3_SEL 52
+#define CLK_TOP_USB_SYS_SEL 53
+#define CLK_TOP_USB_SYS_P1_SEL 54
+#define CLK_TOP_USB_XHCI_SEL 55
+#define CLK_TOP_USB_XHCI_P1_SEL 56
+#define CLK_TOP_USB_FRMCNT_SEL 57
+#define CLK_TOP_USB_FRMCNT_P1_SEL 58
+#define CLK_TOP_AUD_SEL 59
+#define CLK_TOP_A1SYS_SEL 60
+#define CLK_TOP_AUD_L_SEL 61
+#define CLK_TOP_A_TUNER_SEL 62
+#define CLK_TOP_SSPXTP_SEL 63
+#define CLK_TOP_USB_PHY_SEL 64
+#define CLK_TOP_USXGMII_SBUS_0_SEL 65
+#define CLK_TOP_USXGMII_SBUS_1_SEL 66
+#define CLK_TOP_SGM_0_SEL 67
+#define CLK_TOP_SGM_SBUS_0_SEL 68
+#define CLK_TOP_SGM_1_SEL 69
+#define CLK_TOP_SGM_SBUS_1_SEL 70
+#define CLK_TOP_XFI_PHY_0_XTAL_SEL 71
+#define CLK_TOP_XFI_PHY_1_XTAL_SEL 72
+#define CLK_TOP_SYSAXI_SEL 73
+#define CLK_TOP_SYSAPB_SEL 74
+#define CLK_TOP_ETH_REFCK_50M_SEL 75
+#define CLK_TOP_ETH_SYS_200M_SEL 76
+#define CLK_TOP_ETH_SYS_SEL 77
+#define CLK_TOP_ETH_XGMII_SEL 78
+#define CLK_TOP_BUS_TOPS_SEL 79
+#define CLK_TOP_NPU_TOPS_SEL 80
+#define CLK_TOP_DRAMC_SEL 81
+#define CLK_TOP_DRAMC_MD32_SEL 82
+#define CLK_TOP_INFRA_F26M_SEL 83
+#define CLK_TOP_PEXTP_P0_SEL 84
+#define CLK_TOP_PEXTP_P1_SEL 85
+#define CLK_TOP_PEXTP_P2_SEL 86
+#define CLK_TOP_PEXTP_P3_SEL 87
+#define CLK_TOP_DA_XTP_GLB_P0_SEL 88
+#define CLK_TOP_DA_XTP_GLB_P1_SEL 89
+#define CLK_TOP_DA_XTP_GLB_P2_SEL 90
+#define CLK_TOP_DA_XTP_GLB_P3_SEL 91
+#define CLK_TOP_CKM_SEL 92
+#define CLK_TOP_DA_SEL 93
+#define CLK_TOP_PEXTP_SEL 94
+#define CLK_TOP_TOPS_P2_26M_SEL 95
+#define CLK_TOP_MCUSYS_BACKUP_625M_SEL 96
+#define CLK_TOP_NETSYS_SYNC_250M_SEL 97
+#define CLK_TOP_MACSEC_SEL 98
+#define CLK_TOP_NETSYS_TOPS_400M_SEL 99
+#define CLK_TOP_NETSYS_PPEFB_250M_SEL 100
+#define CLK_TOP_NETSYS_WARP_SEL 101
+#define CLK_TOP_ETH_MII_SEL 102
+#define CLK_TOP_NPU_SEL 103
+#define CLK_TOP_AUD_I2S_M 104
+
+/* MCUSYS */
+
+#define CLK_MCU_BUS_DIV_SEL 0
+#define CLK_MCU_ARM_DIV_SEL 1
+
+/* INFRACFG_AO */
+
+#define CLK_INFRA_MUX_UART0_SEL 0
+#define CLK_INFRA_MUX_UART1_SEL 1
+#define CLK_INFRA_MUX_UART2_SEL 2
+#define CLK_INFRA_MUX_SPI0_SEL 3
+#define CLK_INFRA_MUX_SPI1_SEL 4
+#define CLK_INFRA_MUX_SPI2_SEL 5
+#define CLK_INFRA_PWM_SEL 6
+#define CLK_INFRA_PWM_CK1_SEL 7
+#define CLK_INFRA_PWM_CK2_SEL 8
+#define CLK_INFRA_PWM_CK3_SEL 9
+#define CLK_INFRA_PWM_CK4_SEL 10
+#define CLK_INFRA_PWM_CK5_SEL 11
+#define CLK_INFRA_PWM_CK6_SEL 12
+#define CLK_INFRA_PWM_CK7_SEL 13
+#define CLK_INFRA_PWM_CK8_SEL 14
+#define CLK_INFRA_PCIE_GFMUX_TL_O_P0_SEL 15
+#define CLK_INFRA_PCIE_GFMUX_TL_O_P1_SEL 16
+#define CLK_INFRA_PCIE_GFMUX_TL_O_P2_SEL 17
+#define CLK_INFRA_PCIE_GFMUX_TL_O_P3_SEL 18
+
+/* INFRACFG */
+
+#define CLK_INFRA_PCIE_PERI_26M_CK_P0 19
+#define CLK_INFRA_PCIE_PERI_26M_CK_P1 20
+#define CLK_INFRA_PCIE_PERI_26M_CK_P2 21
+#define CLK_INFRA_PCIE_PERI_26M_CK_P3 22
+#define CLK_INFRA_66M_GPT_BCK 23
+#define CLK_INFRA_66M_PWM_HCK 24
+#define CLK_INFRA_66M_PWM_BCK 25
+#define CLK_INFRA_66M_PWM_CK1 26
+#define CLK_INFRA_66M_PWM_CK2 27
+#define CLK_INFRA_66M_PWM_CK3 28
+#define CLK_INFRA_66M_PWM_CK4 29
+#define CLK_INFRA_66M_PWM_CK5 30
+#define CLK_INFRA_66M_PWM_CK6 31
+#define CLK_INFRA_66M_PWM_CK7 32
+#define CLK_INFRA_66M_PWM_CK8 33
+#define CLK_INFRA_133M_CQDMA_BCK 34
+#define CLK_INFRA_66M_AUD_SLV_BCK 35
+#define CLK_INFRA_AUD_26M 36
+#define CLK_INFRA_AUD_L 37
+#define CLK_INFRA_AUD_AUD 38
+#define CLK_INFRA_AUD_EG2 39
+#define CLK_INFRA_DRAMC_F26M 40
+#define CLK_INFRA_133M_DBG_ACKM 41
+#define CLK_INFRA_66M_AP_DMA_BCK 42
+#define CLK_INFRA_66M_SEJ_BCK 43
+#define CLK_INFRA_PRE_CK_SEJ_F13M 44
+#define CLK_INFRA_26M_THERM_SYSTEM 45
+#define CLK_INFRA_I2C_BCK 46
+#define CLK_INFRA_52M_UART0_CK 47
+#define CLK_INFRA_52M_UART1_CK 48
+#define CLK_INFRA_52M_UART2_CK 49
+#define CLK_INFRA_NFI 50
+#define CLK_INFRA_SPINFI 51
+#define CLK_INFRA_66M_NFI_HCK 52
+#define CLK_INFRA_104M_SPI0 53
+#define CLK_INFRA_104M_SPI1 54
+#define CLK_INFRA_104M_SPI2_BCK 55
+#define CLK_INFRA_66M_SPI0_HCK 56
+#define CLK_INFRA_66M_SPI1_HCK 57
+#define CLK_INFRA_66M_SPI2_HCK 58
+#define CLK_INFRA_66M_FLASHIF_AXI 59
+#define CLK_INFRA_RTC 60
+#define CLK_INFRA_26M_ADC_BCK 61
+#define CLK_INFRA_RC_ADC 62
+#define CLK_INFRA_MSDC400 63
+#define CLK_INFRA_MSDC2_HCK 64
+#define CLK_INFRA_133M_MSDC_0_HCK 65
+#define CLK_INFRA_66M_MSDC_0_HCK 66
+#define CLK_INFRA_133M_CPUM_BCK 67
+#define CLK_INFRA_BIST2FPC 68
+#define CLK_INFRA_I2C_X16W_MCK_CK_P1 69
+#define CLK_INFRA_I2C_X16W_PCK_CK_P1 70
+#define CLK_INFRA_133M_USB_HCK 71
+#define CLK_INFRA_133M_USB_HCK_CK_P1 72
+#define CLK_INFRA_66M_USB_HCK 73
+#define CLK_INFRA_66M_USB_HCK_CK_P1 74
+#define CLK_INFRA_USB_SYS 75
+#define CLK_INFRA_USB_SYS_CK_P1 76
+#define CLK_INFRA_USB_REF 77
+#define CLK_INFRA_USB_CK_P1 78
+#define CLK_INFRA_USB_FRMCNT 79
+#define CLK_INFRA_USB_FRMCNT_CK_P1 80
+#define CLK_INFRA_USB_PIPE 81
+#define CLK_INFRA_USB_PIPE_CK_P1 82
+#define CLK_INFRA_USB_UTMI 83
+#define CLK_INFRA_USB_UTMI_CK_P1 84
+#define CLK_INFRA_USB_XHCI 85
+#define CLK_INFRA_USB_XHCI_CK_P1 86
+#define CLK_INFRA_PCIE_GFMUX_TL_P0 87
+#define CLK_INFRA_PCIE_GFMUX_TL_P1 88
+#define CLK_INFRA_PCIE_GFMUX_TL_P2 89
+#define CLK_INFRA_PCIE_GFMUX_TL_P3 90
+#define CLK_INFRA_PCIE_PIPE_P0 91
+#define CLK_INFRA_PCIE_PIPE_P1 92
+#define CLK_INFRA_PCIE_PIPE_P2 93
+#define CLK_INFRA_PCIE_PIPE_P3 94
+#define CLK_INFRA_133M_PCIE_CK_P0 95
+#define CLK_INFRA_133M_PCIE_CK_P1 96
+#define CLK_INFRA_133M_PCIE_CK_P2 97
+#define CLK_INFRA_133M_PCIE_CK_P3 98
+
+/* ETHDMA */
+
+#define CLK_ETHDMA_XGP1_EN 0
+#define CLK_ETHDMA_XGP2_EN 1
+#define CLK_ETHDMA_XGP3_EN 2
+#define CLK_ETHDMA_FE_EN 3
+#define CLK_ETHDMA_GP2_EN 4
+#define CLK_ETHDMA_GP1_EN 5
+#define CLK_ETHDMA_GP3_EN 6
+#define CLK_ETHDMA_ESW_EN 7
+#define CLK_ETHDMA_CRYPT0_EN 8
+#define CLK_ETHDMA_NR_CLK 9
+
+/* SGMIISYS_0 */
+
+#define CLK_SGM0_TX_EN 0
+#define CLK_SGM0_RX_EN 1
+#define CLK_SGMII0_NR_CLK 2
+
+/* SGMIISYS_1 */
+
+#define CLK_SGM1_TX_EN 0
+#define CLK_SGM1_RX_EN 1
+#define CLK_SGMII1_NR_CLK 2
+
+/* ETHWARP */
+
+#define CLK_ETHWARP_WOCPU2_EN 0
+#define CLK_ETHWARP_WOCPU1_EN 1
+#define CLK_ETHWARP_WOCPU0_EN 2
+#define CLK_ETHWARP_NR_CLK 3
+
+/* XFIPLL */
+#define CLK_XFIPLL_PLL 0
+#define CLK_XFIPLL_PLL_EN 1
+
+#endif /* _DT_BINDINGS_CLK_MT7988_H */
diff --git a/include/dt-bindings/clock/meson8b-clkc.h b/include/dt-bindings/clock/meson8b-clkc.h
index 78aa07fd7cc0..385bf243c56c 100644
--- a/include/dt-bindings/clock/meson8b-clkc.h
+++ b/include/dt-bindings/clock/meson8b-clkc.h
@@ -100,29 +100,126 @@
#define CLKID_MPLL0 93
#define CLKID_MPLL1 94
#define CLKID_MPLL2 95
+#define CLKID_MPLL0_DIV 96
+#define CLKID_MPLL1_DIV 97
+#define CLKID_MPLL2_DIV 98
+#define CLKID_CPU_IN_SEL 99
+#define CLKID_CPU_IN_DIV2 100
+#define CLKID_CPU_IN_DIV3 101
+#define CLKID_CPU_SCALE_DIV 102
+#define CLKID_CPU_SCALE_OUT_SEL 103
+#define CLKID_MPLL_PREDIV 104
+#define CLKID_FCLK_DIV2_DIV 105
+#define CLKID_FCLK_DIV3_DIV 106
+#define CLKID_FCLK_DIV4_DIV 107
+#define CLKID_FCLK_DIV5_DIV 108
+#define CLKID_FCLK_DIV7_DIV 109
+#define CLKID_NAND_SEL 110
+#define CLKID_NAND_DIV 111
#define CLKID_NAND_CLK 112
+#define CLKID_PLL_FIXED_DCO 113
+#define CLKID_HDMI_PLL_DCO 114
+#define CLKID_PLL_SYS_DCO 115
+#define CLKID_CPU_CLK_DIV2 116
+#define CLKID_CPU_CLK_DIV3 117
+#define CLKID_CPU_CLK_DIV4 118
+#define CLKID_CPU_CLK_DIV5 119
+#define CLKID_CPU_CLK_DIV6 120
+#define CLKID_CPU_CLK_DIV7 121
+#define CLKID_CPU_CLK_DIV8 122
+#define CLKID_APB_SEL 123
#define CLKID_APB 124
+#define CLKID_PERIPH_SEL 125
#define CLKID_PERIPH 126
+#define CLKID_AXI_SEL 127
#define CLKID_AXI 128
+#define CLKID_L2_DRAM_SEL 129
#define CLKID_L2_DRAM 130
+#define CLKID_HDMI_PLL_LVDS_OUT 131
#define CLKID_HDMI_PLL_HDMI_OUT 132
+#define CLKID_VID_PLL_IN_SEL 133
+#define CLKID_VID_PLL_IN_EN 134
+#define CLKID_VID_PLL_PRE_DIV 135
+#define CLKID_VID_PLL_POST_DIV 136
#define CLKID_VID_PLL_FINAL_DIV 137
#define CLKID_VCLK_IN_SEL 138
+#define CLKID_VCLK_IN_EN 139
+#define CLKID_VCLK_DIV1 140
+#define CLKID_VCLK_DIV2_DIV 141
+#define CLKID_VCLK_DIV2 142
+#define CLKID_VCLK_DIV4_DIV 143
+#define CLKID_VCLK_DIV4 144
+#define CLKID_VCLK_DIV6_DIV 145
+#define CLKID_VCLK_DIV6 146
+#define CLKID_VCLK_DIV12_DIV 147
+#define CLKID_VCLK_DIV12 148
#define CLKID_VCLK2_IN_SEL 149
+#define CLKID_VCLK2_IN_EN 150
+#define CLKID_VCLK2_DIV1 151
+#define CLKID_VCLK2_DIV2_DIV 152
+#define CLKID_VCLK2_DIV2 153
+#define CLKID_VCLK2_DIV4_DIV 154
+#define CLKID_VCLK2_DIV4 155
+#define CLKID_VCLK2_DIV6_DIV 156
+#define CLKID_VCLK2_DIV6 157
+#define CLKID_VCLK2_DIV12_DIV 158
+#define CLKID_VCLK2_DIV12 159
+#define CLKID_CTS_ENCT_SEL 160
#define CLKID_CTS_ENCT 161
+#define CLKID_CTS_ENCP_SEL 162
#define CLKID_CTS_ENCP 163
+#define CLKID_CTS_ENCI_SEL 164
#define CLKID_CTS_ENCI 165
+#define CLKID_HDMI_TX_PIXEL_SEL 166
#define CLKID_HDMI_TX_PIXEL 167
+#define CLKID_CTS_ENCL_SEL 168
#define CLKID_CTS_ENCL 169
+#define CLKID_CTS_VDAC0_SEL 170
#define CLKID_CTS_VDAC0 171
+#define CLKID_HDMI_SYS_SEL 172
+#define CLKID_HDMI_SYS_DIV 173
#define CLKID_HDMI_SYS 174
+#define CLKID_MALI_0_SEL 175
+#define CLKID_MALI_0_DIV 176
+#define CLKID_MALI_0 177
+#define CLKID_MALI_1_SEL 178
+#define CLKID_MALI_1_DIV 179
+#define CLKID_MALI_1 180
+#define CLKID_GP_PLL_DCO 181
+#define CLKID_GP_PLL 182
+#define CLKID_VPU_0_SEL 183
+#define CLKID_VPU_0_DIV 184
+#define CLKID_VPU_0 185
+#define CLKID_VPU_1_SEL 186
+#define CLKID_VPU_1_DIV 187
+#define CLKID_VPU_1 189
#define CLKID_VPU 190
+#define CLKID_VDEC_1_SEL 191
+#define CLKID_VDEC_1_1_DIV 192
+#define CLKID_VDEC_1_1 193
+#define CLKID_VDEC_1_2_DIV 194
+#define CLKID_VDEC_1_2 195
#define CLKID_VDEC_1 196
+#define CLKID_VDEC_HCODEC_SEL 197
+#define CLKID_VDEC_HCODEC_DIV 198
#define CLKID_VDEC_HCODEC 199
+#define CLKID_VDEC_2_SEL 200
+#define CLKID_VDEC_2_DIV 201
#define CLKID_VDEC_2 202
+#define CLKID_VDEC_HEVC_SEL 203
+#define CLKID_VDEC_HEVC_DIV 204
+#define CLKID_VDEC_HEVC_EN 205
#define CLKID_VDEC_HEVC 206
+#define CLKID_CTS_AMCLK_SEL 207
+#define CLKID_CTS_AMCLK_DIV 208
#define CLKID_CTS_AMCLK 209
+#define CLKID_CTS_MCLK_I958_SEL 210
+#define CLKID_CTS_MCLK_I958_DIV 211
#define CLKID_CTS_MCLK_I958 212
#define CLKID_CTS_I958 213
+#define CLKID_VCLK_EN 214
+#define CLKID_VCLK2_EN 215
+#define CLKID_VID_PLL_LVDS_EN 216
+#define CLKID_HDMI_PLL_DCO_IN 217
#endif /* __MESON8B_CLKC_H */
diff --git a/include/dt-bindings/clock/nuvoton,ma35d1-clk.h b/include/dt-bindings/clock/nuvoton,ma35d1-clk.h
new file mode 100644
index 000000000000..ba2d70f776a6
--- /dev/null
+++ b/include/dt-bindings/clock/nuvoton,ma35d1-clk.h
@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2023 Nuvoton Technologies.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_NUVOTON_MA35D1_CLK_H
+#define __DT_BINDINGS_CLOCK_NUVOTON_MA35D1_CLK_H
+
+/* external and internal oscillator clocks */
+#define HXT 0
+#define HXT_GATE 1
+#define LXT 2
+#define LXT_GATE 3
+#define HIRC 4
+#define HIRC_GATE 5
+#define LIRC 6
+#define LIRC_GATE 7
+/* PLLs */
+#define CAPLL 8
+#define SYSPLL 9
+#define DDRPLL 10
+#define APLL 11
+#define EPLL 12
+#define VPLL 13
+/* EPLL divider */
+#define EPLL_DIV2 14
+#define EPLL_DIV4 15
+#define EPLL_DIV8 16
+/* CPU clock, system clock, AXI, HCLK and PCLK */
+#define CA35CLK_MUX 17
+#define AXICLK_DIV2 18
+#define AXICLK_DIV4 19
+#define AXICLK_MUX 20
+#define SYSCLK0_MUX 21
+#define SYSCLK1_MUX 22
+#define SYSCLK1_DIV2 23
+#define HCLK0 24
+#define HCLK1 25
+#define HCLK2 26
+#define PCLK0 27
+#define PCLK1 28
+#define PCLK2 29
+#define HCLK3 30
+#define PCLK3 31
+#define PCLK4 32
+/* AXI and AHB peripheral clocks */
+#define USBPHY0 33
+#define USBPHY1 34
+#define DDR0_GATE 35
+#define DDR6_GATE 36
+#define CAN0_MUX 37
+#define CAN0_DIV 38
+#define CAN0_GATE 39
+#define CAN1_MUX 40
+#define CAN1_DIV 41
+#define CAN1_GATE 42
+#define CAN2_MUX 43
+#define CAN2_DIV 44
+#define CAN2_GATE 45
+#define CAN3_MUX 46
+#define CAN3_DIV 47
+#define CAN3_GATE 48
+#define SDH0_MUX 49
+#define SDH0_GATE 50
+#define SDH1_MUX 51
+#define SDH1_GATE 52
+#define NAND_GATE 53
+#define USBD_GATE 54
+#define USBH_GATE 55
+#define HUSBH0_GATE 56
+#define HUSBH1_GATE 57
+#define GFX_MUX 58
+#define GFX_GATE 59
+#define VC8K_GATE 60
+#define DCU_MUX 61
+#define DCU_GATE 62
+#define DCUP_DIV 63
+#define EMAC0_GATE 64
+#define EMAC1_GATE 65
+#define CCAP0_MUX 66
+#define CCAP0_DIV 67
+#define CCAP0_GATE 68
+#define CCAP1_MUX 69
+#define CCAP1_DIV 70
+#define CCAP1_GATE 71
+#define PDMA0_GATE 72
+#define PDMA1_GATE 73
+#define PDMA2_GATE 74
+#define PDMA3_GATE 75
+#define WH0_GATE 76
+#define WH1_GATE 77
+#define HWS_GATE 78
+#define EBI_GATE 79
+#define SRAM0_GATE 80
+#define SRAM1_GATE 81
+#define ROM_GATE 82
+#define TRA_GATE 83
+#define DBG_MUX 84
+#define DBG_GATE 85
+#define CKO_MUX 86
+#define CKO_DIV 87
+#define CKO_GATE 88
+#define GTMR_GATE 89
+#define GPA_GATE 90
+#define GPB_GATE 91
+#define GPC_GATE 92
+#define GPD_GATE 93
+#define GPE_GATE 94
+#define GPF_GATE 95
+#define GPG_GATE 96
+#define GPH_GATE 97
+#define GPI_GATE 98
+#define GPJ_GATE 99
+#define GPK_GATE 100
+#define GPL_GATE 101
+#define GPM_GATE 102
+#define GPN_GATE 103
+/* APB peripheral clocks */
+#define TMR0_MUX 104
+#define TMR0_GATE 105
+#define TMR1_MUX 106
+#define TMR1_GATE 107
+#define TMR2_MUX 108
+#define TMR2_GATE 109
+#define TMR3_MUX 110
+#define TMR3_GATE 111
+#define TMR4_MUX 112
+#define TMR4_GATE 113
+#define TMR5_MUX 114
+#define TMR5_GATE 115
+#define TMR6_MUX 116
+#define TMR6_GATE 117
+#define TMR7_MUX 118
+#define TMR7_GATE 119
+#define TMR8_MUX 120
+#define TMR8_GATE 121
+#define TMR9_MUX 122
+#define TMR9_GATE 123
+#define TMR10_MUX 124
+#define TMR10_GATE 125
+#define TMR11_MUX 126
+#define TMR11_GATE 127
+#define UART0_MUX 128
+#define UART0_DIV 129
+#define UART0_GATE 130
+#define UART1_MUX 131
+#define UART1_DIV 132
+#define UART1_GATE 133
+#define UART2_MUX 134
+#define UART2_DIV 135
+#define UART2_GATE 136
+#define UART3_MUX 137
+#define UART3_DIV 138
+#define UART3_GATE 139
+#define UART4_MUX 140
+#define UART4_DIV 141
+#define UART4_GATE 142
+#define UART5_MUX 143
+#define UART5_DIV 144
+#define UART5_GATE 145
+#define UART6_MUX 146
+#define UART6_DIV 147
+#define UART6_GATE 148
+#define UART7_MUX 149
+#define UART7_DIV 150
+#define UART7_GATE 151
+#define UART8_MUX 152
+#define UART8_DIV 153
+#define UART8_GATE 154
+#define UART9_MUX 155
+#define UART9_DIV 156
+#define UART9_GATE 157
+#define UART10_MUX 158
+#define UART10_DIV 159
+#define UART10_GATE 160
+#define UART11_MUX 161
+#define UART11_DIV 162
+#define UART11_GATE 163
+#define UART12_MUX 164
+#define UART12_DIV 165
+#define UART12_GATE 166
+#define UART13_MUX 167
+#define UART13_DIV 168
+#define UART13_GATE 169
+#define UART14_MUX 170
+#define UART14_DIV 171
+#define UART14_GATE 172
+#define UART15_MUX 173
+#define UART15_DIV 174
+#define UART15_GATE 175
+#define UART16_MUX 176
+#define UART16_DIV 177
+#define UART16_GATE 178
+#define RTC_GATE 179
+#define DDR_GATE 180
+#define KPI_MUX 181
+#define KPI_DIV 182
+#define KPI_GATE 183
+#define I2C0_GATE 184
+#define I2C1_GATE 185
+#define I2C2_GATE 186
+#define I2C3_GATE 187
+#define I2C4_GATE 188
+#define I2C5_GATE 189
+#define QSPI0_MUX 190
+#define QSPI0_GATE 191
+#define QSPI1_MUX 192
+#define QSPI1_GATE 193
+#define SMC0_MUX 194
+#define SMC0_DIV 195
+#define SMC0_GATE 196
+#define SMC1_MUX 197
+#define SMC1_DIV 198
+#define SMC1_GATE 199
+#define WDT0_MUX 200
+#define WDT0_GATE 201
+#define WDT1_MUX 202
+#define WDT1_GATE 203
+#define WDT2_MUX 204
+#define WDT2_GATE 205
+#define WWDT0_MUX 206
+#define WWDT1_MUX 207
+#define WWDT2_MUX 208
+#define EPWM0_GATE 209
+#define EPWM1_GATE 210
+#define EPWM2_GATE 211
+#define I2S0_MUX 212
+#define I2S0_GATE 213
+#define I2S1_MUX 214
+#define I2S1_GATE 215
+#define SSMCC_GATE 216
+#define SSPCC_GATE 217
+#define SPI0_MUX 218
+#define SPI0_GATE 219
+#define SPI1_MUX 220
+#define SPI1_GATE 221
+#define SPI2_MUX 222
+#define SPI2_GATE 223
+#define SPI3_MUX 224
+#define SPI3_GATE 225
+#define ECAP0_GATE 226
+#define ECAP1_GATE 227
+#define ECAP2_GATE 228
+#define QEI0_GATE 229
+#define QEI1_GATE 230
+#define QEI2_GATE 231
+#define ADC_DIV 232
+#define ADC_GATE 233
+#define EADC_DIV 234
+#define EADC_GATE 235
+#define CLK_MAX_IDX 236
+
+#endif /* __DT_BINDINGS_CLOCK_NUVOTON_MA35D1_CLK_H */
diff --git a/include/dt-bindings/clock/qcom,gcc-ipq4019.h b/include/dt-bindings/clock/qcom,gcc-ipq4019.h
index 7e8a7be6dcda..fa0587857547 100644
--- a/include/dt-bindings/clock/qcom,gcc-ipq4019.h
+++ b/include/dt-bindings/clock/qcom,gcc-ipq4019.h
@@ -165,5 +165,11 @@
#define GCC_QDSS_BCR 69
#define GCC_MPM_BCR 70
#define GCC_SPDM_BCR 71
+#define ESS_MAC1_ARES 72
+#define ESS_MAC2_ARES 73
+#define ESS_MAC3_ARES 74
+#define ESS_MAC4_ARES 75
+#define ESS_MAC5_ARES 76
+#define ESS_PSGMII_ARES 77
#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-ipq5018.h b/include/dt-bindings/clock/qcom,gcc-ipq5018.h
new file mode 100644
index 000000000000..f3de2fdfeea1
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-ipq5018.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_IPQ_GCC_5018_H
+#define _DT_BINDINGS_CLOCK_IPQ_GCC_5018_H
+
+#define GPLL0_MAIN 0
+#define GPLL0 1
+#define GPLL2_MAIN 2
+#define GPLL2 3
+#define GPLL4_MAIN 4
+#define GPLL4 5
+#define UBI32_PLL_MAIN 6
+#define UBI32_PLL 7
+#define ADSS_PWM_CLK_SRC 8
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC 9
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC 10
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC 11
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC 12
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC 13
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC 14
+#define BLSP1_UART1_APPS_CLK_SRC 15
+#define BLSP1_UART2_APPS_CLK_SRC 16
+#define CRYPTO_CLK_SRC 17
+#define GCC_ADSS_PWM_CLK 18
+#define GCC_BLSP1_AHB_CLK 19
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 20
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 21
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 22
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 23
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 24
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 25
+#define GCC_BLSP1_UART1_APPS_CLK 26
+#define GCC_BLSP1_UART2_APPS_CLK 27
+#define GCC_BTSS_LPO_CLK 28
+#define GCC_CMN_BLK_AHB_CLK 29
+#define GCC_CMN_BLK_SYS_CLK 30
+#define GCC_CRYPTO_AHB_CLK 31
+#define GCC_CRYPTO_AXI_CLK 32
+#define GCC_CRYPTO_CLK 33
+#define GCC_CRYPTO_PPE_CLK 34
+#define GCC_DCC_CLK 35
+#define GCC_GEPHY_RX_CLK 36
+#define GCC_GEPHY_TX_CLK 37
+#define GCC_GMAC0_CFG_CLK 38
+#define GCC_GMAC0_PTP_CLK 39
+#define GCC_GMAC0_RX_CLK 40
+#define GCC_GMAC0_SYS_CLK 41
+#define GCC_GMAC0_TX_CLK 42
+#define GCC_GMAC1_CFG_CLK 43
+#define GCC_GMAC1_PTP_CLK 44
+#define GCC_GMAC1_RX_CLK 45
+#define GCC_GMAC1_SYS_CLK 46
+#define GCC_GMAC1_TX_CLK 47
+#define GCC_GP1_CLK 48
+#define GCC_GP2_CLK 49
+#define GCC_GP3_CLK 50
+#define GCC_LPASS_CORE_AXIM_CLK 51
+#define GCC_LPASS_SWAY_CLK 52
+#define GCC_MDIO0_AHB_CLK 53
+#define GCC_MDIO1_AHB_CLK 54
+#define GCC_PCIE0_AHB_CLK 55
+#define GCC_PCIE0_AUX_CLK 56
+#define GCC_PCIE0_AXI_M_CLK 57
+#define GCC_PCIE0_AXI_S_BRIDGE_CLK 58
+#define GCC_PCIE0_AXI_S_CLK 59
+#define GCC_PCIE0_PIPE_CLK 60
+#define GCC_PCIE1_AHB_CLK 61
+#define GCC_PCIE1_AUX_CLK 62
+#define GCC_PCIE1_AXI_M_CLK 63
+#define GCC_PCIE1_AXI_S_BRIDGE_CLK 64
+#define GCC_PCIE1_AXI_S_CLK 65
+#define GCC_PCIE1_PIPE_CLK 66
+#define GCC_PRNG_AHB_CLK 67
+#define GCC_Q6_AXIM_CLK 68
+#define GCC_Q6_AXIM2_CLK 69
+#define GCC_Q6_AXIS_CLK 70
+#define GCC_Q6_AHB_CLK 71
+#define GCC_Q6_AHB_S_CLK 72
+#define GCC_Q6_TSCTR_1TO2_CLK 73
+#define GCC_Q6SS_ATBM_CLK 74
+#define GCC_Q6SS_PCLKDBG_CLK 75
+#define GCC_Q6SS_TRIG_CLK 76
+#define GCC_QDSS_AT_CLK 77
+#define GCC_QDSS_CFG_AHB_CLK 78
+#define GCC_QDSS_DAP_AHB_CLK 79
+#define GCC_QDSS_DAP_CLK 80
+#define GCC_QDSS_ETR_USB_CLK 81
+#define GCC_QDSS_EUD_AT_CLK 82
+#define GCC_QDSS_STM_CLK 83
+#define GCC_QDSS_TRACECLKIN_CLK 84
+#define GCC_QDSS_TSCTR_DIV8_CLK 85
+#define GCC_QPIC_AHB_CLK 86
+#define GCC_QPIC_CLK 87
+#define GCC_QPIC_IO_MACRO_CLK 88
+#define GCC_SDCC1_AHB_CLK 89
+#define GCC_SDCC1_APPS_CLK 90
+#define GCC_SLEEP_CLK_SRC 91
+#define GCC_SNOC_GMAC0_AHB_CLK 92
+#define GCC_SNOC_GMAC0_AXI_CLK 93
+#define GCC_SNOC_GMAC1_AHB_CLK 94
+#define GCC_SNOC_GMAC1_AXI_CLK 95
+#define GCC_SNOC_LPASS_AXIM_CLK 96
+#define GCC_SNOC_LPASS_SWAY_CLK 97
+#define GCC_SNOC_UBI0_AXI_CLK 98
+#define GCC_SYS_NOC_PCIE0_AXI_CLK 99
+#define GCC_SYS_NOC_PCIE1_AXI_CLK 100
+#define GCC_SYS_NOC_QDSS_STM_AXI_CLK 101
+#define GCC_SYS_NOC_USB0_AXI_CLK 102
+#define GCC_SYS_NOC_WCSS_AHB_CLK 103
+#define GCC_UBI0_AXI_CLK 104
+#define GCC_UBI0_CFG_CLK 105
+#define GCC_UBI0_CORE_CLK 106
+#define GCC_UBI0_DBG_CLK 107
+#define GCC_UBI0_NC_AXI_CLK 108
+#define GCC_UBI0_UTCM_CLK 109
+#define GCC_UNIPHY_AHB_CLK 110
+#define GCC_UNIPHY_RX_CLK 111
+#define GCC_UNIPHY_SYS_CLK 112
+#define GCC_UNIPHY_TX_CLK 113
+#define GCC_USB0_AUX_CLK 114
+#define GCC_USB0_EUD_AT_CLK 115
+#define GCC_USB0_LFPS_CLK 116
+#define GCC_USB0_MASTER_CLK 117
+#define GCC_USB0_MOCK_UTMI_CLK 118
+#define GCC_USB0_PHY_CFG_AHB_CLK 119
+#define GCC_USB0_SLEEP_CLK 120
+#define GCC_WCSS_ACMT_CLK 121
+#define GCC_WCSS_AHB_S_CLK 122
+#define GCC_WCSS_AXI_M_CLK 123
+#define GCC_WCSS_AXI_S_CLK 124
+#define GCC_WCSS_DBG_IFC_APB_BDG_CLK 125
+#define GCC_WCSS_DBG_IFC_APB_CLK 126
+#define GCC_WCSS_DBG_IFC_ATB_BDG_CLK 127
+#define GCC_WCSS_DBG_IFC_ATB_CLK 128
+#define GCC_WCSS_DBG_IFC_DAPBUS_BDG_CLK 129
+#define GCC_WCSS_DBG_IFC_DAPBUS_CLK 130
+#define GCC_WCSS_DBG_IFC_NTS_BDG_CLK 131
+#define GCC_WCSS_DBG_IFC_NTS_CLK 132
+#define GCC_WCSS_ECAHB_CLK 133
+#define GCC_XO_CLK 134
+#define GCC_XO_CLK_SRC 135
+#define GMAC0_RX_CLK_SRC 136
+#define GMAC0_TX_CLK_SRC 137
+#define GMAC1_RX_CLK_SRC 138
+#define GMAC1_TX_CLK_SRC 139
+#define GMAC_CLK_SRC 140
+#define GP1_CLK_SRC 141
+#define GP2_CLK_SRC 142
+#define GP3_CLK_SRC 143
+#define LPASS_AXIM_CLK_SRC 144
+#define LPASS_SWAY_CLK_SRC 145
+#define PCIE0_AUX_CLK_SRC 146
+#define PCIE0_AXI_CLK_SRC 147
+#define PCIE1_AUX_CLK_SRC 148
+#define PCIE1_AXI_CLK_SRC 149
+#define PCNOC_BFDCD_CLK_SRC 150
+#define Q6_AXI_CLK_SRC 151
+#define QDSS_AT_CLK_SRC 152
+#define QDSS_STM_CLK_SRC 153
+#define QDSS_TSCTR_CLK_SRC 154
+#define QDSS_TRACECLKIN_CLK_SRC 155
+#define QPIC_IO_MACRO_CLK_SRC 156
+#define SDCC1_APPS_CLK_SRC 157
+#define SYSTEM_NOC_BFDCD_CLK_SRC 158
+#define UBI0_AXI_CLK_SRC 159
+#define UBI0_CORE_CLK_SRC 160
+#define USB0_AUX_CLK_SRC 161
+#define USB0_LFPS_CLK_SRC 162
+#define USB0_MASTER_CLK_SRC 163
+#define USB0_MOCK_UTMI_CLK_SRC 164
+#define WCSS_AHB_CLK_SRC 165
+#define PCIE0_PIPE_CLK_SRC 166
+#define PCIE1_PIPE_CLK_SRC 167
+#define USB0_PIPE_CLK_SRC 168
+#define GCC_USB0_PIPE_CLK 169
+#define GMAC0_RX_DIV_CLK_SRC 170
+#define GMAC0_TX_DIV_CLK_SRC 171
+#define GMAC1_RX_DIV_CLK_SRC 172
+#define GMAC1_TX_DIV_CLK_SRC 173
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8917.h b/include/dt-bindings/clock/qcom,gcc-msm8917.h
index a371b1adc896..4b421e7414b5 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8917.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8917.h
@@ -169,6 +169,7 @@
#define VFE0_CLK_SRC 162
#define VFE1_CLK_SRC 163
#define VSYNC_CLK_SRC 164
+#define GPLL0_SLEEP_CLK_SRC 165
/* GCC block resets */
#define GCC_CAMSS_MICRO_BCR 0
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8939.h b/include/dt-bindings/clock/qcom,gcc-msm8939.h
index 2d545ed0d35a..9a9bc55b49af 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8939.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8939.h
@@ -193,6 +193,12 @@
#define GCC_VENUS0_CORE1_VCODEC0_CLK 184
#define GCC_OXILI_TIMER_CLK 185
#define SYSTEM_MM_NOC_BFDCD_CLK_SRC 186
+#define CSI2_CLK_SRC 187
+#define GCC_CAMSS_CSI2_AHB_CLK 188
+#define GCC_CAMSS_CSI2_CLK 189
+#define GCC_CAMSS_CSI2PHY_CLK 190
+#define GCC_CAMSS_CSI2PIX_CLK 191
+#define GCC_CAMSS_CSI2RDI_CLK 192
/* Indexes for GDSCs */
#define BIMC_GDSC 0
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8998.h b/include/dt-bindings/clock/qcom,gcc-msm8998.h
index 1badb4f9c58f..b5456a64d421 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8998.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8998.h
@@ -190,6 +190,9 @@
#define AGGRE2_SNOC_NORTH_AXI 181
#define SSC_XO 182
#define SSC_CNOC_AHBS_CLK 183
+#define GCC_MMSS_GPLL0_DIV_CLK 184
+#define GCC_GPU_GPLL0_DIV_CLK 185
+#define GCC_GPU_GPLL0_CLK 186
#define PCIE_0_GDSC 0
#define UFS_GDSC 1
diff --git a/include/dt-bindings/clock/qcom,gcc-sc8280xp.h b/include/dt-bindings/clock/qcom,gcc-sc8280xp.h
index 721105ea4fad..845491591784 100644
--- a/include/dt-bindings/clock/qcom,gcc-sc8280xp.h
+++ b/include/dt-bindings/clock/qcom,gcc-sc8280xp.h
@@ -494,5 +494,15 @@
#define USB30_SEC_GDSC 11
#define EMAC_0_GDSC 12
#define EMAC_1_GDSC 13
+#define USB4_1_GDSC 14
+#define USB4_GDSC 15
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC 16
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC 17
+#define HLOS1_VOTE_MMNOC_MMU_TBU_SF0_GDSC 18
+#define HLOS1_VOTE_MMNOC_MMU_TBU_SF1_GDSC 19
+#define HLOS1_VOTE_TURING_MMU_TBU0_GDSC 20
+#define HLOS1_VOTE_TURING_MMU_TBU1_GDSC 21
+#define HLOS1_VOTE_TURING_MMU_TBU2_GDSC 22
+#define HLOS1_VOTE_TURING_MMU_TBU3_GDSC 23
#endif
diff --git a/include/dt-bindings/clock/qcom,ipq9574-gcc.h b/include/dt-bindings/clock/qcom,ipq9574-gcc.h
index 5a2961bfe893..08fd3a37acaa 100644
--- a/include/dt-bindings/clock/qcom,ipq9574-gcc.h
+++ b/include/dt-bindings/clock/qcom,ipq9574-gcc.h
@@ -210,4 +210,10 @@
#define GCC_SNOC_PCIE1_1LANE_S_CLK 201
#define GCC_SNOC_PCIE2_2LANE_S_CLK 202
#define GCC_SNOC_PCIE3_2LANE_S_CLK 203
+#define GCC_CRYPTO_CLK_SRC 204
+#define GCC_CRYPTO_CLK 205
+#define GCC_CRYPTO_AXI_CLK 206
+#define GCC_CRYPTO_AHB_CLK 207
+#define GCC_USB0_PIPE_CLK 208
+#define GCC_USB0_SLEEP_CLK 209
#endif
diff --git a/include/dt-bindings/clock/qcom,lcc-mdm9615.h b/include/dt-bindings/clock/qcom,lcc-mdm9615.h
deleted file mode 100644
index 299338ee1d88..000000000000
--- a/include/dt-bindings/clock/qcom,lcc-mdm9615.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
- * Copyright (c) BayLibre, SAS.
- * Author : Neil Armstrong <narmstrong@baylibre.com>
- */
-
-#ifndef _DT_BINDINGS_CLK_LCC_MDM9615_H
-#define _DT_BINDINGS_CLK_LCC_MDM9615_H
-
-#define PLL4 0
-#define MI2S_OSR_SRC 1
-#define MI2S_OSR_CLK 2
-#define MI2S_DIV_CLK 3
-#define MI2S_BIT_DIV_CLK 4
-#define MI2S_BIT_CLK 5
-#define PCM_SRC 6
-#define PCM_CLK_OUT 7
-#define PCM_CLK 8
-#define SLIMBUS_SRC 9
-#define AUDIO_SLIMBUS_CLK 10
-#define SPS_SLIMBUS_CLK 11
-#define CODEC_I2S_MIC_OSR_SRC 12
-#define CODEC_I2S_MIC_OSR_CLK 13
-#define CODEC_I2S_MIC_DIV_CLK 14
-#define CODEC_I2S_MIC_BIT_DIV_CLK 15
-#define CODEC_I2S_MIC_BIT_CLK 16
-#define SPARE_I2S_MIC_OSR_SRC 17
-#define SPARE_I2S_MIC_OSR_CLK 18
-#define SPARE_I2S_MIC_DIV_CLK 19
-#define SPARE_I2S_MIC_BIT_DIV_CLK 20
-#define SPARE_I2S_MIC_BIT_CLK 21
-#define CODEC_I2S_SPKR_OSR_SRC 22
-#define CODEC_I2S_SPKR_OSR_CLK 23
-#define CODEC_I2S_SPKR_DIV_CLK 24
-#define CODEC_I2S_SPKR_BIT_DIV_CLK 25
-#define CODEC_I2S_SPKR_BIT_CLK 26
-#define SPARE_I2S_SPKR_OSR_SRC 27
-#define SPARE_I2S_SPKR_OSR_CLK 28
-#define SPARE_I2S_SPKR_DIV_CLK 29
-#define SPARE_I2S_SPKR_BIT_DIV_CLK 30
-#define SPARE_I2S_SPKR_BIT_CLK 31
-
-#endif
diff --git a/include/dt-bindings/clock/qcom,mmcc-msm8974.h b/include/dt-bindings/clock/qcom,mmcc-msm8974.h
index a62cb0629a7a..743ee60632eb 100644
--- a/include/dt-bindings/clock/qcom,mmcc-msm8974.h
+++ b/include/dt-bindings/clock/qcom,mmcc-msm8974.h
@@ -121,7 +121,6 @@
#define MMSS_MMSSNOC_BTO_AHB_CLK 112
#define MMSS_MMSSNOC_AXI_CLK 113
#define MMSS_S0_AXI_CLK 114
-#define OCMEMCX_AHB_CLK 115
#define OCMEMCX_OCMEMNOC_CLK 116
#define OXILI_OCMEMGX_CLK 117
#define OCMEMNOC_CLK 118
diff --git a/include/dt-bindings/clock/qcom,qdu1000-ecpricc.h b/include/dt-bindings/clock/qcom,qdu1000-ecpricc.h
new file mode 100644
index 000000000000..731e404a2ce6
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,qdu1000-ecpricc.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_ECPRI_CC_QDU1000_H
+#define _DT_BINDINGS_CLK_QCOM_ECPRI_CC_QDU1000_H
+
+/* ECPRI_CC clocks */
+#define ECPRI_CC_PLL0 0
+#define ECPRI_CC_PLL1 1
+#define ECPRI_CC_ECPRI_CG_CLK 2
+#define ECPRI_CC_ECPRI_CLK_SRC 3
+#define ECPRI_CC_ECPRI_DMA_CLK 4
+#define ECPRI_CC_ECPRI_DMA_CLK_SRC 5
+#define ECPRI_CC_ECPRI_DMA_NOC_CLK 6
+#define ECPRI_CC_ECPRI_FAST_CLK 7
+#define ECPRI_CC_ECPRI_FAST_CLK_SRC 8
+#define ECPRI_CC_ECPRI_FAST_DIV2_CLK 9
+#define ECPRI_CC_ECPRI_FAST_DIV2_CLK_SRC 10
+#define ECPRI_CC_ECPRI_FAST_DIV2_NOC_CLK 11
+#define ECPRI_CC_ECPRI_FR_CLK 12
+#define ECPRI_CC_ECPRI_ORAN_CLK_SRC 13
+#define ECPRI_CC_ECPRI_ORAN_DIV2_CLK 14
+#define ECPRI_CC_ETH_100G_C2C0_HM_FF_CLK_SRC 15
+#define ECPRI_CC_ETH_100G_C2C0_UDP_FIFO_CLK 16
+#define ECPRI_CC_ETH_100G_C2C1_UDP_FIFO_CLK 17
+#define ECPRI_CC_ETH_100G_C2C_0_HM_FF_0_CLK 18
+#define ECPRI_CC_ETH_100G_C2C_0_HM_FF_1_CLK 19
+#define ECPRI_CC_ETH_100G_C2C_HM_FF_0_DIV_CLK_SRC 20
+#define ECPRI_CC_ETH_100G_C2C_HM_FF_1_DIV_CLK_SRC 21
+#define ECPRI_CC_ETH_100G_C2C_HM_MACSEC_CLK 22
+#define ECPRI_CC_ETH_100G_C2C_HM_MACSEC_CLK_SRC 23
+#define ECPRI_CC_ETH_100G_DBG_C2C_HM_FF_0_CLK 24
+#define ECPRI_CC_ETH_100G_DBG_C2C_HM_FF_0_DIV_CLK_SRC 25
+#define ECPRI_CC_ETH_100G_DBG_C2C_HM_FF_1_CLK 26
+#define ECPRI_CC_ETH_100G_DBG_C2C_HM_FF_1_DIV_CLK_SRC 27
+#define ECPRI_CC_ETH_100G_DBG_C2C_HM_FF_CLK_SRC 28
+#define ECPRI_CC_ETH_100G_DBG_C2C_UDP_FIFO_CLK 29
+#define ECPRI_CC_ETH_100G_FH0_HM_FF_CLK_SRC 30
+#define ECPRI_CC_ETH_100G_FH0_MACSEC_CLK_SRC 31
+#define ECPRI_CC_ETH_100G_FH1_HM_FF_CLK_SRC 32
+#define ECPRI_CC_ETH_100G_FH1_MACSEC_CLK_SRC 33
+#define ECPRI_CC_ETH_100G_FH2_HM_FF_CLK_SRC 34
+#define ECPRI_CC_ETH_100G_FH2_MACSEC_CLK_SRC 35
+#define ECPRI_CC_ETH_100G_FH_0_HM_FF_0_CLK 36
+#define ECPRI_CC_ETH_100G_FH_0_HM_FF_0_DIV_CLK_SRC 37
+#define ECPRI_CC_ETH_100G_FH_0_HM_FF_1_CLK 38
+#define ECPRI_CC_ETH_100G_FH_0_HM_FF_1_DIV_CLK_SRC 39
+#define ECPRI_CC_ETH_100G_FH_0_HM_FF_2_CLK 40
+#define ECPRI_CC_ETH_100G_FH_0_HM_FF_2_DIV_CLK_SRC 41
+#define ECPRI_CC_ETH_100G_FH_0_HM_FF_3_CLK 42
+#define ECPRI_CC_ETH_100G_FH_0_HM_FF_3_DIV_CLK_SRC 43
+#define ECPRI_CC_ETH_100G_FH_0_UDP_FIFO_CLK 44
+#define ECPRI_CC_ETH_100G_FH_1_HM_FF_0_CLK 45
+#define ECPRI_CC_ETH_100G_FH_1_HM_FF_0_DIV_CLK_SRC 46
+#define ECPRI_CC_ETH_100G_FH_1_HM_FF_1_CLK 47
+#define ECPRI_CC_ETH_100G_FH_1_HM_FF_1_DIV_CLK_SRC 48
+#define ECPRI_CC_ETH_100G_FH_1_HM_FF_2_CLK 49
+#define ECPRI_CC_ETH_100G_FH_1_HM_FF_2_DIV_CLK_SRC 50
+#define ECPRI_CC_ETH_100G_FH_1_HM_FF_3_CLK 51
+#define ECPRI_CC_ETH_100G_FH_1_HM_FF_3_DIV_CLK_SRC 52
+#define ECPRI_CC_ETH_100G_FH_1_UDP_FIFO_CLK 53
+#define ECPRI_CC_ETH_100G_FH_2_HM_FF_0_CLK 54
+#define ECPRI_CC_ETH_100G_FH_2_HM_FF_0_DIV_CLK_SRC 55
+#define ECPRI_CC_ETH_100G_FH_2_HM_FF_1_CLK 56
+#define ECPRI_CC_ETH_100G_FH_2_HM_FF_1_DIV_CLK_SRC 57
+#define ECPRI_CC_ETH_100G_FH_2_HM_FF_2_CLK 58
+#define ECPRI_CC_ETH_100G_FH_2_HM_FF_2_DIV_CLK_SRC 59
+#define ECPRI_CC_ETH_100G_FH_2_HM_FF_3_CLK 60
+#define ECPRI_CC_ETH_100G_FH_2_HM_FF_3_DIV_CLK_SRC 61
+#define ECPRI_CC_ETH_100G_FH_2_UDP_FIFO_CLK 62
+#define ECPRI_CC_ETH_100G_FH_MACSEC_0_CLK 63
+#define ECPRI_CC_ETH_100G_FH_MACSEC_1_CLK 64
+#define ECPRI_CC_ETH_100G_FH_MACSEC_2_CLK 65
+#define ECPRI_CC_ETH_100G_MAC_C2C_HM_REF_CLK 66
+#define ECPRI_CC_ETH_100G_MAC_C2C_HM_REF_CLK_SRC 67
+#define ECPRI_CC_ETH_100G_MAC_DBG_C2C_HM_REF_CLK 68
+#define ECPRI_CC_ETH_100G_MAC_DBG_C2C_HM_REF_CLK_SRC 69
+#define ECPRI_CC_ETH_100G_MAC_FH0_HM_REF_CLK 70
+#define ECPRI_CC_ETH_100G_MAC_FH0_HM_REF_CLK_SRC 71
+#define ECPRI_CC_ETH_100G_MAC_FH1_HM_REF_CLK 72
+#define ECPRI_CC_ETH_100G_MAC_FH1_HM_REF_CLK_SRC 73
+#define ECPRI_CC_ETH_100G_MAC_FH2_HM_REF_CLK 74
+#define ECPRI_CC_ETH_100G_MAC_FH2_HM_REF_CLK_SRC 75
+#define ECPRI_CC_ETH_DBG_NFAPI_AXI_CLK 76
+#define ECPRI_CC_ETH_DBG_NOC_AXI_CLK 77
+#define ECPRI_CC_ETH_PHY_0_OCK_SRAM_CLK 78
+#define ECPRI_CC_ETH_PHY_1_OCK_SRAM_CLK 79
+#define ECPRI_CC_ETH_PHY_2_OCK_SRAM_CLK 80
+#define ECPRI_CC_ETH_PHY_3_OCK_SRAM_CLK 81
+#define ECPRI_CC_ETH_PHY_4_OCK_SRAM_CLK 82
+#define ECPRI_CC_MSS_EMAC_CLK 83
+#define ECPRI_CC_MSS_EMAC_CLK_SRC 84
+#define ECPRI_CC_MSS_ORAN_CLK 85
+#define ECPRI_CC_PHY0_LANE0_RX_CLK 86
+#define ECPRI_CC_PHY0_LANE0_TX_CLK 87
+#define ECPRI_CC_PHY0_LANE1_RX_CLK 88
+#define ECPRI_CC_PHY0_LANE1_TX_CLK 89
+#define ECPRI_CC_PHY0_LANE2_RX_CLK 90
+#define ECPRI_CC_PHY0_LANE2_TX_CLK 91
+#define ECPRI_CC_PHY0_LANE3_RX_CLK 92
+#define ECPRI_CC_PHY0_LANE3_TX_CLK 93
+#define ECPRI_CC_PHY1_LANE0_RX_CLK 94
+#define ECPRI_CC_PHY1_LANE0_TX_CLK 95
+#define ECPRI_CC_PHY1_LANE1_RX_CLK 96
+#define ECPRI_CC_PHY1_LANE1_TX_CLK 97
+#define ECPRI_CC_PHY1_LANE2_RX_CLK 98
+#define ECPRI_CC_PHY1_LANE2_TX_CLK 99
+#define ECPRI_CC_PHY1_LANE3_RX_CLK 100
+#define ECPRI_CC_PHY1_LANE3_TX_CLK 101
+#define ECPRI_CC_PHY2_LANE0_RX_CLK 102
+#define ECPRI_CC_PHY2_LANE0_TX_CLK 103
+#define ECPRI_CC_PHY2_LANE1_RX_CLK 104
+#define ECPRI_CC_PHY2_LANE1_TX_CLK 105
+#define ECPRI_CC_PHY2_LANE2_RX_CLK 106
+#define ECPRI_CC_PHY2_LANE2_TX_CLK 107
+#define ECPRI_CC_PHY2_LANE3_RX_CLK 108
+#define ECPRI_CC_PHY2_LANE3_TX_CLK 109
+#define ECPRI_CC_PHY3_LANE0_RX_CLK 110
+#define ECPRI_CC_PHY3_LANE0_TX_CLK 111
+#define ECPRI_CC_PHY3_LANE1_RX_CLK 112
+#define ECPRI_CC_PHY3_LANE1_TX_CLK 113
+#define ECPRI_CC_PHY3_LANE2_RX_CLK 114
+#define ECPRI_CC_PHY3_LANE2_TX_CLK 115
+#define ECPRI_CC_PHY3_LANE3_RX_CLK 116
+#define ECPRI_CC_PHY3_LANE3_TX_CLK 117
+#define ECPRI_CC_PHY4_LANE0_RX_CLK 118
+#define ECPRI_CC_PHY4_LANE0_TX_CLK 119
+#define ECPRI_CC_PHY4_LANE1_RX_CLK 120
+#define ECPRI_CC_PHY4_LANE1_TX_CLK 121
+#define ECPRI_CC_PHY4_LANE2_RX_CLK 122
+#define ECPRI_CC_PHY4_LANE2_TX_CLK 123
+#define ECPRI_CC_PHY4_LANE3_RX_CLK 124
+#define ECPRI_CC_PHY4_LANE3_TX_CLK 125
+
+/* ECPRI_CC resets */
+#define ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_ECPRI_SS_BCR 0
+#define ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_ETH_C2C_BCR 1
+#define ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_ETH_FH0_BCR 2
+#define ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_ETH_FH1_BCR 3
+#define ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_ETH_FH2_BCR 4
+#define ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_ETH_WRAPPER_TOP_BCR 5
+#define ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_MODEM_BCR 6
+#define ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_NOC_BCR 7
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,qdu1000-gcc.h b/include/dt-bindings/clock/qcom,qdu1000-gcc.h
index ddbc6b825e80..2fd36cbfddbb 100644
--- a/include/dt-bindings/clock/qcom,qdu1000-gcc.h
+++ b/include/dt-bindings/clock/qcom,qdu1000-gcc.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
/*
- * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _DT_BINDINGS_CLK_QCOM_GCC_QDU1000_H
@@ -138,6 +138,8 @@
#define GCC_AGGRE_NOC_ECPRI_GSI_CLK 128
#define GCC_PCIE_0_PIPE_CLK_SRC 129
#define GCC_PCIE_0_PHY_AUX_CLK_SRC 130
+#define GCC_GPLL1_OUT_EVEN 131
+#define GCC_DDRSS_ECPRI_GSI_CLK 132
/* GCC resets */
#define GCC_ECPRI_CC_BCR 0
diff --git a/include/dt-bindings/clock/qcom,sc8280xp-camcc.h b/include/dt-bindings/clock/qcom,sc8280xp-camcc.h
new file mode 100644
index 000000000000..ea5ec73c8c6a
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sc8280xp-camcc.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Ltd.
+ */
+
+#ifndef __DT_BINDINGS_CLK_QCOM_CAMCC_SC8280XP_H__
+#define __DT_BINDINGS_CLK_QCOM_CAMCC_SC8280XP_H__
+
+/* CAMCC clocks */
+#define CAMCC_PLL0 0
+#define CAMCC_PLL0_OUT_EVEN 1
+#define CAMCC_PLL0_OUT_ODD 2
+#define CAMCC_PLL1 3
+#define CAMCC_PLL1_OUT_EVEN 4
+#define CAMCC_PLL2 5
+#define CAMCC_PLL3 6
+#define CAMCC_PLL3_OUT_EVEN 7
+#define CAMCC_PLL4 8
+#define CAMCC_PLL4_OUT_EVEN 9
+#define CAMCC_PLL5 10
+#define CAMCC_PLL5_OUT_EVEN 11
+#define CAMCC_PLL6 12
+#define CAMCC_PLL6_OUT_EVEN 13
+#define CAMCC_PLL7 14
+#define CAMCC_PLL7_OUT_EVEN 15
+#define CAMCC_PLL7_OUT_ODD 16
+#define CAMCC_BPS_AHB_CLK 17
+#define CAMCC_BPS_AREG_CLK 18
+#define CAMCC_BPS_AXI_CLK 19
+#define CAMCC_BPS_CLK 20
+#define CAMCC_BPS_CLK_SRC 21
+#define CAMCC_CAMNOC_AXI_CLK 22
+#define CAMCC_CAMNOC_AXI_CLK_SRC 23
+#define CAMCC_CAMNOC_DCD_XO_CLK 24
+#define CAMCC_CCI_0_CLK 25
+#define CAMCC_CCI_0_CLK_SRC 26
+#define CAMCC_CCI_1_CLK 27
+#define CAMCC_CCI_1_CLK_SRC 28
+#define CAMCC_CCI_2_CLK 29
+#define CAMCC_CCI_2_CLK_SRC 30
+#define CAMCC_CCI_3_CLK 31
+#define CAMCC_CCI_3_CLK_SRC 32
+#define CAMCC_CORE_AHB_CLK 33
+#define CAMCC_CPAS_AHB_CLK 34
+#define CAMCC_CPHY_RX_CLK_SRC 35
+#define CAMCC_CSI0PHYTIMER_CLK 36
+#define CAMCC_CSI0PHYTIMER_CLK_SRC 37
+#define CAMCC_CSI1PHYTIMER_CLK 38
+#define CAMCC_CSI1PHYTIMER_CLK_SRC 39
+#define CAMCC_CSI2PHYTIMER_CLK 40
+#define CAMCC_CSI2PHYTIMER_CLK_SRC 41
+#define CAMCC_CSI3PHYTIMER_CLK 42
+#define CAMCC_CSI3PHYTIMER_CLK_SRC 43
+#define CAMCC_CSIPHY0_CLK 44
+#define CAMCC_CSIPHY1_CLK 45
+#define CAMCC_CSIPHY2_CLK 46
+#define CAMCC_CSIPHY3_CLK 47
+#define CAMCC_FAST_AHB_CLK_SRC 48
+#define CAMCC_GDSC_CLK 49
+#define CAMCC_ICP_AHB_CLK 50
+#define CAMCC_ICP_CLK 51
+#define CAMCC_ICP_CLK_SRC 52
+#define CAMCC_IFE_0_AXI_CLK 53
+#define CAMCC_IFE_0_CLK 54
+#define CAMCC_IFE_0_CLK_SRC 55
+#define CAMCC_IFE_0_CPHY_RX_CLK 56
+#define CAMCC_IFE_0_CSID_CLK 57
+#define CAMCC_IFE_0_CSID_CLK_SRC 58
+#define CAMCC_IFE_0_DSP_CLK 59
+#define CAMCC_IFE_1_AXI_CLK 60
+#define CAMCC_IFE_1_CLK 61
+#define CAMCC_IFE_1_CLK_SRC 62
+#define CAMCC_IFE_1_CPHY_RX_CLK 63
+#define CAMCC_IFE_1_CSID_CLK 64
+#define CAMCC_IFE_1_CSID_CLK_SRC 65
+#define CAMCC_IFE_1_DSP_CLK 66
+#define CAMCC_IFE_2_AXI_CLK 67
+#define CAMCC_IFE_2_CLK 68
+#define CAMCC_IFE_2_CLK_SRC 69
+#define CAMCC_IFE_2_CPHY_RX_CLK 70
+#define CAMCC_IFE_2_CSID_CLK 71
+#define CAMCC_IFE_2_CSID_CLK_SRC 72
+#define CAMCC_IFE_2_DSP_CLK 73
+#define CAMCC_IFE_3_AXI_CLK 74
+#define CAMCC_IFE_3_CLK 75
+#define CAMCC_IFE_3_CLK_SRC 76
+#define CAMCC_IFE_3_CPHY_RX_CLK 77
+#define CAMCC_IFE_3_CSID_CLK 78
+#define CAMCC_IFE_3_CSID_CLK_SRC 79
+#define CAMCC_IFE_3_DSP_CLK 80
+#define CAMCC_IFE_LITE_0_CLK 81
+#define CAMCC_IFE_LITE_0_CLK_SRC 82
+#define CAMCC_IFE_LITE_0_CPHY_RX_CLK 83
+#define CAMCC_IFE_LITE_0_CSID_CLK 84
+#define CAMCC_IFE_LITE_0_CSID_CLK_SRC 85
+#define CAMCC_IFE_LITE_1_CLK 86
+#define CAMCC_IFE_LITE_1_CLK_SRC 87
+#define CAMCC_IFE_LITE_1_CPHY_RX_CLK 88
+#define CAMCC_IFE_LITE_1_CSID_CLK 89
+#define CAMCC_IFE_LITE_1_CSID_CLK_SRC 90
+#define CAMCC_IFE_LITE_2_CLK 91
+#define CAMCC_IFE_LITE_2_CLK_SRC 92
+#define CAMCC_IFE_LITE_2_CPHY_RX_CLK 93
+#define CAMCC_IFE_LITE_2_CSID_CLK 94
+#define CAMCC_IFE_LITE_2_CSID_CLK_SRC 95
+#define CAMCC_IFE_LITE_3_CLK 96
+#define CAMCC_IFE_LITE_3_CLK_SRC 97
+#define CAMCC_IFE_LITE_3_CPHY_RX_CLK 98
+#define CAMCC_IFE_LITE_3_CSID_CLK 99
+#define CAMCC_IFE_LITE_3_CSID_CLK_SRC 100
+#define CAMCC_IPE_0_AHB_CLK 101
+#define CAMCC_IPE_0_AREG_CLK 102
+#define CAMCC_IPE_0_AXI_CLK 103
+#define CAMCC_IPE_0_CLK 104
+#define CAMCC_IPE_0_CLK_SRC 105
+#define CAMCC_IPE_1_AHB_CLK 106
+#define CAMCC_IPE_1_AREG_CLK 107
+#define CAMCC_IPE_1_AXI_CLK 108
+#define CAMCC_IPE_1_CLK 109
+#define CAMCC_JPEG_CLK 110
+#define CAMCC_JPEG_CLK_SRC 111
+#define CAMCC_LRME_CLK 112
+#define CAMCC_LRME_CLK_SRC 113
+#define CAMCC_MCLK0_CLK 114
+#define CAMCC_MCLK0_CLK_SRC 115
+#define CAMCC_MCLK1_CLK 116
+#define CAMCC_MCLK1_CLK_SRC 117
+#define CAMCC_MCLK2_CLK 118
+#define CAMCC_MCLK2_CLK_SRC 119
+#define CAMCC_MCLK3_CLK 120
+#define CAMCC_MCLK3_CLK_SRC 121
+#define CAMCC_MCLK4_CLK 122
+#define CAMCC_MCLK4_CLK_SRC 123
+#define CAMCC_MCLK5_CLK 124
+#define CAMCC_MCLK5_CLK_SRC 125
+#define CAMCC_MCLK6_CLK 126
+#define CAMCC_MCLK6_CLK_SRC 127
+#define CAMCC_MCLK7_CLK 128
+#define CAMCC_MCLK7_CLK_SRC 129
+#define CAMCC_SLEEP_CLK 130
+#define CAMCC_SLEEP_CLK_SRC 131
+#define CAMCC_SLOW_AHB_CLK_SRC 132
+#define CAMCC_XO_CLK_SRC 133
+
+/* CAMCC resets */
+#define CAMCC_BPS_BCR 0
+#define CAMCC_CAMNOC_BCR 1
+#define CAMCC_CCI_BCR 2
+#define CAMCC_CPAS_BCR 3
+#define CAMCC_CSI0PHY_BCR 4
+#define CAMCC_CSI1PHY_BCR 5
+#define CAMCC_CSI2PHY_BCR 6
+#define CAMCC_CSI3PHY_BCR 7
+#define CAMCC_ICP_BCR 8
+#define CAMCC_IFE_0_BCR 9
+#define CAMCC_IFE_1_BCR 10
+#define CAMCC_IFE_2_BCR 11
+#define CAMCC_IFE_3_BCR 12
+#define CAMCC_IFE_LITE_0_BCR 13
+#define CAMCC_IFE_LITE_1_BCR 14
+#define CAMCC_IFE_LITE_2_BCR 15
+#define CAMCC_IFE_LITE_3_BCR 16
+#define CAMCC_IPE_0_BCR 17
+#define CAMCC_IPE_1_BCR 18
+#define CAMCC_JPEG_BCR 19
+#define CAMCC_LRME_BCR 20
+
+/* CAMCC GDSCRs */
+#define BPS_GDSC 0
+#define IFE_0_GDSC 1
+#define IFE_1_GDSC 2
+#define IFE_2_GDSC 3
+#define IFE_3_GDSC 4
+#define IPE_0_GDSC 5
+#define IPE_1_GDSC 6
+#define TITAN_TOP_GDSC 7
+
+#endif /* __DT_BINDINGS_CLK_QCOM_CAMCC_SC8280XP_H__ */
diff --git a/include/dt-bindings/clock/qcom,sc8280xp-lpasscc.h b/include/dt-bindings/clock/qcom,sc8280xp-lpasscc.h
new file mode 100644
index 000000000000..d190d57fc81a
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sc8280xp-lpasscc.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Linaro Ltd.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_LPASSCC_SC8280XP_H
+#define _DT_BINDINGS_CLK_QCOM_LPASSCC_SC8280XP_H
+
+/* LPASS AUDIO CC CSR */
+#define LPASS_AUDIO_SWR_RX_CGCR 0
+#define LPASS_AUDIO_SWR_WSA_CGCR 1
+#define LPASS_AUDIO_SWR_WSA2_CGCR 2
+
+/* LPASS TCSR */
+#define LPASS_AUDIO_SWR_TX_CGCR 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sdx75-gcc.h b/include/dt-bindings/clock/qcom,sdx75-gcc.h
new file mode 100644
index 000000000000..a470e8c4fd41
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sdx75-gcc.h
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SDX75_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SDX75_H
+
+/* GCC clocks */
+#define GPLL0 0
+#define GPLL0_OUT_EVEN 1
+#define GPLL4 2
+#define GPLL5 3
+#define GPLL6 4
+#define GPLL8 5
+#define GCC_AHB_PCIE_LINK_CLK 6
+#define GCC_BOOT_ROM_AHB_CLK 7
+#define GCC_EEE_EMAC0_CLK 8
+#define GCC_EEE_EMAC0_CLK_SRC 9
+#define GCC_EEE_EMAC1_CLK 10
+#define GCC_EEE_EMAC1_CLK_SRC 11
+#define GCC_EMAC0_AXI_CLK 12
+#define GCC_EMAC0_CC_SGMIIPHY_RX_CLK 13
+#define GCC_EMAC0_CC_SGMIIPHY_RX_CLK_SRC 14
+#define GCC_EMAC0_CC_SGMIIPHY_TX_CLK 15
+#define GCC_EMAC0_CC_SGMIIPHY_TX_CLK_SRC 16
+#define GCC_EMAC0_PHY_AUX_CLK 17
+#define GCC_EMAC0_PHY_AUX_CLK_SRC 18
+#define GCC_EMAC0_PTP_CLK 19
+#define GCC_EMAC0_PTP_CLK_SRC 20
+#define GCC_EMAC0_RGMII_CLK 21
+#define GCC_EMAC0_RGMII_CLK_SRC 22
+#define GCC_EMAC0_RPCS_RX_CLK 23
+#define GCC_EMAC0_RPCS_TX_CLK 24
+#define GCC_EMAC0_SGMIIPHY_MAC_RCLK_SRC 25
+#define GCC_EMAC0_SGMIIPHY_MAC_TCLK_SRC 26
+#define GCC_EMAC0_SLV_AHB_CLK 27
+#define GCC_EMAC0_XGXS_RX_CLK 28
+#define GCC_EMAC0_XGXS_TX_CLK 29
+#define GCC_EMAC1_AXI_CLK 30
+#define GCC_EMAC1_CC_SGMIIPHY_RX_CLK 31
+#define GCC_EMAC1_CC_SGMIIPHY_RX_CLK_SRC 32
+#define GCC_EMAC1_CC_SGMIIPHY_TX_CLK 33
+#define GCC_EMAC1_CC_SGMIIPHY_TX_CLK_SRC 34
+#define GCC_EMAC1_PHY_AUX_CLK 35
+#define GCC_EMAC1_PHY_AUX_CLK_SRC 36
+#define GCC_EMAC1_PTP_CLK 37
+#define GCC_EMAC1_PTP_CLK_SRC 38
+#define GCC_EMAC1_RGMII_CLK 39
+#define GCC_EMAC1_RGMII_CLK_SRC 40
+#define GCC_EMAC1_RPCS_RX_CLK 41
+#define GCC_EMAC1_RPCS_TX_CLK 42
+#define GCC_EMAC1_SGMIIPHY_MAC_RCLK_SRC 43
+#define GCC_EMAC1_SGMIIPHY_MAC_TCLK_SRC 44
+#define GCC_EMAC1_SLV_AHB_CLK 45
+#define GCC_EMAC1_XGXS_RX_CLK 46
+#define GCC_EMAC1_XGXS_TX_CLK 47
+#define GCC_EMAC_0_CLKREF_EN 48
+#define GCC_EMAC_1_CLKREF_EN 49
+#define GCC_GP1_CLK 50
+#define GCC_GP1_CLK_SRC 51
+#define GCC_GP2_CLK 52
+#define GCC_GP2_CLK_SRC 53
+#define GCC_GP3_CLK 54
+#define GCC_GP3_CLK_SRC 55
+#define GCC_PCIE_0_CLKREF_EN 56
+#define GCC_PCIE_1_AUX_CLK 57
+#define GCC_PCIE_1_AUX_PHY_CLK_SRC 58
+#define GCC_PCIE_1_CFG_AHB_CLK 59
+#define GCC_PCIE_1_CLKREF_EN 60
+#define GCC_PCIE_1_MSTR_AXI_CLK 61
+#define GCC_PCIE_1_PHY_RCHNG_CLK 62
+#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 63
+#define GCC_PCIE_1_PIPE_CLK 64
+#define GCC_PCIE_1_PIPE_CLK_SRC 65
+#define GCC_PCIE_1_PIPE_DIV2_CLK 66
+#define GCC_PCIE_1_PIPE_DIV2_CLK_SRC 67
+#define GCC_PCIE_1_SLV_AXI_CLK 68
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 69
+#define GCC_PCIE_2_AUX_CLK 70
+#define GCC_PCIE_2_AUX_PHY_CLK_SRC 71
+#define GCC_PCIE_2_CFG_AHB_CLK 72
+#define GCC_PCIE_2_CLKREF_EN 73
+#define GCC_PCIE_2_MSTR_AXI_CLK 74
+#define GCC_PCIE_2_PHY_RCHNG_CLK 75
+#define GCC_PCIE_2_PHY_RCHNG_CLK_SRC 76
+#define GCC_PCIE_2_PIPE_CLK 77
+#define GCC_PCIE_2_PIPE_CLK_SRC 78
+#define GCC_PCIE_2_PIPE_DIV2_CLK 79
+#define GCC_PCIE_2_PIPE_DIV2_CLK_SRC 80
+#define GCC_PCIE_2_SLV_AXI_CLK 81
+#define GCC_PCIE_2_SLV_Q2A_AXI_CLK 82
+#define GCC_PCIE_AUX_CLK 83
+#define GCC_PCIE_AUX_CLK_SRC 84
+#define GCC_PCIE_AUX_PHY_CLK_SRC 85
+#define GCC_PCIE_CFG_AHB_CLK 86
+#define GCC_PCIE_MSTR_AXI_CLK 87
+#define GCC_PCIE_PIPE_CLK 88
+#define GCC_PCIE_PIPE_CLK_SRC 89
+#define GCC_PCIE_RCHNG_PHY_CLK 90
+#define GCC_PCIE_RCHNG_PHY_CLK_SRC 91
+#define GCC_PCIE_SLEEP_CLK 92
+#define GCC_PCIE_SLV_AXI_CLK 93
+#define GCC_PCIE_SLV_Q2A_AXI_CLK 94
+#define GCC_PDM2_CLK 95
+#define GCC_PDM2_CLK_SRC 96
+#define GCC_PDM_AHB_CLK 97
+#define GCC_PDM_XO4_CLK 98
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 99
+#define GCC_QUPV3_WRAP0_CORE_CLK 100
+#define GCC_QUPV3_WRAP0_S0_CLK 101
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 102
+#define GCC_QUPV3_WRAP0_S1_CLK 103
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 104
+#define GCC_QUPV3_WRAP0_S2_CLK 105
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 106
+#define GCC_QUPV3_WRAP0_S3_CLK 107
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 108
+#define GCC_QUPV3_WRAP0_S4_CLK 109
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 110
+#define GCC_QUPV3_WRAP0_S5_CLK 111
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 112
+#define GCC_QUPV3_WRAP0_S6_CLK 113
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 114
+#define GCC_QUPV3_WRAP0_S7_CLK 115
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 116
+#define GCC_QUPV3_WRAP0_S8_CLK 117
+#define GCC_QUPV3_WRAP0_S8_CLK_SRC 118
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 119
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 120
+#define GCC_SDCC1_AHB_CLK 121
+#define GCC_SDCC1_APPS_CLK 122
+#define GCC_SDCC1_APPS_CLK_SRC 123
+#define GCC_SDCC2_AHB_CLK 124
+#define GCC_SDCC2_APPS_CLK 125
+#define GCC_SDCC2_APPS_CLK_SRC 126
+#define GCC_USB2_CLKREF_EN 127
+#define GCC_USB30_MASTER_CLK 128
+#define GCC_USB30_MASTER_CLK_SRC 129
+#define GCC_USB30_MOCK_UTMI_CLK 130
+#define GCC_USB30_MOCK_UTMI_CLK_SRC 131
+#define GCC_USB30_MOCK_UTMI_POSTDIV_CLK_SRC 132
+#define GCC_USB30_MSTR_AXI_CLK 133
+#define GCC_USB30_SLEEP_CLK 134
+#define GCC_USB30_SLV_AHB_CLK 135
+#define GCC_USB3_PHY_AUX_CLK 136
+#define GCC_USB3_PHY_AUX_CLK_SRC 137
+#define GCC_USB3_PHY_PIPE_CLK 138
+#define GCC_USB3_PHY_PIPE_CLK_SRC 139
+#define GCC_USB3_PRIM_CLKREF_EN 140
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK 141
+#define GCC_XO_PCIE_LINK_CLK 142
+
+/* GCC power domains */
+#define GCC_EMAC0_GDSC 0
+#define GCC_EMAC1_GDSC 1
+#define GCC_PCIE_1_GDSC 2
+#define GCC_PCIE_1_PHY_GDSC 3
+#define GCC_PCIE_2_GDSC 4
+#define GCC_PCIE_2_PHY_GDSC 5
+#define GCC_PCIE_GDSC 6
+#define GCC_PCIE_PHY_GDSC 7
+#define GCC_USB30_GDSC 8
+#define GCC_USB3_PHY_GDSC 9
+
+/* GCC resets */
+#define GCC_EMAC0_BCR 0
+#define GCC_EMAC1_BCR 1
+#define GCC_EMMC_BCR 2
+#define GCC_PCIE_1_BCR 3
+#define GCC_PCIE_1_LINK_DOWN_BCR 4
+#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 5
+#define GCC_PCIE_1_PHY_BCR 6
+#define GCC_PCIE_2_BCR 7
+#define GCC_PCIE_2_LINK_DOWN_BCR 8
+#define GCC_PCIE_2_NOCSR_COM_PHY_BCR 9
+#define GCC_PCIE_2_PHY_BCR 10
+#define GCC_PCIE_BCR 11
+#define GCC_PCIE_LINK_DOWN_BCR 12
+#define GCC_PCIE_NOCSR_COM_PHY_BCR 13
+#define GCC_PCIE_PHY_BCR 14
+#define GCC_PCIE_PHY_CFG_AHB_BCR 15
+#define GCC_PCIE_PHY_COM_BCR 16
+#define GCC_PCIE_PHY_NOCSR_COM_PHY_BCR 17
+#define GCC_QUSB2PHY_BCR 18
+#define GCC_TCSR_PCIE_BCR 19
+#define GCC_USB30_BCR 20
+#define GCC_USB3_PHY_BCR 21
+#define GCC_USB3PHY_PHY_BCR 22
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 23
+#define GCC_EMAC0_RGMII_CLK_ARES 24
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm4450-gcc.h b/include/dt-bindings/clock/qcom,sm4450-gcc.h
new file mode 100644
index 000000000000..c18e47a86f40
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm4450-gcc.h
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM4450_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SM4450_H
+
+/* GCC clocks */
+#define GCC_AGGRE_NOC_PCIE_0_AXI_CLK 0
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 1
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 2
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 3
+#define GCC_BOOT_ROM_AHB_CLK 4
+#define GCC_CAMERA_AHB_CLK 5
+#define GCC_CAMERA_HF_AXI_CLK 6
+#define GCC_CAMERA_SF_AXI_CLK 7
+#define GCC_CAMERA_SLEEP_CLK 8
+#define GCC_CAMERA_XO_CLK 9
+#define GCC_CFG_NOC_PCIE_ANOC_AHB_CLK 10
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 11
+#define GCC_DDRSS_GPU_AXI_CLK 12
+#define GCC_DDRSS_PCIE_SF_TBU_CLK 13
+#define GCC_DISP_AHB_CLK 14
+#define GCC_DISP_HF_AXI_CLK 15
+#define GCC_DISP_XO_CLK 16
+#define GCC_EUSB3_0_CLKREF_EN 17
+#define GCC_GP1_CLK 18
+#define GCC_GP1_CLK_SRC 19
+#define GCC_GP2_CLK 20
+#define GCC_GP2_CLK_SRC 21
+#define GCC_GP3_CLK 22
+#define GCC_GP3_CLK_SRC 23
+#define GCC_GPLL0 24
+#define GCC_GPLL0_OUT_EVEN 25
+#define GCC_GPLL0_OUT_ODD 26
+#define GCC_GPLL1 27
+#define GCC_GPLL3 28
+#define GCC_GPLL4 29
+#define GCC_GPLL9 30
+#define GCC_GPLL10 31
+#define GCC_GPU_CFG_AHB_CLK 32
+#define GCC_GPU_GPLL0_CLK_SRC 33
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 34
+#define GCC_GPU_MEMNOC_GFX_CLK 35
+#define GCC_GPU_SNOC_DVM_GFX_CLK 36
+#define GCC_HLOS1_VOTE_AGGRE_NOC_MMU_AUDIO_TBU_CLK 37
+#define GCC_HLOS1_VOTE_AGGRE_NOC_MMU_PCIE_TBU_CLK 38
+#define GCC_HLOS1_VOTE_AGGRE_NOC_MMU_TBU1_CLK 39
+#define GCC_HLOS1_VOTE_AGGRE_NOC_MMU_TBU2_CLK 40
+#define GCC_HLOS1_VOTE_MMNOC_MMU_TBU_HF0_CLK 41
+#define GCC_HLOS1_VOTE_MMNOC_MMU_TBU_HF1_CLK 42
+#define GCC_HLOS1_VOTE_MMNOC_MMU_TBU_SF0_CLK 43
+#define GCC_HLOS1_VOTE_MMU_TCU_CLK 44
+#define GCC_PCIE_0_AUX_CLK 45
+#define GCC_PCIE_0_AUX_CLK_SRC 46
+#define GCC_PCIE_0_CFG_AHB_CLK 47
+#define GCC_PCIE_0_CLKREF_EN 48
+#define GCC_PCIE_0_MSTR_AXI_CLK 49
+#define GCC_PCIE_0_PHY_RCHNG_CLK 50
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 51
+#define GCC_PCIE_0_PIPE_CLK 52
+#define GCC_PCIE_0_PIPE_CLK_SRC 53
+#define GCC_PCIE_0_PIPE_DIV2_CLK 54
+#define GCC_PCIE_0_PIPE_DIV2_CLK_SRC 55
+#define GCC_PCIE_0_SLV_AXI_CLK 56
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 57
+#define GCC_PDM2_CLK 58
+#define GCC_PDM2_CLK_SRC 59
+#define GCC_PDM_AHB_CLK 60
+#define GCC_PDM_XO4_CLK 61
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 62
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 63
+#define GCC_QMIP_DISP_AHB_CLK 64
+#define GCC_QMIP_GPU_AHB_CLK 65
+#define GCC_QMIP_PCIE_AHB_CLK 66
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 67
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 68
+#define GCC_QUPV3_WRAP0_CORE_CLK 69
+#define GCC_QUPV3_WRAP0_S0_CLK 70
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 71
+#define GCC_QUPV3_WRAP0_S1_CLK 72
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 73
+#define GCC_QUPV3_WRAP0_S2_CLK 74
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 75
+#define GCC_QUPV3_WRAP0_S3_CLK 76
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 77
+#define GCC_QUPV3_WRAP0_S4_CLK 78
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 79
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 80
+#define GCC_QUPV3_WRAP1_CORE_CLK 81
+#define GCC_QUPV3_WRAP1_S0_CLK 82
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 83
+#define GCC_QUPV3_WRAP1_S1_CLK 84
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 85
+#define GCC_QUPV3_WRAP1_S2_CLK 86
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 87
+#define GCC_QUPV3_WRAP1_S3_CLK 88
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 89
+#define GCC_QUPV3_WRAP1_S4_CLK 90
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 91
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 92
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 93
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 94
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 95
+#define GCC_SDCC1_AHB_CLK 96
+#define GCC_SDCC1_APPS_CLK 97
+#define GCC_SDCC1_APPS_CLK_SRC 98
+#define GCC_SDCC1_ICE_CORE_CLK 99
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 100
+#define GCC_SDCC2_AHB_CLK 101
+#define GCC_SDCC2_APPS_CLK 102
+#define GCC_SDCC2_APPS_CLK_SRC 103
+#define GCC_UFS_0_CLKREF_EN 104
+#define GCC_UFS_PAD_CLKREF_EN 105
+#define GCC_UFS_PHY_AHB_CLK 106
+#define GCC_UFS_PHY_AXI_CLK 107
+#define GCC_UFS_PHY_AXI_CLK_SRC 108
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 109
+#define GCC_UFS_PHY_ICE_CORE_CLK 110
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 111
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 112
+#define GCC_UFS_PHY_PHY_AUX_CLK 113
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 114
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 115
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 116
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 117
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 118
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 119
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 120
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 121
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 122
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 123
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 124
+#define GCC_USB30_PRIM_MASTER_CLK 125
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 126
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 127
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 128
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 129
+#define GCC_USB30_PRIM_SLEEP_CLK 130
+#define GCC_USB3_0_CLKREF_EN 131
+#define GCC_USB3_PRIM_PHY_AUX_CLK 132
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 133
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 134
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 135
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 136
+#define GCC_VCODEC0_AXI_CLK 137
+#define GCC_VENUS_CTL_AXI_CLK 138
+#define GCC_VIDEO_AHB_CLK 139
+#define GCC_VIDEO_THROTTLE_CORE_CLK 140
+#define GCC_VIDEO_VCODEC0_SYS_CLK 141
+#define GCC_VIDEO_VENUS_CLK_SRC 142
+#define GCC_VIDEO_VENUS_CTL_CLK 143
+#define GCC_VIDEO_XO_CLK 144
+
+/* GCC power domains */
+#define GCC_PCIE_0_GDSC 0
+#define GCC_UFS_PHY_GDSC 1
+#define GCC_USB30_PRIM_GDSC 2
+#define GCC_VCODEC0_GDSC 3
+#define GCC_VENUS_GDSC 4
+
+/* GCC resets */
+#define GCC_CAMERA_BCR 0
+#define GCC_DISPLAY_BCR 1
+#define GCC_GPU_BCR 2
+#define GCC_PCIE_0_BCR 3
+#define GCC_PCIE_0_LINK_DOWN_BCR 4
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 5
+#define GCC_PCIE_0_PHY_BCR 6
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 7
+#define GCC_PCIE_PHY_BCR 8
+#define GCC_PCIE_PHY_CFG_AHB_BCR 9
+#define GCC_PCIE_PHY_COM_BCR 10
+#define GCC_PDM_BCR 11
+#define GCC_QUPV3_WRAPPER_0_BCR 12
+#define GCC_QUPV3_WRAPPER_1_BCR 13
+#define GCC_QUSB2PHY_PRIM_BCR 14
+#define GCC_QUSB2PHY_SEC_BCR 15
+#define GCC_SDCC1_BCR 16
+#define GCC_SDCC2_BCR 17
+#define GCC_UFS_PHY_BCR 18
+#define GCC_USB30_PRIM_BCR 19
+#define GCC_USB3_DP_PHY_PRIM_BCR 20
+#define GCC_USB3_DP_PHY_SEC_BCR 21
+#define GCC_USB3_PHY_PRIM_BCR 22
+#define GCC_USB3_PHY_SEC_BCR 23
+#define GCC_USB3PHY_PHY_PRIM_BCR 24
+#define GCC_USB3PHY_PHY_SEC_BCR 25
+#define GCC_VCODEC0_BCR 26
+#define GCC_VENUS_BCR 27
+#define GCC_VIDEO_BCR 28
+#define GCC_VIDEO_VENUS_BCR 29
+#define GCC_VENUS_CTL_AXI_CLK_ARES 30
+#define GCC_VIDEO_VENUS_CTL_CLK_ARES 31
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8350-videocc.h b/include/dt-bindings/clock/qcom,sm8350-videocc.h
new file mode 100644
index 000000000000..b6945a448676
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8350-videocc.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM8350_H
+#define _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM8350_H
+
+/* Clocks */
+#define VIDEO_CC_AHB_CLK_SRC 0
+#define VIDEO_CC_MVS0_CLK 1
+#define VIDEO_CC_MVS0_CLK_SRC 2
+#define VIDEO_CC_MVS0_DIV_CLK_SRC 3
+#define VIDEO_CC_MVS0C_CLK 4
+#define VIDEO_CC_MVS0C_DIV2_DIV_CLK_SRC 5
+#define VIDEO_CC_MVS1_CLK 6
+#define VIDEO_CC_MVS1_CLK_SRC 7
+#define VIDEO_CC_MVS1_DIV2_CLK 8
+#define VIDEO_CC_MVS1_DIV_CLK_SRC 9
+#define VIDEO_CC_MVS1C_CLK 10
+#define VIDEO_CC_MVS1C_DIV2_DIV_CLK_SRC 11
+#define VIDEO_CC_SLEEP_CLK 12
+#define VIDEO_CC_SLEEP_CLK_SRC 13
+#define VIDEO_CC_XO_CLK_SRC 14
+#define VIDEO_PLL0 15
+#define VIDEO_PLL1 16
+
+/* GDSCs */
+#define MVS0C_GDSC 0
+#define MVS1C_GDSC 1
+#define MVS0_GDSC 2
+#define MVS1_GDSC 3
+
+#endif
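
The clock, GDSC, and reset indices in a header like this form separate spaces; which list an index resolves against is decided by the property it appears in on the consumer side. As a minimal sketch of a consumer of the SM8350 video clock controller (the node name, unit address, compatible string, and name strings below are illustrative assumptions, not taken from this patch):

    video-codec@aa00000 {
            compatible = "qcom,sm8350-venus";       /* assumed compatible */
            /* clock indices come from the list above */
            clocks = <&videocc VIDEO_CC_MVS0C_CLK>,
                     <&videocc VIDEO_CC_MVS0_CLK>;
            clock-names = "core", "vcodec0_core";
            /* GDSC indices come from the power-domain list above */
            power-domains = <&videocc MVS0C_GDSC>,
                            <&videocc MVS0_GDSC>;
            power-domain-names = "venus", "vcodec0";
    };

Note that the same integer may legally appear in both lists (e.g. 0 is VIDEO_CC_AHB_CLK_SRC and MVS0C_GDSC); the provider's cell properties keep the spaces distinct.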
diff --git a/include/dt-bindings/clock/qcom,sm8450-gpucc.h b/include/dt-bindings/clock/qcom,sm8450-gpucc.h
new file mode 100644
index 000000000000..712b171503d6
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8450-gpucc.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8450_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8450_H
+
+/* Clocks */
+#define GPU_CC_AHB_CLK 0
+#define GPU_CC_CRC_AHB_CLK 1
+#define GPU_CC_CX_APB_CLK 2
+#define GPU_CC_CX_FF_CLK 3
+#define GPU_CC_CX_GMU_CLK 4
+#define GPU_CC_CX_SNOC_DVM_CLK 5
+#define GPU_CC_CXO_AON_CLK 6
+#define GPU_CC_CXO_CLK 7
+#define GPU_CC_DEMET_CLK 8
+#define GPU_CC_DEMET_DIV_CLK_SRC 9
+#define GPU_CC_FF_CLK_SRC 10
+#define GPU_CC_FREQ_MEASURE_CLK 11
+#define GPU_CC_GMU_CLK_SRC 12
+#define GPU_CC_GX_FF_CLK 13
+#define GPU_CC_GX_GFX3D_CLK 14
+#define GPU_CC_GX_GFX3D_RDVM_CLK 15
+#define GPU_CC_GX_GMU_CLK 16
+#define GPU_CC_GX_VSENSE_CLK 17
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 18
+#define GPU_CC_HUB_AHB_DIV_CLK_SRC 19
+#define GPU_CC_HUB_AON_CLK 20
+#define GPU_CC_HUB_CLK_SRC 21
+#define GPU_CC_HUB_CX_INT_CLK 22
+#define GPU_CC_HUB_CX_INT_DIV_CLK_SRC 23
+#define GPU_CC_MEMNOC_GFX_CLK 24
+#define GPU_CC_MND1X_0_GFX3D_CLK 25
+#define GPU_CC_MND1X_1_GFX3D_CLK 26
+#define GPU_CC_PLL0 27
+#define GPU_CC_PLL1 28
+#define GPU_CC_SLEEP_CLK 29
+#define GPU_CC_XO_CLK_SRC 30
+#define GPU_CC_XO_DIV_CLK_SRC 31
+
+/* GDSCs */
+#define GPU_GX_GDSC 0
+#define GPU_CX_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8450-videocc.h b/include/dt-bindings/clock/qcom,sm8450-videocc.h
new file mode 100644
index 000000000000..9d795adfe4eb
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8450-videocc.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM8450_H
+#define _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM8450_H
+
+/* VIDEO_CC clocks */
+#define VIDEO_CC_MVS0_CLK 0
+#define VIDEO_CC_MVS0_CLK_SRC 1
+#define VIDEO_CC_MVS0_DIV_CLK_SRC 2
+#define VIDEO_CC_MVS0C_CLK 3
+#define VIDEO_CC_MVS0C_DIV2_DIV_CLK_SRC 4
+#define VIDEO_CC_MVS1_CLK 5
+#define VIDEO_CC_MVS1_CLK_SRC 6
+#define VIDEO_CC_MVS1_DIV_CLK_SRC 7
+#define VIDEO_CC_MVS1C_CLK 8
+#define VIDEO_CC_MVS1C_DIV2_DIV_CLK_SRC 9
+#define VIDEO_CC_PLL0 10
+#define VIDEO_CC_PLL1 11
+
+/* VIDEO_CC power domains */
+#define VIDEO_CC_MVS0C_GDSC 0
+#define VIDEO_CC_MVS0_GDSC 1
+#define VIDEO_CC_MVS1C_GDSC 2
+#define VIDEO_CC_MVS1_GDSC 3
+
+/* VIDEO_CC resets */
+#define CVP_VIDEO_CC_INTERFACE_BCR 0
+#define CVP_VIDEO_CC_MVS0_BCR 1
+#define CVP_VIDEO_CC_MVS0C_BCR 2
+#define CVP_VIDEO_CC_MVS1_BCR 3
+#define CVP_VIDEO_CC_MVS1C_BCR 4
+#define VIDEO_CC_MVS0C_CLK_ARES 5
+#define VIDEO_CC_MVS1C_CLK_ARES 6
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8550-camcc.h b/include/dt-bindings/clock/qcom,sm8550-camcc.h
new file mode 100644
index 000000000000..a2a256691c2b
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8550-camcc.h
@@ -0,0 +1,187 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8550_H
+#define _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8550_H
+
+/* CAM_CC clocks */
+#define CAM_CC_BPS_AHB_CLK 0
+#define CAM_CC_BPS_CLK 1
+#define CAM_CC_BPS_CLK_SRC 2
+#define CAM_CC_BPS_FAST_AHB_CLK 3
+#define CAM_CC_CAMNOC_AXI_CLK 4
+#define CAM_CC_CAMNOC_AXI_CLK_SRC 5
+#define CAM_CC_CAMNOC_DCD_XO_CLK 6
+#define CAM_CC_CAMNOC_XO_CLK 7
+#define CAM_CC_CCI_0_CLK 8
+#define CAM_CC_CCI_0_CLK_SRC 9
+#define CAM_CC_CCI_1_CLK 10
+#define CAM_CC_CCI_1_CLK_SRC 11
+#define CAM_CC_CCI_2_CLK 12
+#define CAM_CC_CCI_2_CLK_SRC 13
+#define CAM_CC_CORE_AHB_CLK 14
+#define CAM_CC_CPAS_AHB_CLK 15
+#define CAM_CC_CPAS_BPS_CLK 16
+#define CAM_CC_CPAS_CRE_CLK 17
+#define CAM_CC_CPAS_FAST_AHB_CLK 18
+#define CAM_CC_CPAS_IFE_0_CLK 19
+#define CAM_CC_CPAS_IFE_1_CLK 20
+#define CAM_CC_CPAS_IFE_2_CLK 21
+#define CAM_CC_CPAS_IFE_LITE_CLK 22
+#define CAM_CC_CPAS_IPE_NPS_CLK 23
+#define CAM_CC_CPAS_SBI_CLK 24
+#define CAM_CC_CPAS_SFE_0_CLK 25
+#define CAM_CC_CPAS_SFE_1_CLK 26
+#define CAM_CC_CPHY_RX_CLK_SRC 27
+#define CAM_CC_CRE_AHB_CLK 28
+#define CAM_CC_CRE_CLK 29
+#define CAM_CC_CRE_CLK_SRC 30
+#define CAM_CC_CSI0PHYTIMER_CLK 31
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 32
+#define CAM_CC_CSI1PHYTIMER_CLK 33
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 34
+#define CAM_CC_CSI2PHYTIMER_CLK 35
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 36
+#define CAM_CC_CSI3PHYTIMER_CLK 37
+#define CAM_CC_CSI3PHYTIMER_CLK_SRC 38
+#define CAM_CC_CSI4PHYTIMER_CLK 39
+#define CAM_CC_CSI4PHYTIMER_CLK_SRC 40
+#define CAM_CC_CSI5PHYTIMER_CLK 41
+#define CAM_CC_CSI5PHYTIMER_CLK_SRC 42
+#define CAM_CC_CSI6PHYTIMER_CLK 43
+#define CAM_CC_CSI6PHYTIMER_CLK_SRC 44
+#define CAM_CC_CSI7PHYTIMER_CLK 45
+#define CAM_CC_CSI7PHYTIMER_CLK_SRC 46
+#define CAM_CC_CSID_CLK 47
+#define CAM_CC_CSID_CLK_SRC 48
+#define CAM_CC_CSID_CSIPHY_RX_CLK 49
+#define CAM_CC_CSIPHY0_CLK 50
+#define CAM_CC_CSIPHY1_CLK 51
+#define CAM_CC_CSIPHY2_CLK 52
+#define CAM_CC_CSIPHY3_CLK 53
+#define CAM_CC_CSIPHY4_CLK 54
+#define CAM_CC_CSIPHY5_CLK 55
+#define CAM_CC_CSIPHY6_CLK 56
+#define CAM_CC_CSIPHY7_CLK 57
+#define CAM_CC_DRV_AHB_CLK 58
+#define CAM_CC_DRV_XO_CLK 59
+#define CAM_CC_FAST_AHB_CLK_SRC 60
+#define CAM_CC_GDSC_CLK 61
+#define CAM_CC_ICP_AHB_CLK 62
+#define CAM_CC_ICP_CLK 63
+#define CAM_CC_ICP_CLK_SRC 64
+#define CAM_CC_IFE_0_CLK 65
+#define CAM_CC_IFE_0_CLK_SRC 66
+#define CAM_CC_IFE_0_DSP_CLK 67
+#define CAM_CC_IFE_0_DSP_CLK_SRC 68
+#define CAM_CC_IFE_0_FAST_AHB_CLK 69
+#define CAM_CC_IFE_1_CLK 70
+#define CAM_CC_IFE_1_CLK_SRC 71
+#define CAM_CC_IFE_1_DSP_CLK 72
+#define CAM_CC_IFE_1_DSP_CLK_SRC 73
+#define CAM_CC_IFE_1_FAST_AHB_CLK 74
+#define CAM_CC_IFE_2_CLK 75
+#define CAM_CC_IFE_2_CLK_SRC 76
+#define CAM_CC_IFE_2_DSP_CLK 77
+#define CAM_CC_IFE_2_DSP_CLK_SRC 78
+#define CAM_CC_IFE_2_FAST_AHB_CLK 79
+#define CAM_CC_IFE_LITE_AHB_CLK 80
+#define CAM_CC_IFE_LITE_CLK 81
+#define CAM_CC_IFE_LITE_CLK_SRC 82
+#define CAM_CC_IFE_LITE_CPHY_RX_CLK 83
+#define CAM_CC_IFE_LITE_CSID_CLK 84
+#define CAM_CC_IFE_LITE_CSID_CLK_SRC 85
+#define CAM_CC_IPE_NPS_AHB_CLK 86
+#define CAM_CC_IPE_NPS_CLK 87
+#define CAM_CC_IPE_NPS_CLK_SRC 88
+#define CAM_CC_IPE_NPS_FAST_AHB_CLK 89
+#define CAM_CC_IPE_PPS_CLK 90
+#define CAM_CC_IPE_PPS_FAST_AHB_CLK 91
+#define CAM_CC_JPEG_1_CLK 92
+#define CAM_CC_JPEG_CLK 93
+#define CAM_CC_JPEG_CLK_SRC 94
+#define CAM_CC_MCLK0_CLK 95
+#define CAM_CC_MCLK0_CLK_SRC 96
+#define CAM_CC_MCLK1_CLK 97
+#define CAM_CC_MCLK1_CLK_SRC 98
+#define CAM_CC_MCLK2_CLK 99
+#define CAM_CC_MCLK2_CLK_SRC 100
+#define CAM_CC_MCLK3_CLK 101
+#define CAM_CC_MCLK3_CLK_SRC 102
+#define CAM_CC_MCLK4_CLK 103
+#define CAM_CC_MCLK4_CLK_SRC 104
+#define CAM_CC_MCLK5_CLK 105
+#define CAM_CC_MCLK5_CLK_SRC 106
+#define CAM_CC_MCLK6_CLK 107
+#define CAM_CC_MCLK6_CLK_SRC 108
+#define CAM_CC_MCLK7_CLK 109
+#define CAM_CC_MCLK7_CLK_SRC 110
+#define CAM_CC_PLL0 111
+#define CAM_CC_PLL0_OUT_EVEN 112
+#define CAM_CC_PLL0_OUT_ODD 113
+#define CAM_CC_PLL1 114
+#define CAM_CC_PLL1_OUT_EVEN 115
+#define CAM_CC_PLL2 116
+#define CAM_CC_PLL3 117
+#define CAM_CC_PLL3_OUT_EVEN 118
+#define CAM_CC_PLL4 119
+#define CAM_CC_PLL4_OUT_EVEN 120
+#define CAM_CC_PLL5 121
+#define CAM_CC_PLL5_OUT_EVEN 122
+#define CAM_CC_PLL6 123
+#define CAM_CC_PLL6_OUT_EVEN 124
+#define CAM_CC_PLL7 125
+#define CAM_CC_PLL7_OUT_EVEN 126
+#define CAM_CC_PLL8 127
+#define CAM_CC_PLL8_OUT_EVEN 128
+#define CAM_CC_PLL9 129
+#define CAM_CC_PLL9_OUT_EVEN 130
+#define CAM_CC_PLL10 131
+#define CAM_CC_PLL10_OUT_EVEN 132
+#define CAM_CC_PLL11 133
+#define CAM_CC_PLL11_OUT_EVEN 134
+#define CAM_CC_PLL12 135
+#define CAM_CC_PLL12_OUT_EVEN 136
+#define CAM_CC_QDSS_DEBUG_CLK 137
+#define CAM_CC_QDSS_DEBUG_CLK_SRC 138
+#define CAM_CC_QDSS_DEBUG_XO_CLK 139
+#define CAM_CC_SBI_CLK 140
+#define CAM_CC_SBI_FAST_AHB_CLK 141
+#define CAM_CC_SFE_0_CLK 142
+#define CAM_CC_SFE_0_CLK_SRC 143
+#define CAM_CC_SFE_0_FAST_AHB_CLK 144
+#define CAM_CC_SFE_1_CLK 145
+#define CAM_CC_SFE_1_CLK_SRC 146
+#define CAM_CC_SFE_1_FAST_AHB_CLK 147
+#define CAM_CC_SLEEP_CLK 148
+#define CAM_CC_SLEEP_CLK_SRC 149
+#define CAM_CC_SLOW_AHB_CLK_SRC 150
+#define CAM_CC_XO_CLK_SRC 151
+
+/* CAM_CC power domains */
+#define CAM_CC_BPS_GDSC 0
+#define CAM_CC_IFE_0_GDSC 1
+#define CAM_CC_IFE_1_GDSC 2
+#define CAM_CC_IFE_2_GDSC 3
+#define CAM_CC_IPE_0_GDSC 4
+#define CAM_CC_SBI_GDSC 5
+#define CAM_CC_SFE_0_GDSC 6
+#define CAM_CC_SFE_1_GDSC 7
+#define CAM_CC_TITAN_TOP_GDSC 8
+
+/* CAM_CC resets */
+#define CAM_CC_BPS_BCR 0
+#define CAM_CC_DRV_BCR 1
+#define CAM_CC_ICP_BCR 2
+#define CAM_CC_IFE_0_BCR 3
+#define CAM_CC_IFE_1_BCR 4
+#define CAM_CC_IFE_2_BCR 5
+#define CAM_CC_IPE_0_BCR 6
+#define CAM_CC_QDSS_DEBUG_BCR 7
+#define CAM_CC_SBI_BCR 8
+#define CAM_CC_SFE_0_BCR 9
+#define CAM_CC_SFE_1_BCR 10
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8550-gpucc.h b/include/dt-bindings/clock/qcom,sm8550-gpucc.h
new file mode 100644
index 000000000000..a6760547a3ab
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8550-gpucc.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8550_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8550_H
+
+/* GPU_CC clocks */
+#define GPU_CC_AHB_CLK 0
+#define GPU_CC_CRC_AHB_CLK 1
+#define GPU_CC_CX_FF_CLK 2
+#define GPU_CC_CX_GMU_CLK 3
+#define GPU_CC_CXO_AON_CLK 4
+#define GPU_CC_CXO_CLK 5
+#define GPU_CC_DEMET_CLK 6
+#define GPU_CC_DEMET_DIV_CLK_SRC 7
+#define GPU_CC_FF_CLK_SRC 8
+#define GPU_CC_FREQ_MEASURE_CLK 9
+#define GPU_CC_GMU_CLK_SRC 10
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 11
+#define GPU_CC_HUB_AON_CLK 12
+#define GPU_CC_HUB_CLK_SRC 13
+#define GPU_CC_HUB_CX_INT_CLK 14
+#define GPU_CC_MEMNOC_GFX_CLK 15
+#define GPU_CC_MND1X_0_GFX3D_CLK 16
+#define GPU_CC_MND1X_1_GFX3D_CLK 17
+#define GPU_CC_PLL0 18
+#define GPU_CC_PLL1 19
+#define GPU_CC_SLEEP_CLK 20
+#define GPU_CC_XO_CLK_SRC 21
+#define GPU_CC_XO_DIV_CLK_SRC 22
+
+/* GPU_CC power domains */
+#define GPU_CC_CX_GDSC 0
+#define GPU_CC_GX_GDSC 1
+
+/* GPU_CC resets */
+#define GPUCC_GPU_CC_ACD_BCR 0
+#define GPUCC_GPU_CC_CX_BCR 1
+#define GPUCC_GPU_CC_FAST_HUB_BCR 2
+#define GPUCC_GPU_CC_FF_BCR 3
+#define GPUCC_GPU_CC_GFX3D_AON_BCR 4
+#define GPUCC_GPU_CC_GMU_BCR 5
+#define GPUCC_GPU_CC_GX_BCR 6
+#define GPUCC_GPU_CC_XO_BCR 7
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8650-dispcc.h b/include/dt-bindings/clock/qcom,sm8650-dispcc.h
new file mode 100644
index 000000000000..b0a668b395a5
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8650-dispcc.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved
+ * Copyright (c) 2023, Linaro Ltd.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_SM8650_DISP_CC_H
+#define _DT_BINDINGS_CLK_QCOM_SM8650_DISP_CC_H
+
+/* DISP_CC clocks */
+#define DISP_CC_MDSS_ACCU_CLK 0
+#define DISP_CC_MDSS_AHB1_CLK 1
+#define DISP_CC_MDSS_AHB_CLK 2
+#define DISP_CC_MDSS_AHB_CLK_SRC 3
+#define DISP_CC_MDSS_BYTE0_CLK 4
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 5
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 6
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 7
+#define DISP_CC_MDSS_BYTE1_CLK 8
+#define DISP_CC_MDSS_BYTE1_CLK_SRC 9
+#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 10
+#define DISP_CC_MDSS_BYTE1_INTF_CLK 11
+#define DISP_CC_MDSS_DPTX0_AUX_CLK 12
+#define DISP_CC_MDSS_DPTX0_AUX_CLK_SRC 13
+#define DISP_CC_MDSS_DPTX0_CRYPTO_CLK 14
+#define DISP_CC_MDSS_DPTX0_LINK_CLK 15
+#define DISP_CC_MDSS_DPTX0_LINK_CLK_SRC 16
+#define DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC 17
+#define DISP_CC_MDSS_DPTX0_LINK_INTF_CLK 18
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK 19
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC 20
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK 21
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC 22
+#define DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK 23
+#define DISP_CC_MDSS_DPTX1_AUX_CLK 24
+#define DISP_CC_MDSS_DPTX1_AUX_CLK_SRC 25
+#define DISP_CC_MDSS_DPTX1_CRYPTO_CLK 26
+#define DISP_CC_MDSS_DPTX1_LINK_CLK 27
+#define DISP_CC_MDSS_DPTX1_LINK_CLK_SRC 28
+#define DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC 29
+#define DISP_CC_MDSS_DPTX1_LINK_INTF_CLK 30
+#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK 31
+#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC 32
+#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK 33
+#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC 34
+#define DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK 35
+#define DISP_CC_MDSS_DPTX2_AUX_CLK 36
+#define DISP_CC_MDSS_DPTX2_AUX_CLK_SRC 37
+#define DISP_CC_MDSS_DPTX2_CRYPTO_CLK 38
+#define DISP_CC_MDSS_DPTX2_LINK_CLK 39
+#define DISP_CC_MDSS_DPTX2_LINK_CLK_SRC 40
+#define DISP_CC_MDSS_DPTX2_LINK_DIV_CLK_SRC 41
+#define DISP_CC_MDSS_DPTX2_LINK_INTF_CLK 42
+#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK 43
+#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC 44
+#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK 45
+#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK_SRC 46
+#define DISP_CC_MDSS_DPTX3_AUX_CLK 47
+#define DISP_CC_MDSS_DPTX3_AUX_CLK_SRC 48
+#define DISP_CC_MDSS_DPTX3_CRYPTO_CLK 49
+#define DISP_CC_MDSS_DPTX3_LINK_CLK 50
+#define DISP_CC_MDSS_DPTX3_LINK_CLK_SRC 51
+#define DISP_CC_MDSS_DPTX3_LINK_DIV_CLK_SRC 52
+#define DISP_CC_MDSS_DPTX3_LINK_INTF_CLK 53
+#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK 54
+#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK_SRC 55
+#define DISP_CC_MDSS_ESC0_CLK 56
+#define DISP_CC_MDSS_ESC0_CLK_SRC 57
+#define DISP_CC_MDSS_ESC1_CLK 58
+#define DISP_CC_MDSS_ESC1_CLK_SRC 59
+#define DISP_CC_MDSS_MDP1_CLK 60
+#define DISP_CC_MDSS_MDP_CLK 61
+#define DISP_CC_MDSS_MDP_CLK_SRC 62
+#define DISP_CC_MDSS_MDP_LUT1_CLK 63
+#define DISP_CC_MDSS_MDP_LUT_CLK 64
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 65
+#define DISP_CC_MDSS_PCLK0_CLK 66
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 67
+#define DISP_CC_MDSS_PCLK1_CLK 68
+#define DISP_CC_MDSS_PCLK1_CLK_SRC 69
+#define DISP_CC_MDSS_RSCC_AHB_CLK 70
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 71
+#define DISP_CC_MDSS_VSYNC1_CLK 72
+#define DISP_CC_MDSS_VSYNC_CLK 73
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 74
+#define DISP_CC_PLL0 75
+#define DISP_CC_PLL1 76
+#define DISP_CC_SLEEP_CLK 77
+#define DISP_CC_SLEEP_CLK_SRC 78
+#define DISP_CC_XO_CLK 79
+#define DISP_CC_XO_CLK_SRC 80
+
+/* DISP_CC resets */
+#define DISP_CC_MDSS_CORE_BCR 0
+#define DISP_CC_MDSS_CORE_INT2_BCR 1
+#define DISP_CC_MDSS_RSCC_BCR 2
+
+/* DISP_CC GDSCR */
+#define MDSS_GDSC 0
+#define MDSS_INT2_GDSC 1
+
+#endif
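
All three index spaces defined by this header (clocks, resets, GDSCs) are exposed through a single provider node. A rough sketch of what such a node looks like, with an illustrative register address and size that are not taken from this patch:

    disp_cc: clock-controller@af00000 {
            compatible = "qcom,sm8650-dispcc";
            reg = <0x0af00000 0x20000>;     /* address/size illustrative */
            #clock-cells = <1>;             /* selects DISP_CC_* clock IDs */
            #reset-cells = <1>;             /* selects DISP_CC_* BCR IDs */
            #power-domain-cells = <1>;      /* selects MDSS_GDSC / MDSS_INT2_GDSC */
    };

The single-cell specifiers mean a consumer passes exactly one of the integers above after the phandle.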
diff --git a/include/dt-bindings/clock/qcom,sm8650-gcc.h b/include/dt-bindings/clock/qcom,sm8650-gcc.h
new file mode 100644
index 000000000000..0c543ba46079
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8650-gcc.h
@@ -0,0 +1,254 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM8650_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SM8650_H
+
+/* GCC clocks */
+#define GCC_AGGRE_NOC_PCIE_AXI_CLK 0
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 1
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 2
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 3
+#define GCC_BOOT_ROM_AHB_CLK 4
+#define GCC_CAMERA_AHB_CLK 5
+#define GCC_CAMERA_HF_AXI_CLK 6
+#define GCC_CAMERA_SF_AXI_CLK 7
+#define GCC_CAMERA_XO_CLK 8
+#define GCC_CFG_NOC_PCIE_ANOC_AHB_CLK 9
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 10
+#define GCC_CNOC_PCIE_SF_AXI_CLK 11
+#define GCC_DDRSS_GPU_AXI_CLK 12
+#define GCC_DDRSS_PCIE_SF_QTB_CLK 13
+#define GCC_DISP_AHB_CLK 14
+#define GCC_DISP_HF_AXI_CLK 15
+#define GCC_DISP_XO_CLK 16
+#define GCC_GP1_CLK 17
+#define GCC_GP1_CLK_SRC 18
+#define GCC_GP2_CLK 19
+#define GCC_GP2_CLK_SRC 20
+#define GCC_GP3_CLK 21
+#define GCC_GP3_CLK_SRC 22
+#define GCC_GPLL0 23
+#define GCC_GPLL0_OUT_EVEN 24
+#define GCC_GPLL1 25
+#define GCC_GPLL3 26
+#define GCC_GPLL4 27
+#define GCC_GPLL6 28
+#define GCC_GPLL7 29
+#define GCC_GPLL9 30
+#define GCC_GPU_CFG_AHB_CLK 31
+#define GCC_GPU_GPLL0_CLK_SRC 32
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 33
+#define GCC_GPU_MEMNOC_GFX_CLK 34
+#define GCC_GPU_SNOC_DVM_GFX_CLK 35
+#define GCC_PCIE_0_AUX_CLK 36
+#define GCC_PCIE_0_AUX_CLK_SRC 37
+#define GCC_PCIE_0_CFG_AHB_CLK 38
+#define GCC_PCIE_0_MSTR_AXI_CLK 39
+#define GCC_PCIE_0_PHY_RCHNG_CLK 40
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 41
+#define GCC_PCIE_0_PIPE_CLK 42
+#define GCC_PCIE_0_PIPE_CLK_SRC 43
+#define GCC_PCIE_0_SLV_AXI_CLK 44
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 45
+#define GCC_PCIE_1_AUX_CLK 46
+#define GCC_PCIE_1_AUX_CLK_SRC 47
+#define GCC_PCIE_1_CFG_AHB_CLK 48
+#define GCC_PCIE_1_MSTR_AXI_CLK 49
+#define GCC_PCIE_1_PHY_AUX_CLK 50
+#define GCC_PCIE_1_PHY_AUX_CLK_SRC 51
+#define GCC_PCIE_1_PHY_RCHNG_CLK 52
+#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 53
+#define GCC_PCIE_1_PIPE_CLK 54
+#define GCC_PCIE_1_PIPE_CLK_SRC 55
+#define GCC_PCIE_1_SLV_AXI_CLK 56
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 57
+#define GCC_PDM2_CLK 58
+#define GCC_PDM2_CLK_SRC 59
+#define GCC_PDM_AHB_CLK 60
+#define GCC_PDM_XO4_CLK 61
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 62
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 63
+#define GCC_QMIP_DISP_AHB_CLK 64
+#define GCC_QMIP_GPU_AHB_CLK 65
+#define GCC_QMIP_PCIE_AHB_CLK 66
+#define GCC_QMIP_VIDEO_CV_CPU_AHB_CLK 67
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 68
+#define GCC_QMIP_VIDEO_V_CPU_AHB_CLK 69
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 70
+#define GCC_QUPV3_I2C_CORE_CLK 71
+#define GCC_QUPV3_I2C_S0_CLK 72
+#define GCC_QUPV3_I2C_S0_CLK_SRC 73
+#define GCC_QUPV3_I2C_S1_CLK 74
+#define GCC_QUPV3_I2C_S1_CLK_SRC 75
+#define GCC_QUPV3_I2C_S2_CLK 76
+#define GCC_QUPV3_I2C_S2_CLK_SRC 77
+#define GCC_QUPV3_I2C_S3_CLK 78
+#define GCC_QUPV3_I2C_S3_CLK_SRC 79
+#define GCC_QUPV3_I2C_S4_CLK 80
+#define GCC_QUPV3_I2C_S4_CLK_SRC 81
+#define GCC_QUPV3_I2C_S5_CLK 82
+#define GCC_QUPV3_I2C_S5_CLK_SRC 83
+#define GCC_QUPV3_I2C_S6_CLK 84
+#define GCC_QUPV3_I2C_S6_CLK_SRC 85
+#define GCC_QUPV3_I2C_S7_CLK 86
+#define GCC_QUPV3_I2C_S7_CLK_SRC 87
+#define GCC_QUPV3_I2C_S8_CLK 88
+#define GCC_QUPV3_I2C_S8_CLK_SRC 89
+#define GCC_QUPV3_I2C_S9_CLK 90
+#define GCC_QUPV3_I2C_S9_CLK_SRC 91
+#define GCC_QUPV3_I2C_S_AHB_CLK 92
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 93
+#define GCC_QUPV3_WRAP1_CORE_CLK 94
+#define GCC_QUPV3_WRAP1_QSPI_REF_CLK 95
+#define GCC_QUPV3_WRAP1_QSPI_REF_CLK_SRC 96
+#define GCC_QUPV3_WRAP1_S0_CLK 97
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 98
+#define GCC_QUPV3_WRAP1_S1_CLK 99
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 100
+#define GCC_QUPV3_WRAP1_S2_CLK 101
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 102
+#define GCC_QUPV3_WRAP1_S3_CLK 103
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 104
+#define GCC_QUPV3_WRAP1_S4_CLK 105
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 106
+#define GCC_QUPV3_WRAP1_S5_CLK 107
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 108
+#define GCC_QUPV3_WRAP1_S6_CLK 109
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 110
+#define GCC_QUPV3_WRAP1_S7_CLK 111
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 112
+#define GCC_QUPV3_WRAP2_CORE_2X_CLK 113
+#define GCC_QUPV3_WRAP2_CORE_CLK 114
+#define GCC_QUPV3_WRAP2_IBI_CTRL_0_CLK_SRC 115
+#define GCC_QUPV3_WRAP2_IBI_CTRL_2_CLK 116
+#define GCC_QUPV3_WRAP2_IBI_CTRL_3_CLK 117
+#define GCC_QUPV3_WRAP2_S0_CLK 118
+#define GCC_QUPV3_WRAP2_S0_CLK_SRC 119
+#define GCC_QUPV3_WRAP2_S1_CLK 120
+#define GCC_QUPV3_WRAP2_S1_CLK_SRC 121
+#define GCC_QUPV3_WRAP2_S2_CLK 122
+#define GCC_QUPV3_WRAP2_S2_CLK_SRC 123
+#define GCC_QUPV3_WRAP2_S3_CLK 124
+#define GCC_QUPV3_WRAP2_S3_CLK_SRC 125
+#define GCC_QUPV3_WRAP2_S4_CLK 126
+#define GCC_QUPV3_WRAP2_S4_CLK_SRC 127
+#define GCC_QUPV3_WRAP2_S5_CLK 128
+#define GCC_QUPV3_WRAP2_S5_CLK_SRC 129
+#define GCC_QUPV3_WRAP2_S6_CLK 130
+#define GCC_QUPV3_WRAP2_S6_CLK_SRC 131
+#define GCC_QUPV3_WRAP2_S7_CLK 132
+#define GCC_QUPV3_WRAP2_S7_CLK_SRC 133
+#define GCC_QUPV3_WRAP3_CORE_2X_CLK 134
+#define GCC_QUPV3_WRAP3_CORE_CLK 135
+#define GCC_QUPV3_WRAP3_QSPI_REF_CLK 136
+#define GCC_QUPV3_WRAP3_QSPI_REF_CLK_SRC 137
+#define GCC_QUPV3_WRAP3_S0_CLK 138
+#define GCC_QUPV3_WRAP3_S0_CLK_SRC 139
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 140
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 141
+#define GCC_QUPV3_WRAP_2_IBI_2_AHB_CLK 142
+#define GCC_QUPV3_WRAP_2_IBI_3_AHB_CLK 143
+#define GCC_QUPV3_WRAP_2_M_AHB_CLK 144
+#define GCC_QUPV3_WRAP_2_S_AHB_CLK 145
+#define GCC_QUPV3_WRAP_3_M_AHB_CLK 146
+#define GCC_QUPV3_WRAP_3_S_AHB_CLK 147
+#define GCC_SDCC2_AHB_CLK 148
+#define GCC_SDCC2_APPS_CLK 149
+#define GCC_SDCC2_APPS_CLK_SRC 150
+#define GCC_SDCC4_AHB_CLK 151
+#define GCC_SDCC4_APPS_CLK 152
+#define GCC_SDCC4_APPS_CLK_SRC 153
+#define GCC_UFS_PHY_AHB_CLK 154
+#define GCC_UFS_PHY_AXI_CLK 155
+#define GCC_UFS_PHY_AXI_CLK_SRC 156
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 157
+#define GCC_UFS_PHY_ICE_CORE_CLK 158
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 159
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 160
+#define GCC_UFS_PHY_PHY_AUX_CLK 161
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 162
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 163
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 164
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 165
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 166
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 167
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 168
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 169
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 170
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 171
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 172
+#define GCC_USB30_PRIM_MASTER_CLK 173
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 174
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 175
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 176
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 177
+#define GCC_USB30_PRIM_SLEEP_CLK 178
+#define GCC_USB3_PRIM_PHY_AUX_CLK 179
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 180
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 181
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 182
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 183
+#define GCC_VIDEO_AHB_CLK 184
+#define GCC_VIDEO_AXI0_CLK 185
+#define GCC_VIDEO_AXI1_CLK 186
+#define GCC_VIDEO_XO_CLK 187
+#define GCC_GPLL0_AO 188
+#define GCC_GPLL0_OUT_EVEN_AO 189
+#define GCC_GPLL1_AO 190
+#define GCC_GPLL3_AO 191
+#define GCC_GPLL4_AO 192
+#define GCC_GPLL6_AO 193
+
+/* GCC resets */
+#define GCC_CAMERA_BCR 0
+#define GCC_DISPLAY_BCR 1
+#define GCC_GPU_BCR 2
+#define GCC_PCIE_0_BCR 3
+#define GCC_PCIE_0_LINK_DOWN_BCR 4
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 5
+#define GCC_PCIE_0_PHY_BCR 6
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 7
+#define GCC_PCIE_1_BCR 8
+#define GCC_PCIE_1_LINK_DOWN_BCR 9
+#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 10
+#define GCC_PCIE_1_PHY_BCR 11
+#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 12
+#define GCC_PCIE_PHY_BCR 13
+#define GCC_PCIE_PHY_CFG_AHB_BCR 14
+#define GCC_PCIE_PHY_COM_BCR 15
+#define GCC_PDM_BCR 16
+#define GCC_QUPV3_WRAPPER_1_BCR 17
+#define GCC_QUPV3_WRAPPER_2_BCR 18
+#define GCC_QUPV3_WRAPPER_3_BCR 19
+#define GCC_QUPV3_WRAPPER_I2C_BCR 20
+#define GCC_QUSB2PHY_PRIM_BCR 21
+#define GCC_QUSB2PHY_SEC_BCR 22
+#define GCC_SDCC2_BCR 23
+#define GCC_SDCC4_BCR 24
+#define GCC_UFS_PHY_BCR 25
+#define GCC_USB30_PRIM_BCR 26
+#define GCC_USB3_DP_PHY_PRIM_BCR 27
+#define GCC_USB3_DP_PHY_SEC_BCR 28
+#define GCC_USB3_PHY_PRIM_BCR 29
+#define GCC_USB3_PHY_SEC_BCR 30
+#define GCC_USB3PHY_PHY_PRIM_BCR 31
+#define GCC_USB3PHY_PHY_SEC_BCR 32
+#define GCC_VIDEO_AXI0_CLK_ARES 33
+#define GCC_VIDEO_AXI1_CLK_ARES 34
+#define GCC_VIDEO_BCR 35
+
+/* GCC power domains */
+#define PCIE_0_GDSC 0
+#define PCIE_0_PHY_GDSC 1
+#define PCIE_1_GDSC 2
+#define PCIE_1_PHY_GDSC 3
+#define UFS_PHY_GDSC 4
+#define UFS_MEM_PHY_GDSC 5
+#define USB30_PRIM_GDSC 6
+#define USB3_PHY_GDSC 7
+
+#endif
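
Consumers reference these GCC indices through the usual clocks/resets phandle arrays. A hedged sketch for an SDCC2 host on this GCC (unit address, compatible strings, and clock-names are assumptions for illustration):

    mmc@8804000 {
            compatible = "qcom,sm8650-sdhci", "qcom,sdhci-msm-v5";  /* assumed */
            clocks = <&gcc GCC_SDCC2_AHB_CLK>,      /* bus interface clock */
                     <&gcc GCC_SDCC2_APPS_CLK>;     /* core functional clock */
            clock-names = "iface", "core";
            resets = <&gcc GCC_SDCC2_BCR>;          /* block-level reset */
    };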
diff --git a/include/dt-bindings/clock/qcom,sm8650-gpucc.h b/include/dt-bindings/clock/qcom,sm8650-gpucc.h
new file mode 100644
index 000000000000..d0dc457cfe75
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8650-gpucc.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8650_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8650_H
+
+/* GPU_CC clocks */
+#define GPU_CC_AHB_CLK 0
+#define GPU_CC_CRC_AHB_CLK 1
+#define GPU_CC_CX_ACCU_SHIFT_CLK 2
+#define GPU_CC_CX_FF_CLK 3
+#define GPU_CC_CX_GMU_CLK 4
+#define GPU_CC_CXO_AON_CLK 5
+#define GPU_CC_CXO_CLK 6
+#define GPU_CC_DEMET_CLK 7
+#define GPU_CC_DPM_CLK 8
+#define GPU_CC_FF_CLK_SRC 9
+#define GPU_CC_FREQ_MEASURE_CLK 10
+#define GPU_CC_GMU_CLK_SRC 11
+#define GPU_CC_GX_ACCU_SHIFT_CLK 12
+#define GPU_CC_GX_FF_CLK 13
+#define GPU_CC_GX_GFX3D_CLK 14
+#define GPU_CC_GX_GFX3D_RDVM_CLK 15
+#define GPU_CC_GX_GMU_CLK 16
+#define GPU_CC_GX_VSENSE_CLK 17
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 18
+#define GPU_CC_HUB_AON_CLK 19
+#define GPU_CC_HUB_CLK_SRC 20
+#define GPU_CC_HUB_CX_INT_CLK 21
+#define GPU_CC_HUB_DIV_CLK_SRC 22
+#define GPU_CC_MEMNOC_GFX_CLK 23
+#define GPU_CC_PLL0 24
+#define GPU_CC_PLL1 25
+#define GPU_CC_SLEEP_CLK 26
+
+/* GDSCs */
+#define GPU_GX_GDSC 0
+#define GPU_CX_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8650-tcsr.h b/include/dt-bindings/clock/qcom,sm8650-tcsr.h
new file mode 100644
index 000000000000..b2c72d492f1f
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8650-tcsr.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_TCSR_CC_SM8650_H
+#define _DT_BINDINGS_CLK_QCOM_TCSR_CC_SM8650_H
+
+/* TCSR CC clocks */
+#define TCSR_PCIE_0_CLKREF_EN 0
+#define TCSR_PCIE_1_CLKREF_EN 1
+#define TCSR_UFS_CLKREF_EN 2
+#define TCSR_UFS_PAD_CLKREF_EN 3
+#define TCSR_USB2_CLKREF_EN 4
+#define TCSR_USB3_CLKREF_EN 5
+
+#endif
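
These single-bit reference-clock enables are typically handed to PHY nodes as ordinary clocks. A sketch under assumed names (the compatible, address, and clock-names are illustrative, not from this patch):

    ufs_mem_phy: phy@1d80000 {
            compatible = "qcom,sm8650-qmp-ufs-phy";     /* assumed */
            clocks = <&tcsr TCSR_UFS_CLKREF_EN>;        /* pad reference enable */
            clock-names = "ref";
    };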
diff --git a/include/dt-bindings/clock/qcom,videocc-sm8150.h b/include/dt-bindings/clock/qcom,videocc-sm8150.h
index e24ee840cfdb..c557b78dc572 100644
--- a/include/dt-bindings/clock/qcom,videocc-sm8150.h
+++ b/include/dt-bindings/clock/qcom,videocc-sm8150.h
@@ -16,6 +16,10 @@
/* VIDEO_CC Resets */
#define VIDEO_CC_MVSC_CORE_CLK_BCR 0
+#define VIDEO_CC_INTERFACE_BCR 1
+#define VIDEO_CC_MVS0_BCR 2
+#define VIDEO_CC_MVS1_BCR 3
+#define VIDEO_CC_MVSC_BCR 4
/* VIDEO_CC GDSCRs */
#define VENUS_GDSC 0
diff --git a/include/dt-bindings/clock/qcom,x1e80100-gcc.h b/include/dt-bindings/clock/qcom,x1e80100-gcc.h
new file mode 100644
index 000000000000..24ba9e2a5cf6
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,x1e80100-gcc.h
@@ -0,0 +1,485 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_X1E80100_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_X1E80100_H
+
+/* GCC clocks */
+#define GCC_AGGRE_NOC_USB_NORTH_AXI_CLK 0
+#define GCC_AGGRE_NOC_USB_SOUTH_AXI_CLK 1
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 2
+#define GCC_AGGRE_USB2_PRIM_AXI_CLK 3
+#define GCC_AGGRE_USB3_MP_AXI_CLK 4
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 5
+#define GCC_AGGRE_USB3_SEC_AXI_CLK 6
+#define GCC_AGGRE_USB3_TERT_AXI_CLK 7
+#define GCC_AGGRE_USB4_0_AXI_CLK 8
+#define GCC_AGGRE_USB4_1_AXI_CLK 9
+#define GCC_AGGRE_USB4_2_AXI_CLK 10
+#define GCC_AGGRE_USB_NOC_AXI_CLK 11
+#define GCC_AV1E_AHB_CLK 12
+#define GCC_AV1E_AXI_CLK 13
+#define GCC_AV1E_XO_CLK 14
+#define GCC_BOOT_ROM_AHB_CLK 15
+#define GCC_CAMERA_AHB_CLK 16
+#define GCC_CAMERA_HF_AXI_CLK 17
+#define GCC_CAMERA_SF_AXI_CLK 18
+#define GCC_CAMERA_XO_CLK 19
+#define GCC_CFG_NOC_PCIE_ANOC_AHB_CLK 20
+#define GCC_CFG_NOC_PCIE_ANOC_NORTH_AHB_CLK 21
+#define GCC_CFG_NOC_PCIE_ANOC_SOUTH_AHB_CLK 22
+#define GCC_CFG_NOC_USB2_PRIM_AXI_CLK 23
+#define GCC_CFG_NOC_USB3_MP_AXI_CLK 24
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 25
+#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 26
+#define GCC_CFG_NOC_USB3_TERT_AXI_CLK 27
+#define GCC_CFG_NOC_USB_ANOC_AHB_CLK 28
+#define GCC_CFG_NOC_USB_ANOC_NORTH_AHB_CLK 29
+#define GCC_CFG_NOC_USB_ANOC_SOUTH_AHB_CLK 30
+#define GCC_CNOC_PCIE1_TUNNEL_CLK 31
+#define GCC_CNOC_PCIE2_TUNNEL_CLK 32
+#define GCC_CNOC_PCIE_NORTH_SF_AXI_CLK 33
+#define GCC_CNOC_PCIE_SOUTH_SF_AXI_CLK 34
+#define GCC_CNOC_PCIE_TUNNEL_CLK 35
+#define GCC_DDRSS_GPU_AXI_CLK 36
+#define GCC_DISP_AHB_CLK 37
+#define GCC_DISP_HF_AXI_CLK 38
+#define GCC_DISP_XO_CLK 39
+#define GCC_GP1_CLK 40
+#define GCC_GP1_CLK_SRC 41
+#define GCC_GP2_CLK 42
+#define GCC_GP2_CLK_SRC 43
+#define GCC_GP3_CLK 44
+#define GCC_GP3_CLK_SRC 45
+#define GCC_GPLL0 46
+#define GCC_GPLL0_OUT_EVEN 47
+#define GCC_GPLL4 48
+#define GCC_GPLL7 49
+#define GCC_GPLL8 50
+#define GCC_GPLL9 51
+#define GCC_GPU_CFG_AHB_CLK 52
+#define GCC_GPU_GPLL0_CPH_CLK_SRC 53
+#define GCC_GPU_GPLL0_DIV_CPH_CLK_SRC 54
+#define GCC_GPU_MEMNOC_GFX_CLK 55
+#define GCC_GPU_SNOC_DVM_GFX_CLK 56
+#define GCC_PCIE0_PHY_RCHNG_CLK 57
+#define GCC_PCIE1_PHY_RCHNG_CLK 58
+#define GCC_PCIE2_PHY_RCHNG_CLK 59
+#define GCC_PCIE_0_AUX_CLK 60
+#define GCC_PCIE_0_AUX_CLK_SRC 61
+#define GCC_PCIE_0_CFG_AHB_CLK 62
+#define GCC_PCIE_0_MSTR_AXI_CLK 63
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 64
+#define GCC_PCIE_0_PIPE_CLK 65
+#define GCC_PCIE_0_SLV_AXI_CLK 66
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 67
+#define GCC_PCIE_1_AUX_CLK 68
+#define GCC_PCIE_1_AUX_CLK_SRC 69
+#define GCC_PCIE_1_CFG_AHB_CLK 70
+#define GCC_PCIE_1_MSTR_AXI_CLK 71
+#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 72
+#define GCC_PCIE_1_PIPE_CLK 73
+#define GCC_PCIE_1_SLV_AXI_CLK 74
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 75
+#define GCC_PCIE_2_AUX_CLK 76
+#define GCC_PCIE_2_AUX_CLK_SRC 77
+#define GCC_PCIE_2_CFG_AHB_CLK 78
+#define GCC_PCIE_2_MSTR_AXI_CLK 79
+#define GCC_PCIE_2_PHY_RCHNG_CLK_SRC 80
+#define GCC_PCIE_2_PIPE_CLK 81
+#define GCC_PCIE_2_SLV_AXI_CLK 82
+#define GCC_PCIE_2_SLV_Q2A_AXI_CLK 83
+#define GCC_PCIE_3_AUX_CLK 84
+#define GCC_PCIE_3_AUX_CLK_SRC 85
+#define GCC_PCIE_3_CFG_AHB_CLK 86
+#define GCC_PCIE_3_MSTR_AXI_CLK 87
+#define GCC_PCIE_3_PHY_AUX_CLK 88
+#define GCC_PCIE_3_PHY_RCHNG_CLK 89
+#define GCC_PCIE_3_PHY_RCHNG_CLK_SRC 90
+#define GCC_PCIE_3_PIPE_CLK 91
+#define GCC_PCIE_3_PIPE_DIV_CLK_SRC 92
+#define GCC_PCIE_3_PIPEDIV2_CLK 93
+#define GCC_PCIE_3_SLV_AXI_CLK 94
+#define GCC_PCIE_3_SLV_Q2A_AXI_CLK 95
+#define GCC_PCIE_4_AUX_CLK 96
+#define GCC_PCIE_4_AUX_CLK_SRC 97
+#define GCC_PCIE_4_CFG_AHB_CLK 98
+#define GCC_PCIE_4_MSTR_AXI_CLK 99
+#define GCC_PCIE_4_PHY_RCHNG_CLK 100
+#define GCC_PCIE_4_PHY_RCHNG_CLK_SRC 101
+#define GCC_PCIE_4_PIPE_CLK 102
+#define GCC_PCIE_4_PIPE_DIV_CLK_SRC 103
+#define GCC_PCIE_4_PIPEDIV2_CLK 104
+#define GCC_PCIE_4_SLV_AXI_CLK 105
+#define GCC_PCIE_4_SLV_Q2A_AXI_CLK 106
+#define GCC_PCIE_5_AUX_CLK 107
+#define GCC_PCIE_5_AUX_CLK_SRC 108
+#define GCC_PCIE_5_CFG_AHB_CLK 109
+#define GCC_PCIE_5_MSTR_AXI_CLK 110
+#define GCC_PCIE_5_PHY_RCHNG_CLK 111
+#define GCC_PCIE_5_PHY_RCHNG_CLK_SRC 112
+#define GCC_PCIE_5_PIPE_CLK 113
+#define GCC_PCIE_5_PIPE_DIV_CLK_SRC 114
+#define GCC_PCIE_5_PIPEDIV2_CLK 115
+#define GCC_PCIE_5_SLV_AXI_CLK 116
+#define GCC_PCIE_5_SLV_Q2A_AXI_CLK 117
+#define GCC_PCIE_6A_AUX_CLK 118
+#define GCC_PCIE_6A_AUX_CLK_SRC 119
+#define GCC_PCIE_6A_CFG_AHB_CLK 120
+#define GCC_PCIE_6A_MSTR_AXI_CLK 121
+#define GCC_PCIE_6A_PHY_AUX_CLK 122
+#define GCC_PCIE_6A_PHY_RCHNG_CLK 123
+#define GCC_PCIE_6A_PHY_RCHNG_CLK_SRC 124
+#define GCC_PCIE_6A_PIPE_CLK 125
+#define GCC_PCIE_6A_PIPE_DIV_CLK_SRC 126
+#define GCC_PCIE_6A_PIPEDIV2_CLK 127
+#define GCC_PCIE_6A_SLV_AXI_CLK 128
+#define GCC_PCIE_6A_SLV_Q2A_AXI_CLK 129
+#define GCC_PCIE_6B_AUX_CLK 130
+#define GCC_PCIE_6B_AUX_CLK_SRC 131
+#define GCC_PCIE_6B_CFG_AHB_CLK 132
+#define GCC_PCIE_6B_MSTR_AXI_CLK 133
+#define GCC_PCIE_6B_PHY_AUX_CLK 134
+#define GCC_PCIE_6B_PHY_RCHNG_CLK 135
+#define GCC_PCIE_6B_PHY_RCHNG_CLK_SRC 136
+#define GCC_PCIE_6B_PIPE_CLK 137
+#define GCC_PCIE_6B_PIPE_DIV_CLK_SRC 138
+#define GCC_PCIE_6B_PIPEDIV2_CLK 139
+#define GCC_PCIE_6B_SLV_AXI_CLK 140
+#define GCC_PCIE_6B_SLV_Q2A_AXI_CLK 141
+#define GCC_PCIE_RSCC_AHB_CLK 142
+#define GCC_PCIE_RSCC_XO_CLK 143
+#define GCC_PCIE_RSCC_XO_CLK_SRC 144
+#define GCC_PDM2_CLK 145
+#define GCC_PDM2_CLK_SRC 146
+#define GCC_PDM_AHB_CLK 147
+#define GCC_PDM_XO4_CLK 148
+#define GCC_QMIP_AV1E_AHB_CLK 149
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 150
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 151
+#define GCC_QMIP_DISP_AHB_CLK 152
+#define GCC_QMIP_GPU_AHB_CLK 153
+#define GCC_QMIP_VIDEO_CV_CPU_AHB_CLK 154
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 155
+#define GCC_QMIP_VIDEO_V_CPU_AHB_CLK 156
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 157
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 158
+#define GCC_QUPV3_WRAP0_CORE_CLK 159
+#define GCC_QUPV3_WRAP0_QSPI_S2_CLK 160
+#define GCC_QUPV3_WRAP0_QSPI_S3_CLK 161
+#define GCC_QUPV3_WRAP0_S0_CLK 162
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 163
+#define GCC_QUPV3_WRAP0_S1_CLK 164
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 165
+#define GCC_QUPV3_WRAP0_S2_CLK 166
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 167
+#define GCC_QUPV3_WRAP0_S2_DIV_CLK_SRC 168
+#define GCC_QUPV3_WRAP0_S3_CLK 169
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 170
+#define GCC_QUPV3_WRAP0_S3_DIV_CLK_SRC 171
+#define GCC_QUPV3_WRAP0_S4_CLK 172
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 173
+#define GCC_QUPV3_WRAP0_S5_CLK 174
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 175
+#define GCC_QUPV3_WRAP0_S6_CLK 176
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 177
+#define GCC_QUPV3_WRAP0_S7_CLK 178
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 179
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 180
+#define GCC_QUPV3_WRAP1_CORE_CLK 181
+#define GCC_QUPV3_WRAP1_QSPI_S2_CLK 182
+#define GCC_QUPV3_WRAP1_QSPI_S3_CLK 183
+#define GCC_QUPV3_WRAP1_S0_CLK 184
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 185
+#define GCC_QUPV3_WRAP1_S1_CLK 186
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 187
+#define GCC_QUPV3_WRAP1_S2_CLK 188
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 189
+#define GCC_QUPV3_WRAP1_S2_DIV_CLK_SRC 190
+#define GCC_QUPV3_WRAP1_S3_CLK 191
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 192
+#define GCC_QUPV3_WRAP1_S3_DIV_CLK_SRC 193
+#define GCC_QUPV3_WRAP1_S4_CLK 194
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 195
+#define GCC_QUPV3_WRAP1_S5_CLK 196
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 197
+#define GCC_QUPV3_WRAP1_S6_CLK 198
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 199
+#define GCC_QUPV3_WRAP1_S7_CLK 200
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 201
+#define GCC_QUPV3_WRAP2_CORE_2X_CLK 202
+#define GCC_QUPV3_WRAP2_CORE_CLK 203
+#define GCC_QUPV3_WRAP2_QSPI_S2_CLK 204
+#define GCC_QUPV3_WRAP2_QSPI_S3_CLK 205
+#define GCC_QUPV3_WRAP2_S0_CLK 206
+#define GCC_QUPV3_WRAP2_S0_CLK_SRC 207
+#define GCC_QUPV3_WRAP2_S1_CLK 208
+#define GCC_QUPV3_WRAP2_S1_CLK_SRC 209
+#define GCC_QUPV3_WRAP2_S2_CLK 210
+#define GCC_QUPV3_WRAP2_S2_CLK_SRC 211
+#define GCC_QUPV3_WRAP2_S2_DIV_CLK_SRC 212
+#define GCC_QUPV3_WRAP2_S3_CLK 213
+#define GCC_QUPV3_WRAP2_S3_CLK_SRC 214
+#define GCC_QUPV3_WRAP2_S3_DIV_CLK_SRC 215
+#define GCC_QUPV3_WRAP2_S4_CLK 216
+#define GCC_QUPV3_WRAP2_S4_CLK_SRC 217
+#define GCC_QUPV3_WRAP2_S5_CLK 218
+#define GCC_QUPV3_WRAP2_S5_CLK_SRC 219
+#define GCC_QUPV3_WRAP2_S6_CLK 220
+#define GCC_QUPV3_WRAP2_S6_CLK_SRC 221
+#define GCC_QUPV3_WRAP2_S7_CLK 222
+#define GCC_QUPV3_WRAP2_S7_CLK_SRC 223
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 224
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 225
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 226
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 227
+#define GCC_QUPV3_WRAP_2_M_AHB_CLK 228
+#define GCC_QUPV3_WRAP_2_S_AHB_CLK 229
+#define GCC_SDCC2_AHB_CLK 230
+#define GCC_SDCC2_APPS_CLK 231
+#define GCC_SDCC2_APPS_CLK_SRC 232
+#define GCC_SDCC4_AHB_CLK 233
+#define GCC_SDCC4_APPS_CLK 234
+#define GCC_SDCC4_APPS_CLK_SRC 235
+#define GCC_SYS_NOC_USB_AXI_CLK 236
+#define GCC_UFS_PHY_AHB_CLK 237
+#define GCC_UFS_PHY_AXI_CLK 238
+#define GCC_UFS_PHY_AXI_CLK_SRC 239
+#define GCC_UFS_PHY_ICE_CORE_CLK 240
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 241
+#define GCC_UFS_PHY_PHY_AUX_CLK 242
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 243
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 244
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 245
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 246
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 247
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 248
+#define GCC_USB20_MASTER_CLK 249
+#define GCC_USB20_MASTER_CLK_SRC 250
+#define GCC_USB20_MOCK_UTMI_CLK 251
+#define GCC_USB20_MOCK_UTMI_CLK_SRC 252
+#define GCC_USB20_MOCK_UTMI_POSTDIV_CLK_SRC 253
+#define GCC_USB20_SLEEP_CLK 254
+#define GCC_USB30_MP_MASTER_CLK 255
+#define GCC_USB30_MP_MASTER_CLK_SRC 256
+#define GCC_USB30_MP_MOCK_UTMI_CLK 257
+#define GCC_USB30_MP_MOCK_UTMI_CLK_SRC 258
+#define GCC_USB30_MP_MOCK_UTMI_POSTDIV_CLK_SRC 259
+#define GCC_USB30_MP_SLEEP_CLK 260
+#define GCC_USB30_PRIM_MASTER_CLK 261
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 262
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 263
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 264
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 265
+#define GCC_USB30_PRIM_SLEEP_CLK 266
+#define GCC_USB30_SEC_MASTER_CLK 267
+#define GCC_USB30_SEC_MASTER_CLK_SRC 268
+#define GCC_USB30_SEC_MOCK_UTMI_CLK 269
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 270
+#define GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC 271
+#define GCC_USB30_SEC_SLEEP_CLK 272
+#define GCC_USB30_TERT_MASTER_CLK 273
+#define GCC_USB30_TERT_MASTER_CLK_SRC 274
+#define GCC_USB30_TERT_MOCK_UTMI_CLK 275
+#define GCC_USB30_TERT_MOCK_UTMI_CLK_SRC 276
+#define GCC_USB30_TERT_MOCK_UTMI_POSTDIV_CLK_SRC 277
+#define GCC_USB30_TERT_SLEEP_CLK 278
+#define GCC_USB3_MP_PHY_AUX_CLK 279
+#define GCC_USB3_MP_PHY_AUX_CLK_SRC 280
+#define GCC_USB3_MP_PHY_COM_AUX_CLK 281
+#define GCC_USB3_MP_PHY_PIPE_0_CLK 282
+#define GCC_USB3_MP_PHY_PIPE_1_CLK 283
+#define GCC_USB3_PRIM_PHY_AUX_CLK 284
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 285
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 286
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 287
+#define GCC_USB3_SEC_PHY_AUX_CLK 288
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 289
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK 290
+#define GCC_USB3_SEC_PHY_PIPE_CLK 291
+#define GCC_USB3_TERT_PHY_AUX_CLK 292
+#define GCC_USB3_TERT_PHY_AUX_CLK_SRC 293
+#define GCC_USB3_TERT_PHY_COM_AUX_CLK 294
+#define GCC_USB3_TERT_PHY_PIPE_CLK 295
+#define GCC_USB4_0_CFG_AHB_CLK 296
+#define GCC_USB4_0_DP0_CLK 297
+#define GCC_USB4_0_DP1_CLK 298
+#define GCC_USB4_0_MASTER_CLK 299
+#define GCC_USB4_0_MASTER_CLK_SRC 300
+#define GCC_USB4_0_PHY_P2RR2P_PIPE_CLK 301
+#define GCC_USB4_0_PHY_PCIE_PIPE_CLK 302
+#define GCC_USB4_0_PHY_PCIE_PIPE_CLK_SRC 303
+#define GCC_USB4_0_PHY_RX0_CLK 304
+#define GCC_USB4_0_PHY_RX1_CLK 305
+#define GCC_USB4_0_PHY_USB_PIPE_CLK 306
+#define GCC_USB4_0_SB_IF_CLK 307
+#define GCC_USB4_0_SB_IF_CLK_SRC 308
+#define GCC_USB4_0_SYS_CLK 309
+#define GCC_USB4_0_TMU_CLK 310
+#define GCC_USB4_0_TMU_CLK_SRC 311
+#define GCC_USB4_1_CFG_AHB_CLK 312
+#define GCC_USB4_1_DP0_CLK 313
+#define GCC_USB4_1_DP1_CLK 314
+#define GCC_USB4_1_MASTER_CLK 315
+#define GCC_USB4_1_MASTER_CLK_SRC 316
+#define GCC_USB4_1_PHY_P2RR2P_PIPE_CLK 317
+#define GCC_USB4_1_PHY_PCIE_PIPE_CLK 318
+#define GCC_USB4_1_PHY_PCIE_PIPE_CLK_SRC 319
+#define GCC_USB4_1_PHY_RX0_CLK 320
+#define GCC_USB4_1_PHY_RX1_CLK 321
+#define GCC_USB4_1_PHY_USB_PIPE_CLK 322
+#define GCC_USB4_1_SB_IF_CLK 323
+#define GCC_USB4_1_SB_IF_CLK_SRC 324
+#define GCC_USB4_1_SYS_CLK 325
+#define GCC_USB4_1_TMU_CLK 326
+#define GCC_USB4_1_TMU_CLK_SRC 327
+#define GCC_USB4_2_CFG_AHB_CLK 328
+#define GCC_USB4_2_DP0_CLK 329
+#define GCC_USB4_2_DP1_CLK 330
+#define GCC_USB4_2_MASTER_CLK 331
+#define GCC_USB4_2_MASTER_CLK_SRC 332
+#define GCC_USB4_2_PHY_P2RR2P_PIPE_CLK 333
+#define GCC_USB4_2_PHY_PCIE_PIPE_CLK 334
+#define GCC_USB4_2_PHY_PCIE_PIPE_CLK_SRC 335
+#define GCC_USB4_2_PHY_RX0_CLK 336
+#define GCC_USB4_2_PHY_RX1_CLK 337
+#define GCC_USB4_2_PHY_USB_PIPE_CLK 338
+#define GCC_USB4_2_SB_IF_CLK 339
+#define GCC_USB4_2_SB_IF_CLK_SRC 340
+#define GCC_USB4_2_SYS_CLK 341
+#define GCC_USB4_2_TMU_CLK 342
+#define GCC_USB4_2_TMU_CLK_SRC 343
+#define GCC_VIDEO_AHB_CLK 344
+#define GCC_VIDEO_AXI0_CLK 345
+#define GCC_VIDEO_AXI1_CLK 346
+#define GCC_VIDEO_XO_CLK 347
+#define GCC_PCIE_3_PIPE_CLK_SRC 348
+#define GCC_PCIE_4_PIPE_CLK_SRC 349
+#define GCC_PCIE_5_PIPE_CLK_SRC 350
+#define GCC_PCIE_6A_PIPE_CLK_SRC 351
+#define GCC_PCIE_6B_PIPE_CLK_SRC 352
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 353
+#define GCC_USB3_SEC_PHY_PIPE_CLK_SRC 354
+#define GCC_USB3_TERT_PHY_PIPE_CLK_SRC 355
+
+/* GCC power domains */
+#define GCC_PCIE_0_TUNNEL_GDSC 0
+#define GCC_PCIE_1_TUNNEL_GDSC 1
+#define GCC_PCIE_2_TUNNEL_GDSC 2
+#define GCC_PCIE_3_GDSC 3
+#define GCC_PCIE_3_PHY_GDSC 4
+#define GCC_PCIE_4_GDSC 5
+#define GCC_PCIE_4_PHY_GDSC 6
+#define GCC_PCIE_5_GDSC 7
+#define GCC_PCIE_5_PHY_GDSC 8
+#define GCC_PCIE_6_PHY_GDSC 9
+#define GCC_PCIE_6A_GDSC 10
+#define GCC_PCIE_6B_GDSC 11
+#define GCC_UFS_MEM_PHY_GDSC 12
+#define GCC_UFS_PHY_GDSC 13
+#define GCC_USB20_PRIM_GDSC 14
+#define GCC_USB30_MP_GDSC 15
+#define GCC_USB30_PRIM_GDSC 16
+#define GCC_USB30_SEC_GDSC 17
+#define GCC_USB30_TERT_GDSC 18
+#define GCC_USB3_MP_SS0_PHY_GDSC 19
+#define GCC_USB3_MP_SS1_PHY_GDSC 20
+#define GCC_USB4_0_GDSC 21
+#define GCC_USB4_1_GDSC 22
+#define GCC_USB4_2_GDSC 23
+#define GCC_USB_0_PHY_GDSC 24
+#define GCC_USB_1_PHY_GDSC 25
+#define GCC_USB_2_PHY_GDSC 26
+
+/* GCC resets */
+#define GCC_AV1E_BCR 0
+#define GCC_CAMERA_BCR 1
+#define GCC_DISPLAY_BCR 2
+#define GCC_GPU_BCR 3
+#define GCC_PCIE_0_LINK_DOWN_BCR 4
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 5
+#define GCC_PCIE_0_PHY_BCR 6
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 7
+#define GCC_PCIE_0_TUNNEL_BCR 8
+#define GCC_PCIE_1_LINK_DOWN_BCR 9
+#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 10
+#define GCC_PCIE_1_PHY_BCR 11
+#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 12
+#define GCC_PCIE_1_TUNNEL_BCR 13
+#define GCC_PCIE_2_LINK_DOWN_BCR 14
+#define GCC_PCIE_2_NOCSR_COM_PHY_BCR 15
+#define GCC_PCIE_2_PHY_BCR 16
+#define GCC_PCIE_2_PHY_NOCSR_COM_PHY_BCR 17
+#define GCC_PCIE_2_TUNNEL_BCR 18
+#define GCC_PCIE_3_BCR 19
+#define GCC_PCIE_3_LINK_DOWN_BCR 20
+#define GCC_PCIE_3_NOCSR_COM_PHY_BCR 21
+#define GCC_PCIE_3_PHY_BCR 22
+#define GCC_PCIE_3_PHY_NOCSR_COM_PHY_BCR 23
+#define GCC_PCIE_4_BCR 24
+#define GCC_PCIE_4_LINK_DOWN_BCR 25
+#define GCC_PCIE_4_NOCSR_COM_PHY_BCR 26
+#define GCC_PCIE_4_PHY_BCR 27
+#define GCC_PCIE_4_PHY_NOCSR_COM_PHY_BCR 28
+#define GCC_PCIE_5_BCR 29
+#define GCC_PCIE_5_LINK_DOWN_BCR 30
+#define GCC_PCIE_5_NOCSR_COM_PHY_BCR 31
+#define GCC_PCIE_5_PHY_BCR 32
+#define GCC_PCIE_5_PHY_NOCSR_COM_PHY_BCR 33
+#define GCC_PCIE_6A_BCR 34
+#define GCC_PCIE_6A_LINK_DOWN_BCR 35
+#define GCC_PCIE_6A_NOCSR_COM_PHY_BCR 36
+#define GCC_PCIE_6A_PHY_BCR 37
+#define GCC_PCIE_6A_PHY_NOCSR_COM_PHY_BCR 38
+#define GCC_PCIE_6B_BCR 39
+#define GCC_PCIE_6B_LINK_DOWN_BCR 40
+#define GCC_PCIE_6B_NOCSR_COM_PHY_BCR 41
+#define GCC_PCIE_6B_PHY_BCR 42
+#define GCC_PCIE_6B_PHY_NOCSR_COM_PHY_BCR 43
+#define GCC_PCIE_PHY_BCR 44
+#define GCC_PCIE_PHY_CFG_AHB_BCR 45
+#define GCC_PCIE_PHY_COM_BCR 46
+#define GCC_PCIE_RSCC_BCR 47
+#define GCC_PDM_BCR 48
+#define GCC_QUPV3_WRAPPER_0_BCR 49
+#define GCC_QUPV3_WRAPPER_1_BCR 50
+#define GCC_QUPV3_WRAPPER_2_BCR 51
+#define GCC_QUSB2PHY_HS0_MP_BCR 52
+#define GCC_QUSB2PHY_HS1_MP_BCR 53
+#define GCC_QUSB2PHY_PRIM_BCR 54
+#define GCC_QUSB2PHY_SEC_BCR 55
+#define GCC_QUSB2PHY_TERT_BCR 56
+#define GCC_QUSB2PHY_USB20_HS_BCR 57
+#define GCC_SDCC2_BCR 58
+#define GCC_SDCC4_BCR 59
+#define GCC_UFS_PHY_BCR 60
+#define GCC_USB20_PRIM_BCR 61
+#define GCC_USB30_MP_BCR 62
+#define GCC_USB30_PRIM_BCR 63
+#define GCC_USB30_SEC_BCR 64
+#define GCC_USB30_TERT_BCR 65
+#define GCC_USB3_MP_SS0_PHY_BCR 66
+#define GCC_USB3_MP_SS1_PHY_BCR 67
+#define GCC_USB3_PHY_PRIM_BCR 68
+#define GCC_USB3_PHY_SEC_BCR 69
+#define GCC_USB3_PHY_TERT_BCR 70
+#define GCC_USB3_UNIPHY_MP0_BCR 71
+#define GCC_USB3_UNIPHY_MP1_BCR 72
+#define GCC_USB3PHY_PHY_PRIM_BCR 73
+#define GCC_USB3PHY_PHY_SEC_BCR 74
+#define GCC_USB3PHY_PHY_TERT_BCR 75
+#define GCC_USB3UNIPHY_PHY_MP0_BCR 76
+#define GCC_USB3UNIPHY_PHY_MP1_BCR 77
+#define GCC_USB4_0_BCR 78
+#define GCC_USB4_0_DP0_PHY_PRIM_BCR 79
+#define GCC_USB4_1_DP0_PHY_SEC_BCR 80
+#define GCC_USB4_2_DP0_PHY_TERT_BCR 81
+#define GCC_USB4_1_BCR 82
+#define GCC_USB4_2_BCR 83
+#define GCC_USB_0_PHY_BCR 84
+#define GCC_USB_1_PHY_BCR 85
+#define GCC_USB_2_PHY_BCR 86
+#define GCC_VIDEO_BCR 87
+#endif
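
Besides plain clock references, a consumer can pin rates on these IDs with assigned-clocks and attach to a GDSC through power-domains. A sketch under assumed names (compatible, unit address, clock-names, and the rates shown are illustrative assumptions):

    usb@a6f8800 {
            compatible = "qcom,x1e80100-dwc3";          /* assumed */
            clocks = <&gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
                     <&gcc GCC_USB30_PRIM_MASTER_CLK>,
                     <&gcc GCC_USB30_PRIM_SLEEP_CLK>;
            clock-names = "cfg_noc", "core", "sleep";
            /* request fixed rates on the RCG sources */
            assigned-clocks = <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC>,
                              <&gcc GCC_USB30_PRIM_MASTER_CLK_SRC>;
            assigned-clock-rates = <19200000>, <200000000>;
            power-domains = <&gcc GCC_USB30_PRIM_GDSC>;
    };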
diff --git a/include/dt-bindings/clock/r8a779f0-cpg-mssr.h b/include/dt-bindings/clock/r8a779f0-cpg-mssr.h
index f2ae1c6a82dd..c34be5624954 100644
--- a/include/dt-bindings/clock/r8a779f0-cpg-mssr.h
+++ b/include/dt-bindings/clock/r8a779f0-cpg-mssr.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
/*
* Copyright (C) 2021 Renesas Electronics Corp.
*/
diff --git a/include/dt-bindings/clock/r9a08g045-cpg.h b/include/dt-bindings/clock/r9a08g045-cpg.h
new file mode 100644
index 000000000000..410725b778a8
--- /dev/null
+++ b/include/dt-bindings/clock/r9a08g045-cpg.h
@@ -0,0 +1,242 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+ *
+ * Copyright (C) 2023 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R9A08G045_CPG_H__
+#define __DT_BINDINGS_CLOCK_R9A08G045_CPG_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* R9A08G045 CPG Core Clocks */
+#define R9A08G045_CLK_I 0
+#define R9A08G045_CLK_I2 1
+#define R9A08G045_CLK_I3 2
+#define R9A08G045_CLK_S0 3
+#define R9A08G045_CLK_SPI0 4
+#define R9A08G045_CLK_SPI1 5
+#define R9A08G045_CLK_SD0 6
+#define R9A08G045_CLK_SD1 7
+#define R9A08G045_CLK_SD2 8
+#define R9A08G045_CLK_M0 9
+#define R9A08G045_CLK_HP 10
+#define R9A08G045_CLK_TSU 11
+#define R9A08G045_CLK_ZT 12
+#define R9A08G045_CLK_P0 13
+#define R9A08G045_CLK_P1 14
+#define R9A08G045_CLK_P2 15
+#define R9A08G045_CLK_P3 16
+#define R9A08G045_CLK_P4 17
+#define R9A08G045_CLK_P5 18
+#define R9A08G045_CLK_AT 19
+#define R9A08G045_CLK_OC0 20
+#define R9A08G045_CLK_OC1 21
+#define R9A08G045_OSCCLK 22
+#define R9A08G045_OSCCLK2 23
+#define R9A08G045_SWD 24
+
+/* R9A08G045 Module Clocks */
+#define R9A08G045_OCTA_ACLK 0
+#define R9A08G045_OCTA_MCLK 1
+#define R9A08G045_CA55_SCLK 2
+#define R9A08G045_CA55_PCLK 3
+#define R9A08G045_CA55_ATCLK 4
+#define R9A08G045_CA55_GICCLK 5
+#define R9A08G045_CA55_PERICLK 6
+#define R9A08G045_CA55_ACLK 7
+#define R9A08G045_CA55_TSCLK 8
+#define R9A08G045_SRAM_ACPU_ACLK0 9
+#define R9A08G045_SRAM_ACPU_ACLK1 10
+#define R9A08G045_SRAM_ACPU_ACLK2 11
+#define R9A08G045_GIC600_GICCLK 12
+#define R9A08G045_IA55_CLK 13
+#define R9A08G045_IA55_PCLK 14
+#define R9A08G045_MHU_PCLK 15
+#define R9A08G045_SYC_CNT_CLK 16
+#define R9A08G045_DMAC_ACLK 17
+#define R9A08G045_DMAC_PCLK 18
+#define R9A08G045_OSTM0_PCLK 19
+#define R9A08G045_OSTM1_PCLK 20
+#define R9A08G045_OSTM2_PCLK 21
+#define R9A08G045_OSTM3_PCLK 22
+#define R9A08G045_OSTM4_PCLK 23
+#define R9A08G045_OSTM5_PCLK 24
+#define R9A08G045_OSTM6_PCLK 25
+#define R9A08G045_OSTM7_PCLK 26
+#define R9A08G045_MTU_X_MCK_MTU3 27
+#define R9A08G045_POE3_CLKM_POE 28
+#define R9A08G045_GPT_PCLK 29
+#define R9A08G045_POEG_A_CLKP 30
+#define R9A08G045_POEG_B_CLKP 31
+#define R9A08G045_POEG_C_CLKP 32
+#define R9A08G045_POEG_D_CLKP 33
+#define R9A08G045_WDT0_PCLK 34
+#define R9A08G045_WDT0_CLK 35
+#define R9A08G045_WDT1_PCLK 36
+#define R9A08G045_WDT1_CLK 37
+#define R9A08G045_WDT2_PCLK 38
+#define R9A08G045_WDT2_CLK 39
+#define R9A08G045_SPI_HCLK 40
+#define R9A08G045_SPI_ACLK 41
+#define R9A08G045_SPI_CLK 42
+#define R9A08G045_SPI_CLKX2 43
+#define R9A08G045_SDHI0_IMCLK 44
+#define R9A08G045_SDHI0_IMCLK2 45
+#define R9A08G045_SDHI0_CLK_HS 46
+#define R9A08G045_SDHI0_ACLK 47
+#define R9A08G045_SDHI1_IMCLK 48
+#define R9A08G045_SDHI1_IMCLK2 49
+#define R9A08G045_SDHI1_CLK_HS 50
+#define R9A08G045_SDHI1_ACLK 51
+#define R9A08G045_SDHI2_IMCLK 52
+#define R9A08G045_SDHI2_IMCLK2 53
+#define R9A08G045_SDHI2_CLK_HS 54
+#define R9A08G045_SDHI2_ACLK 55
+#define R9A08G045_SSI0_PCLK2 56
+#define R9A08G045_SSI0_PCLK_SFR 57
+#define R9A08G045_SSI1_PCLK2 58
+#define R9A08G045_SSI1_PCLK_SFR 59
+#define R9A08G045_SSI2_PCLK2 60
+#define R9A08G045_SSI2_PCLK_SFR 61
+#define R9A08G045_SSI3_PCLK2 62
+#define R9A08G045_SSI3_PCLK_SFR 63
+#define R9A08G045_SRC_CLKP 64
+#define R9A08G045_USB_U2H0_HCLK 65
+#define R9A08G045_USB_U2H1_HCLK 66
+#define R9A08G045_USB_U2P_EXR_CPUCLK 67
+#define R9A08G045_USB_PCLK 68
+#define R9A08G045_ETH0_CLK_AXI 69
+#define R9A08G045_ETH0_CLK_CHI 70
+#define R9A08G045_ETH0_REFCLK 71
+#define R9A08G045_ETH1_CLK_AXI 72
+#define R9A08G045_ETH1_CLK_CHI 73
+#define R9A08G045_ETH1_REFCLK 74
+#define R9A08G045_I2C0_PCLK 75
+#define R9A08G045_I2C1_PCLK 76
+#define R9A08G045_I2C2_PCLK 77
+#define R9A08G045_I2C3_PCLK 78
+#define R9A08G045_SCIF0_CLK_PCK 79
+#define R9A08G045_SCIF1_CLK_PCK 80
+#define R9A08G045_SCIF2_CLK_PCK 81
+#define R9A08G045_SCIF3_CLK_PCK 82
+#define R9A08G045_SCIF4_CLK_PCK 83
+#define R9A08G045_SCIF5_CLK_PCK 84
+#define R9A08G045_SCI0_CLKP 85
+#define R9A08G045_SCI1_CLKP 86
+#define R9A08G045_IRDA_CLKP 87
+#define R9A08G045_RSPI0_CLKB 88
+#define R9A08G045_RSPI1_CLKB 89
+#define R9A08G045_RSPI2_CLKB 90
+#define R9A08G045_RSPI3_CLKB 91
+#define R9A08G045_RSPI4_CLKB 92
+#define R9A08G045_CANFD_PCLK 93
+#define R9A08G045_CANFD_CLK_RAM 94
+#define R9A08G045_GPIO_HCLK 95
+#define R9A08G045_ADC_ADCLK 96
+#define R9A08G045_ADC_PCLK 97
+#define R9A08G045_TSU_PCLK 98
+#define R9A08G045_PDM_PCLK 99
+#define R9A08G045_PDM_CCLK 100
+#define R9A08G045_PCI_ACLK 101
+#define R9A08G045_PCI_CLKL1PM 102
+#define R9A08G045_SPDIF_PCLK 103
+#define R9A08G045_I3C_PCLK 104
+#define R9A08G045_I3C_TCLK 105
+#define R9A08G045_VBAT_BCLK 106
+
+/* R9A08G045 Resets */
+#define R9A08G045_CA55_RST_1_0 0
+#define R9A08G045_CA55_RST_3_0 1
+#define R9A08G045_CA55_RST_4 2
+#define R9A08G045_CA55_RST_5 3
+#define R9A08G045_CA55_RST_6 4
+#define R9A08G045_CA55_RST_7 5
+#define R9A08G045_CA55_RST_8 6
+#define R9A08G045_CA55_RST_9 7
+#define R9A08G045_CA55_RST_10 8
+#define R9A08G045_CA55_RST_11 9
+#define R9A08G045_CA55_RST_12 10
+#define R9A08G045_SRAM_ACPU_ARESETN0 11
+#define R9A08G045_SRAM_ACPU_ARESETN1 12
+#define R9A08G045_SRAM_ACPU_ARESETN2 13
+#define R9A08G045_GIC600_GICRESET_N 14
+#define R9A08G045_GIC600_DBG_GICRESET_N 15
+#define R9A08G045_IA55_RESETN 16
+#define R9A08G045_MHU_RESETN 17
+#define R9A08G045_DMAC_ARESETN 18
+#define R9A08G045_DMAC_RST_ASYNC 19
+#define R9A08G045_SYC_RESETN 20
+#define R9A08G045_OSTM0_PRESETZ 21
+#define R9A08G045_OSTM1_PRESETZ 22
+#define R9A08G045_OSTM2_PRESETZ 23
+#define R9A08G045_OSTM3_PRESETZ 24
+#define R9A08G045_OSTM4_PRESETZ 25
+#define R9A08G045_OSTM5_PRESETZ 26
+#define R9A08G045_OSTM6_PRESETZ 27
+#define R9A08G045_OSTM7_PRESETZ 28
+#define R9A08G045_MTU_X_PRESET_MTU3 29
+#define R9A08G045_POE3_RST_M_REG 30
+#define R9A08G045_GPT_RST_C 31
+#define R9A08G045_POEG_A_RST 32
+#define R9A08G045_POEG_B_RST 33
+#define R9A08G045_POEG_C_RST 34
+#define R9A08G045_POEG_D_RST 35
+#define R9A08G045_WDT0_PRESETN 36
+#define R9A08G045_WDT1_PRESETN 37
+#define R9A08G045_WDT2_PRESETN 38
+#define R9A08G045_SPI_HRESETN 39
+#define R9A08G045_SPI_ARESETN 40
+#define R9A08G045_SDHI0_IXRST 41
+#define R9A08G045_SDHI1_IXRST 42
+#define R9A08G045_SDHI2_IXRST 43
+#define R9A08G045_SSI0_RST_M2_REG 44
+#define R9A08G045_SSI1_RST_M2_REG 45
+#define R9A08G045_SSI2_RST_M2_REG 46
+#define R9A08G045_SSI3_RST_M2_REG 47
+#define R9A08G045_SRC_RST 48
+#define R9A08G045_USB_U2H0_HRESETN 49
+#define R9A08G045_USB_U2H1_HRESETN 50
+#define R9A08G045_USB_U2P_EXL_SYSRST 51
+#define R9A08G045_USB_PRESETN 52
+#define R9A08G045_ETH0_RST_HW_N 53
+#define R9A08G045_ETH1_RST_HW_N 54
+#define R9A08G045_I2C0_MRST 55
+#define R9A08G045_I2C1_MRST 56
+#define R9A08G045_I2C2_MRST 57
+#define R9A08G045_I2C3_MRST 58
+#define R9A08G045_SCIF0_RST_SYSTEM_N 59
+#define R9A08G045_SCIF1_RST_SYSTEM_N 60
+#define R9A08G045_SCIF2_RST_SYSTEM_N 61
+#define R9A08G045_SCIF3_RST_SYSTEM_N 62
+#define R9A08G045_SCIF4_RST_SYSTEM_N 63
+#define R9A08G045_SCIF5_RST_SYSTEM_N 64
+#define R9A08G045_SCI0_RST 65
+#define R9A08G045_SCI1_RST 66
+#define R9A08G045_IRDA_RST 67
+#define R9A08G045_RSPI0_RST 68
+#define R9A08G045_RSPI1_RST 69
+#define R9A08G045_RSPI2_RST 70
+#define R9A08G045_RSPI3_RST 71
+#define R9A08G045_RSPI4_RST 72
+#define R9A08G045_CANFD_RSTP_N 73
+#define R9A08G045_CANFD_RSTC_N 74
+#define R9A08G045_GPIO_RSTN 75
+#define R9A08G045_GPIO_PORT_RESETN 76
+#define R9A08G045_GPIO_SPARE_RESETN 77
+#define R9A08G045_ADC_PRESETN 78
+#define R9A08G045_ADC_ADRST_N 79
+#define R9A08G045_TSU_PRESETN 80
+#define R9A08G045_OCTA_ARESETN 81
+#define R9A08G045_PDM0_PRESETNT 82
+#define R9A08G045_PCI_ARESETN 83
+#define R9A08G045_PCI_RST_B 84
+#define R9A08G045_PCI_RST_GP_B 85
+#define R9A08G045_PCI_RST_PS_B 86
+#define R9A08G045_PCI_RST_RSM_B 87
+#define R9A08G045_PCI_RST_CFG_B 88
+#define R9A08G045_PCI_RST_LOAD_B 89
+#define R9A08G045_SPDIF_RST 90
+#define R9A08G045_I3C_TRESETN 91
+#define R9A08G045_I3C_PRESETN 92
+#define R9A08G045_VBAT_BRESETN 93
+
+#endif /* __DT_BINDINGS_CLOCK_R9A08G045_CPG_H__ */
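
Unlike the single-cell Qualcomm bindings above, this header includes renesas-cpg-mssr.h, so module clocks are addressed with a two-cell specifier (CPG_MOD plus the module index; core clocks use CPG_CORE), while resets take a single cell. A sketch assuming a SCIF serial node (the unit address and compatible are illustrative):

    serial@1004b800 {
            compatible = "renesas,scif-r9a08g045";      /* assumed */
            /* CPG_MOD selects the module-clock index space */
            clocks = <&cpg CPG_MOD R9A08G045_SCIF0_CLK_PCK>;
            clock-names = "fck";
            resets = <&cpg R9A08G045_SCIF0_RST_SYSTEM_N>;
            power-domains = <&cpg>;
    };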
diff --git a/include/dt-bindings/clock/rockchip,rk3588-cru.h b/include/dt-bindings/clock/rockchip,rk3588-cru.h
index b5616bca7b44..5790b1391201 100644
--- a/include/dt-bindings/clock/rockchip,rk3588-cru.h
+++ b/include/dt-bindings/clock/rockchip,rk3588-cru.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
/*
* Copyright (c) 2021 Rockchip Electronics Co. Ltd.
* Copyright (c) 2022 Collabora Ltd.
diff --git a/include/dt-bindings/clock/samsung,exynosautov9.h b/include/dt-bindings/clock/samsung,exynosautov9.h
index 42133af6d6b9..3065375c2d8b 100644
--- a/include/dt-bindings/clock/samsung,exynosautov9.h
+++ b/include/dt-bindings/clock/samsung,exynosautov9.h
@@ -166,16 +166,12 @@
#define GOUT_CLKCMU_PERIC1_IP 248
#define GOUT_CLKCMU_PERIS_BUS 249
-#define TOP_NR_CLK 250
-
/* CMU_BUSMC */
#define CLK_MOUT_BUSMC_BUS_USER 1
#define CLK_DOUT_BUSMC_BUSP 2
#define CLK_GOUT_BUSMC_PDMA0_PCLK 3
#define CLK_GOUT_BUSMC_SPDMA_PCLK 4
-#define BUSMC_NR_CLK 5
-
/* CMU_CORE */
#define CLK_MOUT_CORE_BUS_USER 1
#define CLK_DOUT_CORE_BUSP 2
@@ -183,8 +179,6 @@
#define CLK_GOUT_CORE_CCI_PCLK 4
#define CLK_GOUT_CORE_CMU_CORE_PCLK 5
-#define CORE_NR_CLK 6
-
/* CMU_FSYS0 */
#define CLK_MOUT_FSYS0_BUS_USER 1
#define CLK_MOUT_FSYS0_PCIE_USER 2
@@ -226,8 +220,6 @@
#define CLK_GOUT_FSYS0_PCIE_GEN3A_4L_CLK 35
#define CLK_GOUT_FSYS0_PCIE_GEN3B_4L_CLK 36
-#define FSYS0_NR_CLK 37
-
/* CMU_FSYS1 */
#define FOUT_MMC_PLL 1
@@ -251,8 +243,6 @@
#define CLK_GOUT_FSYS1_USB30_0_ACLK 17
#define CLK_GOUT_FSYS1_USB30_1_ACLK 18
-#define FSYS1_NR_CLK 19
-
/* CMU_FSYS2 */
#define CLK_MOUT_FSYS2_BUS_USER 1
#define CLK_MOUT_FSYS2_UFS_EMBD_USER 2
@@ -262,8 +252,6 @@
#define CLK_GOUT_FSYS2_UFS_EMBD1_ACLK 6
#define CLK_GOUT_FSYS2_UFS_EMBD1_UNIPRO 7
-#define FSYS2_NR_CLK 8
-
/* CMU_PERIC0 */
#define CLK_MOUT_PERIC0_BUS_USER 1
#define CLK_MOUT_PERIC0_IP_USER 2
@@ -308,8 +296,6 @@
#define CLK_GOUT_PERIC0_PCLK_10 42
#define CLK_GOUT_PERIC0_PCLK_11 43
-#define PERIC0_NR_CLK 44
-
/* CMU_PERIC1 */
#define CLK_MOUT_PERIC1_BUS_USER 1
#define CLK_MOUT_PERIC1_IP_USER 2
@@ -354,14 +340,10 @@
#define CLK_GOUT_PERIC1_PCLK_10 42
#define CLK_GOUT_PERIC1_PCLK_11 43
-#define PERIC1_NR_CLK 44
-
/* CMU_PERIS */
#define CLK_MOUT_PERIS_BUS_USER 1
#define CLK_GOUT_SYSREG_PERIS_PCLK 2
#define CLK_GOUT_WDT_CLUSTER0 3
#define CLK_GOUT_WDT_CLUSTER1 4
-#define PERIS_NR_CLK 5
-
#endif /* _DT_BINDINGS_CLOCK_EXYNOSAUTOV9_H */
diff --git a/include/dt-bindings/clock/sophgo,cv1800.h b/include/dt-bindings/clock/sophgo,cv1800.h
new file mode 100644
index 000000000000..cfbeca25a650
--- /dev/null
+++ b/include/dt-bindings/clock/sophgo,cv1800.h
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) 2023 Sophgo Ltd.
+ */
+
+#ifndef __DT_BINDINGS_SOPHGO_CV1800_CLK_H__
+#define __DT_BINDINGS_SOPHGO_CV1800_CLK_H__
+
+#define CLK_MPLL 0
+#define CLK_TPLL 1
+#define CLK_FPLL 2
+#define CLK_MIPIMPLL 3
+#define CLK_A0PLL 4
+#define CLK_DISPPLL 5
+#define CLK_CAM0PLL 6
+#define CLK_CAM1PLL 7
+
+#define CLK_MIPIMPLL_D3 8
+#define CLK_CAM0PLL_D2 9
+#define CLK_CAM0PLL_D3 10
+
+#define CLK_TPU 11
+#define CLK_TPU_FAB 12
+#define CLK_AHB_ROM 13
+#define CLK_DDR_AXI_REG 14
+#define CLK_RTC_25M 15
+#define CLK_SRC_RTC_SYS_0 16
+#define CLK_TEMPSEN 17
+#define CLK_SARADC 18
+#define CLK_EFUSE 19
+#define CLK_APB_EFUSE 20
+#define CLK_DEBUG 21
+#define CLK_AP_DEBUG 22
+#define CLK_XTAL_MISC 23
+#define CLK_AXI4_EMMC 24
+#define CLK_EMMC 25
+#define CLK_EMMC_100K 26
+#define CLK_AXI4_SD0 27
+#define CLK_SD0 28
+#define CLK_SD0_100K 29
+#define CLK_AXI4_SD1 30
+#define CLK_SD1 31
+#define CLK_SD1_100K 32
+#define CLK_SPI_NAND 33
+#define CLK_ETH0_500M 34
+#define CLK_AXI4_ETH0 35
+#define CLK_ETH1_500M 36
+#define CLK_AXI4_ETH1 37
+#define CLK_APB_GPIO 38
+#define CLK_APB_GPIO_INTR 39
+#define CLK_GPIO_DB 40
+#define CLK_AHB_SF 41
+#define CLK_AHB_SF1 42
+#define CLK_A24M 43
+#define CLK_AUDSRC 44
+#define CLK_APB_AUDSRC 45
+#define CLK_SDMA_AXI 46
+#define CLK_SDMA_AUD0 47
+#define CLK_SDMA_AUD1 48
+#define CLK_SDMA_AUD2 49
+#define CLK_SDMA_AUD3 50
+#define CLK_I2C 51
+#define CLK_APB_I2C 52
+#define CLK_APB_I2C0 53
+#define CLK_APB_I2C1 54
+#define CLK_APB_I2C2 55
+#define CLK_APB_I2C3 56
+#define CLK_APB_I2C4 57
+#define CLK_APB_WDT 58
+#define CLK_PWM_SRC 59
+#define CLK_PWM 60
+#define CLK_SPI 61
+#define CLK_APB_SPI0 62
+#define CLK_APB_SPI1 63
+#define CLK_APB_SPI2 64
+#define CLK_APB_SPI3 65
+#define CLK_1M 66
+#define CLK_CAM0_200 67
+#define CLK_PM 68
+#define CLK_TIMER0 69
+#define CLK_TIMER1 70
+#define CLK_TIMER2 71
+#define CLK_TIMER3 72
+#define CLK_TIMER4 73
+#define CLK_TIMER5 74
+#define CLK_TIMER6 75
+#define CLK_TIMER7 76
+#define CLK_UART0 77
+#define CLK_APB_UART0 78
+#define CLK_UART1 79
+#define CLK_APB_UART1 80
+#define CLK_UART2 81
+#define CLK_APB_UART2 82
+#define CLK_UART3 83
+#define CLK_APB_UART3 84
+#define CLK_UART4 85
+#define CLK_APB_UART4 86
+#define CLK_APB_I2S0 87
+#define CLK_APB_I2S1 88
+#define CLK_APB_I2S2 89
+#define CLK_APB_I2S3 90
+#define CLK_AXI4_USB 91
+#define CLK_APB_USB 92
+#define CLK_USB_125M 93
+#define CLK_USB_33K 94
+#define CLK_USB_12M 95
+#define CLK_AXI4 96
+#define CLK_AXI6 97
+#define CLK_DSI_ESC 98
+#define CLK_AXI_VIP 99
+#define CLK_SRC_VIP_SYS_0 100
+#define CLK_SRC_VIP_SYS_1 101
+#define CLK_SRC_VIP_SYS_2 102
+#define CLK_SRC_VIP_SYS_3 103
+#define CLK_SRC_VIP_SYS_4 104
+#define CLK_CSI_BE_VIP 105
+#define CLK_CSI_MAC0_VIP 106
+#define CLK_CSI_MAC1_VIP 107
+#define CLK_CSI_MAC2_VIP 108
+#define CLK_CSI0_RX_VIP 109
+#define CLK_CSI1_RX_VIP 110
+#define CLK_ISP_TOP_VIP 111
+#define CLK_IMG_D_VIP 112
+#define CLK_IMG_V_VIP 113
+#define CLK_SC_TOP_VIP 114
+#define CLK_SC_D_VIP 115
+#define CLK_SC_V1_VIP 116
+#define CLK_SC_V2_VIP 117
+#define CLK_SC_V3_VIP 118
+#define CLK_DWA_VIP 119
+#define CLK_BT_VIP 120
+#define CLK_DISP_VIP 121
+#define CLK_DSI_MAC_VIP 122
+#define CLK_LVDS0_VIP 123
+#define CLK_LVDS1_VIP 124
+#define CLK_PAD_VI_VIP 125
+#define CLK_PAD_VI1_VIP 126
+#define CLK_PAD_VI2_VIP 127
+#define CLK_CFG_REG_VIP 128
+#define CLK_VIP_IP0 129
+#define CLK_VIP_IP1 130
+#define CLK_VIP_IP2 131
+#define CLK_VIP_IP3 132
+#define CLK_IVE_VIP 133
+#define CLK_RAW_VIP 134
+#define CLK_OSDC_VIP 135
+#define CLK_CAM0_VIP 136
+#define CLK_AXI_VIDEO_CODEC 137
+#define CLK_VC_SRC0 138
+#define CLK_VC_SRC1 139
+#define CLK_VC_SRC2 140
+#define CLK_H264C 141
+#define CLK_APB_H264C 142
+#define CLK_H265C 143
+#define CLK_APB_H265C 144
+#define CLK_JPEG 145
+#define CLK_APB_JPEG 146
+#define CLK_CAM0 147
+#define CLK_CAM1 148
+#define CLK_WGN 149
+#define CLK_WGN0 150
+#define CLK_WGN1 151
+#define CLK_WGN2 152
+#define CLK_KEYSCAN 153
+#define CLK_CFG_REG_VC 154
+#define CLK_C906_0 155
+#define CLK_C906_1 156
+#define CLK_A53 157
+#define CLK_CPU_AXI0 158
+#define CLK_CPU_GIC 159
+#define CLK_XTAL_AP 160
+
+/* Only for CV181x */
+#define CLK_DISP_SRC_VIP 161
+
+#endif /* __DT_BINDINGS_SOPHGO_CV1800_CLK_H__ */
diff --git a/include/dt-bindings/clock/st,stm32mp25-rcc.h b/include/dt-bindings/clock/st,stm32mp25-rcc.h
new file mode 100644
index 000000000000..b6cf05ad4be6
--- /dev/null
+++ b/include/dt-bindings/clock/st,stm32mp25-rcc.h
@@ -0,0 +1,492 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) STMicroelectronics 2023 - All Rights Reserved
+ * Author: Gabriel Fernandez <gabriel.fernandez@foss.st.com>
+ */
+
+#ifndef _DT_BINDINGS_STM32MP25_CLKS_H_
+#define _DT_BINDINGS_STM32MP25_CLKS_H_
+
+/* INTERNAL/EXTERNAL OSCILLATORS */
+#define HSI_CK 0
+#define HSE_CK 1
+#define MSI_CK 2
+#define LSI_CK 3
+#define LSE_CK 4
+#define I2S_CK 5
+#define RTC_CK 6
+#define SPDIF_CK_SYMB 7
+
+/* PLL CLOCKS */
+#define PLL1_CK 8
+#define PLL2_CK 9
+#define PLL3_CK 10
+#define PLL4_CK 11
+#define PLL5_CK 12
+#define PLL6_CK 13
+#define PLL7_CK 14
+#define PLL8_CK 15
+
+#define CK_CPU1 16
+
+/* APB DIV CLOCKS */
+#define CK_ICN_APB1 17
+#define CK_ICN_APB2 18
+#define CK_ICN_APB3 19
+#define CK_ICN_APB4 20
+#define CK_ICN_APBDBG 21
+
+/* GLOBAL TIMER */
+#define TIMG1_CK 22
+#define TIMG2_CK 23
+
+/* FLEXGEN CLOCKS */
+#define CK_ICN_HS_MCU 24
+#define CK_ICN_SDMMC 25
+#define CK_ICN_DDR 26
+#define CK_ICN_DISPLAY 27
+#define CK_ICN_HSL 28
+#define CK_ICN_NIC 29
+#define CK_ICN_VID 30
+#define CK_FLEXGEN_07 31
+#define CK_FLEXGEN_08 32
+#define CK_FLEXGEN_09 33
+#define CK_FLEXGEN_10 34
+#define CK_FLEXGEN_11 35
+#define CK_FLEXGEN_12 36
+#define CK_FLEXGEN_13 37
+#define CK_FLEXGEN_14 38
+#define CK_FLEXGEN_15 39
+#define CK_FLEXGEN_16 40
+#define CK_FLEXGEN_17 41
+#define CK_FLEXGEN_18 42
+#define CK_FLEXGEN_19 43
+#define CK_FLEXGEN_20 44
+#define CK_FLEXGEN_21 45
+#define CK_FLEXGEN_22 46
+#define CK_FLEXGEN_23 47
+#define CK_FLEXGEN_24 48
+#define CK_FLEXGEN_25 49
+#define CK_FLEXGEN_26 50
+#define CK_FLEXGEN_27 51
+#define CK_FLEXGEN_28 52
+#define CK_FLEXGEN_29 53
+#define CK_FLEXGEN_30 54
+#define CK_FLEXGEN_31 55
+#define CK_FLEXGEN_32 56
+#define CK_FLEXGEN_33 57
+#define CK_FLEXGEN_34 58
+#define CK_FLEXGEN_35 59
+#define CK_FLEXGEN_36 60
+#define CK_FLEXGEN_37 61
+#define CK_FLEXGEN_38 62
+#define CK_FLEXGEN_39 63
+#define CK_FLEXGEN_40 64
+#define CK_FLEXGEN_41 65
+#define CK_FLEXGEN_42 66
+#define CK_FLEXGEN_43 67
+#define CK_FLEXGEN_44 68
+#define CK_FLEXGEN_45 69
+#define CK_FLEXGEN_46 70
+#define CK_FLEXGEN_47 71
+#define CK_FLEXGEN_48 72
+#define CK_FLEXGEN_49 73
+#define CK_FLEXGEN_50 74
+#define CK_FLEXGEN_51 75
+#define CK_FLEXGEN_52 76
+#define CK_FLEXGEN_53 77
+#define CK_FLEXGEN_54 78
+#define CK_FLEXGEN_55 79
+#define CK_FLEXGEN_56 80
+#define CK_FLEXGEN_57 81
+#define CK_FLEXGEN_58 82
+#define CK_FLEXGEN_59 83
+#define CK_FLEXGEN_60 84
+#define CK_FLEXGEN_61 85
+#define CK_FLEXGEN_62 86
+#define CK_FLEXGEN_63 87
+
+/* LOW SPEED MCU CLOCK */
+#define CK_ICN_LS_MCU 88
+
+#define CK_BUS_STM500 89
+#define CK_BUS_FMC 90
+#define CK_BUS_GPU 91
+#define CK_BUS_ETH1 92
+#define CK_BUS_ETH2 93
+#define CK_BUS_PCIE 94
+#define CK_BUS_DDRPHYC 95
+#define CK_BUS_SYSCPU1 96
+#define CK_BUS_ETHSW 97
+#define CK_BUS_HPDMA1 98
+#define CK_BUS_HPDMA2 99
+#define CK_BUS_HPDMA3 100
+#define CK_BUS_ADC12 101
+#define CK_BUS_ADC3 102
+#define CK_BUS_IPCC1 103
+#define CK_BUS_CCI 104
+#define CK_BUS_CRC 105
+#define CK_BUS_MDF1 106
+#define CK_BUS_OSPIIOM 107
+#define CK_BUS_BKPSRAM 108
+#define CK_BUS_HASH 109
+#define CK_BUS_RNG 110
+#define CK_BUS_CRYP1 111
+#define CK_BUS_CRYP2 112
+#define CK_BUS_SAES 113
+#define CK_BUS_PKA 114
+#define CK_BUS_GPIOA 115
+#define CK_BUS_GPIOB 116
+#define CK_BUS_GPIOC 117
+#define CK_BUS_GPIOD 118
+#define CK_BUS_GPIOE 119
+#define CK_BUS_GPIOF 120
+#define CK_BUS_GPIOG 121
+#define CK_BUS_GPIOH 122
+#define CK_BUS_GPIOI 123
+#define CK_BUS_GPIOJ 124
+#define CK_BUS_GPIOK 125
+#define CK_BUS_LPSRAM1 126
+#define CK_BUS_LPSRAM2 127
+#define CK_BUS_LPSRAM3 128
+#define CK_BUS_GPIOZ 129
+#define CK_BUS_LPDMA 130
+#define CK_BUS_HSEM 131
+#define CK_BUS_IPCC2 132
+#define CK_BUS_RTC 133
+#define CK_BUS_SPI8 134
+#define CK_BUS_LPUART1 135
+#define CK_BUS_I2C8 136
+#define CK_BUS_LPTIM3 137
+#define CK_BUS_LPTIM4 138
+#define CK_BUS_LPTIM5 139
+#define CK_BUS_IWDG5 140
+#define CK_BUS_WWDG2 141
+#define CK_BUS_I3C4 142
+#define CK_BUS_TIM2 143
+#define CK_BUS_TIM3 144
+#define CK_BUS_TIM4 145
+#define CK_BUS_TIM5 146
+#define CK_BUS_TIM6 147
+#define CK_BUS_TIM7 148
+#define CK_BUS_TIM10 149
+#define CK_BUS_TIM11 150
+#define CK_BUS_TIM12 151
+#define CK_BUS_TIM13 152
+#define CK_BUS_TIM14 153
+#define CK_BUS_LPTIM1 154
+#define CK_BUS_LPTIM2 155
+#define CK_BUS_SPI2 156
+#define CK_BUS_SPI3 157
+#define CK_BUS_SPDIFRX 158
+#define CK_BUS_USART2 159
+#define CK_BUS_USART3 160
+#define CK_BUS_UART4 161
+#define CK_BUS_UART5 162
+#define CK_BUS_I2C1 163
+#define CK_BUS_I2C2 164
+#define CK_BUS_I2C3 165
+#define CK_BUS_I2C4 166
+#define CK_BUS_I2C5 167
+#define CK_BUS_I2C6 168
+#define CK_BUS_I2C7 169
+#define CK_BUS_I3C1 170
+#define CK_BUS_I3C2 171
+#define CK_BUS_I3C3 172
+#define CK_BUS_TIM1 173
+#define CK_BUS_TIM8 174
+#define CK_BUS_TIM15 175
+#define CK_BUS_TIM16 176
+#define CK_BUS_TIM17 177
+#define CK_BUS_TIM20 178
+#define CK_BUS_SAI1 179
+#define CK_BUS_SAI2 180
+#define CK_BUS_SAI3 181
+#define CK_BUS_SAI4 182
+#define CK_BUS_USART1 183
+#define CK_BUS_USART6 184
+#define CK_BUS_UART7 185
+#define CK_BUS_UART8 186
+#define CK_BUS_UART9 187
+#define CK_BUS_FDCAN 188
+#define CK_BUS_SPI1 189
+#define CK_BUS_SPI4 190
+#define CK_BUS_SPI5 191
+#define CK_BUS_SPI6 192
+#define CK_BUS_SPI7 193
+#define CK_BUS_BSEC 194
+#define CK_BUS_IWDG1 195
+#define CK_BUS_IWDG2 196
+#define CK_BUS_IWDG3 197
+#define CK_BUS_IWDG4 198
+#define CK_BUS_WWDG1 199
+#define CK_BUS_VREF 200
+#define CK_BUS_DTS 201
+#define CK_BUS_SERC 202
+#define CK_BUS_HDP 203
+#define CK_BUS_IS2M 204
+#define CK_BUS_DSI 205
+#define CK_BUS_LTDC 206
+#define CK_BUS_CSI 207
+#define CK_BUS_DCMIPP 208
+#define CK_BUS_DDRC 209
+#define CK_BUS_DDRCFG 210
+#define CK_BUS_GICV2M 211
+#define CK_BUS_USBTC 212
+#define CK_BUS_USB3PCIEPHY 214
+#define CK_BUS_STGEN 215
+#define CK_BUS_VDEC 216
+#define CK_BUS_VENC 217
+#define CK_SYSDBG 218
+#define CK_KER_TIM2 219
+#define CK_KER_TIM3 220
+#define CK_KER_TIM4 221
+#define CK_KER_TIM5 222
+#define CK_KER_TIM6 223
+#define CK_KER_TIM7 224
+#define CK_KER_TIM10 225
+#define CK_KER_TIM11 226
+#define CK_KER_TIM12 227
+#define CK_KER_TIM13 228
+#define CK_KER_TIM14 229
+#define CK_KER_TIM1 230
+#define CK_KER_TIM8 231
+#define CK_KER_TIM15 232
+#define CK_KER_TIM16 233
+#define CK_KER_TIM17 234
+#define CK_KER_TIM20 235
+#define CK_BUS_SYSRAM 236
+#define CK_BUS_VDERAM 237
+#define CK_BUS_RETRAM 238
+#define CK_BUS_OSPI1 239
+#define CK_BUS_OSPI2 240
+#define CK_BUS_OTFD1 241
+#define CK_BUS_OTFD2 242
+#define CK_BUS_SRAM1 243
+#define CK_BUS_SRAM2 244
+#define CK_BUS_SDMMC1 245
+#define CK_BUS_SDMMC2 246
+#define CK_BUS_SDMMC3 247
+#define CK_BUS_DDR 248
+#define CK_BUS_RISAF4 249
+#define CK_BUS_USB2OHCI 250
+#define CK_BUS_USB2EHCI 251
+#define CK_BUS_USB3DR 252
+#define CK_KER_LPTIM1 253
+#define CK_KER_LPTIM2 254
+#define CK_KER_USART2 255
+#define CK_KER_UART4 256
+#define CK_KER_USART3 257
+#define CK_KER_UART5 258
+#define CK_KER_SPI2 259
+#define CK_KER_SPI3 260
+#define CK_KER_SPDIFRX 261
+#define CK_KER_I2C1 262
+#define CK_KER_I2C2 263
+#define CK_KER_I3C1 264
+#define CK_KER_I3C2 265
+#define CK_KER_I2C3 266
+#define CK_KER_I2C5 267
+#define CK_KER_I3C3 268
+#define CK_KER_I2C4 269
+#define CK_KER_I2C6 270
+#define CK_KER_I2C7 271
+#define CK_KER_SPI1 272
+#define CK_KER_SPI4 273
+#define CK_KER_SPI5 274
+#define CK_KER_SPI6 275
+#define CK_KER_SPI7 276
+#define CK_KER_USART1 277
+#define CK_KER_USART6 278
+#define CK_KER_UART7 279
+#define CK_KER_UART8 280
+#define CK_KER_UART9 281
+#define CK_KER_MDF1 282
+#define CK_KER_SAI1 283
+#define CK_KER_SAI2 284
+#define CK_KER_SAI3 285
+#define CK_KER_SAI4 286
+#define CK_KER_FDCAN 287
+#define CK_KER_DSIBLANE 288
+#define CK_KER_DSIPHY 289
+#define CK_KER_CSI 290
+#define CK_KER_CSITXESC 291
+#define CK_KER_CSIPHY 292
+#define CK_KER_LVDSPHY 293
+#define CK_KER_STGEN 294
+#define CK_KER_USB3PCIEPHY 295
+#define CK_KER_USB2PHY2EN 296
+#define CK_KER_I3C4 297
+#define CK_KER_SPI8 298
+#define CK_KER_I2C8 299
+#define CK_KER_LPUART1 300
+#define CK_KER_LPTIM3 301
+#define CK_KER_LPTIM4 302
+#define CK_KER_LPTIM5 303
+#define CK_KER_TSDBG 304
+#define CK_KER_TPIU 305
+#define CK_BUS_ETR 306
+#define CK_BUS_SYSATB 307
+#define CK_KER_ADC12 308
+#define CK_KER_ADC3 309
+#define CK_KER_OSPI1 310
+#define CK_KER_OSPI2 311
+#define CK_KER_FMC 312
+#define CK_KER_SDMMC1 313
+#define CK_KER_SDMMC2 314
+#define CK_KER_SDMMC3 315
+#define CK_KER_ETH1 316
+#define CK_KER_ETH2 317
+#define CK_KER_ETH1PTP 318
+#define CK_KER_ETH2PTP 319
+#define CK_KER_USB2PHY1 320
+#define CK_KER_USB2PHY2 321
+#define CK_KER_ETHSW 322
+#define CK_KER_ETHSWREF 323
+#define CK_MCO1 324
+#define CK_MCO2 325
+#define CK_KER_DTS 326
+#define CK_ETH1_RX 327
+#define CK_ETH1_TX 328
+#define CK_ETH1_MAC 329
+#define CK_ETH2_RX 330
+#define CK_ETH2_TX 331
+#define CK_ETH2_MAC 332
+#define CK_ETH1_STP 333
+#define CK_ETH2_STP 334
+#define CK_KER_USBTC 335
+#define CK_BUS_ADF1 336
+#define CK_KER_ADF1 337
+#define CK_BUS_LVDS 338
+#define CK_KER_LTDC 339
+#define CK_KER_GPU 340
+#define CK_BUS_ETHSWACMCFG 341
+#define CK_BUS_ETHSWACMMSG 342
+#define HSE_DIV2_CK 343
+
+#define STM32MP25_LAST_CLK 344
+
+#define CK_SCMI_ICN_HS_MCU 0
+#define CK_SCMI_ICN_SDMMC 1
+#define CK_SCMI_ICN_DDR 2
+#define CK_SCMI_ICN_DISPLAY 3
+#define CK_SCMI_ICN_HSL 4
+#define CK_SCMI_ICN_NIC 5
+#define CK_SCMI_ICN_VID 6
+#define CK_SCMI_FLEXGEN_07 7
+#define CK_SCMI_FLEXGEN_08 8
+#define CK_SCMI_FLEXGEN_09 9
+#define CK_SCMI_FLEXGEN_10 10
+#define CK_SCMI_FLEXGEN_11 11
+#define CK_SCMI_FLEXGEN_12 12
+#define CK_SCMI_FLEXGEN_13 13
+#define CK_SCMI_FLEXGEN_14 14
+#define CK_SCMI_FLEXGEN_15 15
+#define CK_SCMI_FLEXGEN_16 16
+#define CK_SCMI_FLEXGEN_17 17
+#define CK_SCMI_FLEXGEN_18 18
+#define CK_SCMI_FLEXGEN_19 19
+#define CK_SCMI_FLEXGEN_20 20
+#define CK_SCMI_FLEXGEN_21 21
+#define CK_SCMI_FLEXGEN_22 22
+#define CK_SCMI_FLEXGEN_23 23
+#define CK_SCMI_FLEXGEN_24 24
+#define CK_SCMI_FLEXGEN_25 25
+#define CK_SCMI_FLEXGEN_26 26
+#define CK_SCMI_FLEXGEN_27 27
+#define CK_SCMI_FLEXGEN_28 28
+#define CK_SCMI_FLEXGEN_29 29
+#define CK_SCMI_FLEXGEN_30 30
+#define CK_SCMI_FLEXGEN_31 31
+#define CK_SCMI_FLEXGEN_32 32
+#define CK_SCMI_FLEXGEN_33 33
+#define CK_SCMI_FLEXGEN_34 34
+#define CK_SCMI_FLEXGEN_35 35
+#define CK_SCMI_FLEXGEN_36 36
+#define CK_SCMI_FLEXGEN_37 37
+#define CK_SCMI_FLEXGEN_38 38
+#define CK_SCMI_FLEXGEN_39 39
+#define CK_SCMI_FLEXGEN_40 40
+#define CK_SCMI_FLEXGEN_41 41
+#define CK_SCMI_FLEXGEN_42 42
+#define CK_SCMI_FLEXGEN_43 43
+#define CK_SCMI_FLEXGEN_44 44
+#define CK_SCMI_FLEXGEN_45 45
+#define CK_SCMI_FLEXGEN_46 46
+#define CK_SCMI_FLEXGEN_47 47
+#define CK_SCMI_FLEXGEN_48 48
+#define CK_SCMI_FLEXGEN_49 49
+#define CK_SCMI_FLEXGEN_50 50
+#define CK_SCMI_FLEXGEN_51 51
+#define CK_SCMI_FLEXGEN_52 52
+#define CK_SCMI_FLEXGEN_53 53
+#define CK_SCMI_FLEXGEN_54 54
+#define CK_SCMI_FLEXGEN_55 55
+#define CK_SCMI_FLEXGEN_56 56
+#define CK_SCMI_FLEXGEN_57 57
+#define CK_SCMI_FLEXGEN_58 58
+#define CK_SCMI_FLEXGEN_59 59
+#define CK_SCMI_FLEXGEN_60 60
+#define CK_SCMI_FLEXGEN_61 61
+#define CK_SCMI_FLEXGEN_62 62
+#define CK_SCMI_FLEXGEN_63 63
+#define CK_SCMI_ICN_LS_MCU 64
+#define CK_SCMI_HSE 65
+#define CK_SCMI_LSE 66
+#define CK_SCMI_HSI 67
+#define CK_SCMI_LSI 68
+#define CK_SCMI_MSI 69
+#define CK_SCMI_HSE_DIV2 70
+#define CK_SCMI_CPU1 71
+#define CK_SCMI_SYSCPU1 72
+#define CK_SCMI_PLL2 73
+#define CK_SCMI_PLL3 74
+#define CK_SCMI_RTC 75
+#define CK_SCMI_RTCCK 76
+#define CK_SCMI_ICN_APB1 77
+#define CK_SCMI_ICN_APB2 78
+#define CK_SCMI_ICN_APB3 79
+#define CK_SCMI_ICN_APB4 80
+#define CK_SCMI_ICN_APBDBG 81
+#define CK_SCMI_TIMG1 82
+#define CK_SCMI_TIMG2 83
+#define CK_SCMI_BKPSRAM 84
+#define CK_SCMI_BSEC 85
+#define CK_SCMI_ETR 87
+#define CK_SCMI_FMC 88
+#define CK_SCMI_GPIOA 89
+#define CK_SCMI_GPIOB 90
+#define CK_SCMI_GPIOC 91
+#define CK_SCMI_GPIOD 92
+#define CK_SCMI_GPIOE 93
+#define CK_SCMI_GPIOF 94
+#define CK_SCMI_GPIOG 95
+#define CK_SCMI_GPIOH 96
+#define CK_SCMI_GPIOI 97
+#define CK_SCMI_GPIOJ 98
+#define CK_SCMI_GPIOK 99
+#define CK_SCMI_GPIOZ 100
+#define CK_SCMI_HPDMA1 101
+#define CK_SCMI_HPDMA2 102
+#define CK_SCMI_HPDMA3 103
+#define CK_SCMI_HSEM 104
+#define CK_SCMI_IPCC1 105
+#define CK_SCMI_IPCC2 106
+#define CK_SCMI_LPDMA 107
+#define CK_SCMI_RETRAM 108
+#define CK_SCMI_SRAM1 109
+#define CK_SCMI_SRAM2 110
+#define CK_SCMI_LPSRAM1 111
+#define CK_SCMI_LPSRAM2 112
+#define CK_SCMI_LPSRAM3 113
+#define CK_SCMI_VDERAM 114
+#define CK_SCMI_SYSRAM 115
+#define CK_SCMI_OSPI1 116
+#define CK_SCMI_OSPI2 117
+#define CK_SCMI_TPIU 118
+#define CK_SCMI_SYSDBG 119
+#define CK_SCMI_SYSATB 120
+#define CK_SCMI_TSDBG 121
+#define CK_SCMI_STM500 122
+
+#endif /* _DT_BINDINGS_STM32MP25_CLKS_H_ */
diff --git a/include/dt-bindings/clock/starfive,jh7110-crg.h b/include/dt-bindings/clock/starfive,jh7110-crg.h
index 06257bfd9ac1..467ccab3bfaa 100644
--- a/include/dt-bindings/clock/starfive,jh7110-crg.h
+++ b/include/dt-bindings/clock/starfive,jh7110-crg.h
@@ -1,11 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
* Copyright 2022 Emil Renner Berthing <kernel@esmil.dk>
+ * Copyright 2022 StarFive Technology Co., Ltd.
*/
#ifndef __DT_BINDINGS_CLOCK_STARFIVE_JH7110_CRG_H__
#define __DT_BINDINGS_CLOCK_STARFIVE_JH7110_CRG_H__
+/* PLL clocks */
+#define JH7110_PLLCLK_PLL0_OUT 0
+#define JH7110_PLLCLK_PLL1_OUT 1
+#define JH7110_PLLCLK_PLL2_OUT 2
+#define JH7110_PLLCLK_END 3
+
/* SYSCRG clocks */
#define JH7110_SYSCLK_CPU_ROOT 0
#define JH7110_SYSCLK_CPU_CORE 1
@@ -218,4 +225,77 @@
#define JH7110_AONCLK_END 14
+/* STGCRG clocks */
+#define JH7110_STGCLK_HIFI4_CLK_CORE 0
+#define JH7110_STGCLK_USB0_APB 1
+#define JH7110_STGCLK_USB0_UTMI_APB 2
+#define JH7110_STGCLK_USB0_AXI 3
+#define JH7110_STGCLK_USB0_LPM 4
+#define JH7110_STGCLK_USB0_STB 5
+#define JH7110_STGCLK_USB0_APP_125 6
+#define JH7110_STGCLK_USB0_REFCLK 7
+#define JH7110_STGCLK_PCIE0_AXI_MST0 8
+#define JH7110_STGCLK_PCIE0_APB 9
+#define JH7110_STGCLK_PCIE0_TL 10
+#define JH7110_STGCLK_PCIE1_AXI_MST0 11
+#define JH7110_STGCLK_PCIE1_APB 12
+#define JH7110_STGCLK_PCIE1_TL 13
+#define JH7110_STGCLK_PCIE_SLV_MAIN 14
+#define JH7110_STGCLK_SEC_AHB 15
+#define JH7110_STGCLK_SEC_MISC_AHB 16
+#define JH7110_STGCLK_GRP0_MAIN 17
+#define JH7110_STGCLK_GRP0_BUS 18
+#define JH7110_STGCLK_GRP0_STG 19
+#define JH7110_STGCLK_GRP1_MAIN 20
+#define JH7110_STGCLK_GRP1_BUS 21
+#define JH7110_STGCLK_GRP1_STG 22
+#define JH7110_STGCLK_GRP1_HIFI 23
+#define JH7110_STGCLK_E2_RTC 24
+#define JH7110_STGCLK_E2_CORE 25
+#define JH7110_STGCLK_E2_DBG 26
+#define JH7110_STGCLK_DMA1P_AXI 27
+#define JH7110_STGCLK_DMA1P_AHB 28
+
+#define JH7110_STGCLK_END 29
+
+/* ISPCRG clocks */
+#define JH7110_ISPCLK_DOM4_APB_FUNC 0
+#define JH7110_ISPCLK_MIPI_RX0_PXL 1
+#define JH7110_ISPCLK_DVP_INV 2
+#define JH7110_ISPCLK_M31DPHY_CFG_IN 3
+#define JH7110_ISPCLK_M31DPHY_REF_IN 4
+#define JH7110_ISPCLK_M31DPHY_TX_ESC_LAN0 5
+#define JH7110_ISPCLK_VIN_APB 6
+#define JH7110_ISPCLK_VIN_SYS 7
+#define JH7110_ISPCLK_VIN_PIXEL_IF0 8
+#define JH7110_ISPCLK_VIN_PIXEL_IF1 9
+#define JH7110_ISPCLK_VIN_PIXEL_IF2 10
+#define JH7110_ISPCLK_VIN_PIXEL_IF3 11
+#define JH7110_ISPCLK_VIN_P_AXI_WR 12
+#define JH7110_ISPCLK_ISPV2_TOP_WRAPPER_C 13
+
+#define JH7110_ISPCLK_END 14
+
+/* VOUTCRG clocks */
+#define JH7110_VOUTCLK_APB 0
+#define JH7110_VOUTCLK_DC8200_PIX 1
+#define JH7110_VOUTCLK_DSI_SYS 2
+#define JH7110_VOUTCLK_TX_ESC 3
+#define JH7110_VOUTCLK_DC8200_AXI 4
+#define JH7110_VOUTCLK_DC8200_CORE 5
+#define JH7110_VOUTCLK_DC8200_AHB 6
+#define JH7110_VOUTCLK_DC8200_PIX0 7
+#define JH7110_VOUTCLK_DC8200_PIX1 8
+#define JH7110_VOUTCLK_DOM_VOUT_TOP_LCD 9
+#define JH7110_VOUTCLK_DSITX_APB 10
+#define JH7110_VOUTCLK_DSITX_SYS 11
+#define JH7110_VOUTCLK_DSITX_DPI 12
+#define JH7110_VOUTCLK_DSITX_TXESC 13
+#define JH7110_VOUTCLK_MIPITX_DPHY_TXESC 14
+#define JH7110_VOUTCLK_HDMI_TX_MCLK 15
+#define JH7110_VOUTCLK_HDMI_TX_BCLK 16
+#define JH7110_VOUTCLK_HDMI_TX_SYS 17
+
+#define JH7110_VOUTCLK_END 18
+
#endif /* __DT_BINDINGS_CLOCK_STARFIVE_JH7110_CRG_H__ */
diff --git a/include/dt-bindings/clock/stm32mp1-clks.h b/include/dt-bindings/clock/stm32mp1-clks.h
index 25e8cfd43459..0a5324bcdbda 100644
--- a/include/dt-bindings/clock/stm32mp1-clks.h
+++ b/include/dt-bindings/clock/stm32mp1-clks.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) STMicroelectronics 2018 - All Rights Reserved
* Author: Gabriel Fernandez <gabriel.fernandez@st.com> for STMicroelectronics.
diff --git a/include/dt-bindings/clock/stm32mp13-clks.h b/include/dt-bindings/clock/stm32mp13-clks.h
index 02befd25edce..0bd7b54c65ff 100644
--- a/include/dt-bindings/clock/stm32mp13-clks.h
+++ b/include/dt-bindings/clock/stm32mp13-clks.h
@@ -1,7 +1,7 @@
-/* SPDX-License-Identifier: GPL-2.0+ or BSD-3-Clause */
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause */
/*
* Copyright (C) STMicroelectronics 2020 - All Rights Reserved
- * Author: Gabriel Fernandez <gabriel.fernandez@st.com> for STMicroelectronics.
+ * Author: Gabriel Fernandez <gabriel.fernandez@foss.st.com> for STMicroelectronics.
*/
#ifndef _DT_BINDINGS_STM32MP13_CLKS_H_
@@ -64,7 +64,7 @@
#define CK_MCO1 38
#define CK_MCO2 39
-/* IP clocks */
+/* IP clocks */
#define SYSCFG 40
#define VREF 41
#define DTS 42
diff --git a/include/dt-bindings/clock/sun20i-d1-ccu.h b/include/dt-bindings/clock/sun20i-d1-ccu.h
index e143b9929763..fdbfb404f92a 100644
--- a/include/dt-bindings/clock/sun20i-d1-ccu.h
+++ b/include/dt-bindings/clock/sun20i-d1-ccu.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (C) 2020 huangzhenwei@allwinnertech.com
* Copyright (C) 2021 Samuel Holland <samuel@sholland.org>
diff --git a/include/dt-bindings/clock/sun20i-d1-r-ccu.h b/include/dt-bindings/clock/sun20i-d1-r-ccu.h
index 4c2697fd32b0..f95c170711e5 100644
--- a/include/dt-bindings/clock/sun20i-d1-r-ccu.h
+++ b/include/dt-bindings/clock/sun20i-d1-r-ccu.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (C) 2021 Samuel Holland <samuel@sholland.org>
*/
diff --git a/include/dt-bindings/clock/sun50i-a100-ccu.h b/include/dt-bindings/clock/sun50i-a100-ccu.h
index 28dc36e1a232..06a2031d466b 100644
--- a/include/dt-bindings/clock/sun50i-a100-ccu.h
+++ b/include/dt-bindings/clock/sun50i-a100-ccu.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2020 Yangtao Li <frank@allwinnertech.com>
*/
diff --git a/include/dt-bindings/clock/sun50i-h6-ccu.h b/include/dt-bindings/clock/sun50i-h6-ccu.h
index a1545cd60e75..ef9123d81937 100644
--- a/include/dt-bindings/clock/sun50i-h6-ccu.h
+++ b/include/dt-bindings/clock/sun50i-h6-ccu.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0+ or MIT)
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (C) 2017 Icenowy Zheng <icenowy@aosc.io>
*/
diff --git a/include/dt-bindings/clock/sun50i-h616-ccu.h b/include/dt-bindings/clock/sun50i-h616-ccu.h
index 1191aca53ac6..6f8f01e67628 100644
--- a/include/dt-bindings/clock/sun50i-h616-ccu.h
+++ b/include/dt-bindings/clock/sun50i-h616-ccu.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (C) 2020 Arm Ltd.
*/
diff --git a/include/dt-bindings/clock/sun6i-rtc.h b/include/dt-bindings/clock/sun6i-rtc.h
index c845493e4d37..3bd3aa3d57ce 100644
--- a/include/dt-bindings/clock/sun6i-rtc.h
+++ b/include/dt-bindings/clock/sun6i-rtc.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
#ifndef _DT_BINDINGS_CLK_SUN6I_RTC_H_
#define _DT_BINDINGS_CLK_SUN6I_RTC_H_
diff --git a/include/dt-bindings/display/sdtv-standards.h b/include/dt-bindings/display/sdtv-standards.h
index fbc1a3db2ea7..8249a2b47b79 100644
--- a/include/dt-bindings/display/sdtv-standards.h
+++ b/include/dt-bindings/display/sdtv-standards.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-only or X11 */
+/* SPDX-License-Identifier: GPL-2.0-only OR X11 */
/*
* Copyright 2019 Pengutronix, Marco Felsch <kernel@pengutronix.de>
*/
diff --git a/include/dt-bindings/dma/fsl-edma.h b/include/dt-bindings/dma/fsl-edma.h
new file mode 100644
index 000000000000..fd11478cfe9c
--- /dev/null
+++ b/include/dt-bindings/dma/fsl-edma.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+
+#ifndef _FSL_EDMA_DT_BINDING_H_
+#define _FSL_EDMA_DT_BINDING_H_
+
+/* Receive Channel */
+#define FSL_EDMA_RX 0x1
+
+/* i.MX8 audio remote DMA */
+#define FSL_EDMA_REMOTE 0x2
+
+/* FIFO is a contiguous memory region */
+#define FSL_EDMA_MULTI_FIFO 0x4
+
+/* Channel must be assigned an even channel number */
+#define FSL_EDMA_EVEN_CH 0x8
+
+/* Channel must be assigned an odd channel number */
+#define FSL_EDMA_ODD_CH 0x10
+
+#endif
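
A minimal sketch of how these new flags compose (the SAI_RX_CHAN_FLAGS name is hypothetical, not part of the patch): each define is a distinct bit, so a channel request simply ORs the flags it needs together.

    #include <dt-bindings/dma/fsl-edma.h>

    /* Hypothetical consumer: an audio receive channel with a contiguous
     * FIFO, pinned to an even channel number. */
    #define SAI_RX_CHAN_FLAGS (FSL_EDMA_RX | FSL_EDMA_MULTI_FIFO | FSL_EDMA_EVEN_CH)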
diff --git a/include/dt-bindings/firmware/qcom,scm.h b/include/dt-bindings/firmware/qcom,scm.h
index d1dc09e72923..6de8b08e1e79 100644
--- a/include/dt-bindings/firmware/qcom,scm.h
+++ b/include/dt-bindings/firmware/qcom,scm.h
@@ -2,17 +2,38 @@
/*
* Copyright (c) 2010-2015, 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (C) 2015 Linaro Ltd.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _DT_BINDINGS_FIRMWARE_QCOM_SCM_H
#define _DT_BINDINGS_FIRMWARE_QCOM_SCM_H
+#define QCOM_SCM_VMID_TZ 0x1
#define QCOM_SCM_VMID_HLOS 0x3
#define QCOM_SCM_VMID_SSC_Q6 0x5
#define QCOM_SCM_VMID_ADSP_Q6 0x6
+#define QCOM_SCM_VMID_CP_TOUCH 0x8
+#define QCOM_SCM_VMID_CP_BITSTREAM 0x9
+#define QCOM_SCM_VMID_CP_PIXEL 0xA
+#define QCOM_SCM_VMID_CP_NON_PIXEL 0xB
+#define QCOM_SCM_VMID_CP_CAMERA 0xD
+#define QCOM_SCM_VMID_HLOS_FREE 0xE
#define QCOM_SCM_VMID_MSS_MSA 0xF
+#define QCOM_SCM_VMID_MSS_NONMSA 0x10
+#define QCOM_SCM_VMID_CP_SEC_DISPLAY 0x11
+#define QCOM_SCM_VMID_CP_APP 0x12
+#define QCOM_SCM_VMID_LPASS 0x16
#define QCOM_SCM_VMID_WLAN 0x18
#define QCOM_SCM_VMID_WLAN_CE 0x19
+#define QCOM_SCM_VMID_CP_SPSS_SP 0x1A
+#define QCOM_SCM_VMID_CP_CAMERA_PREVIEW 0x1D
+#define QCOM_SCM_VMID_CDSP 0x1E
+#define QCOM_SCM_VMID_CP_SPSS_SP_SHARED 0x22
+#define QCOM_SCM_VMID_CP_SPSS_HLOS_SHARED 0x24
+#define QCOM_SCM_VMID_ADSP_HEAP 0x25
+#define QCOM_SCM_VMID_CP_CDSP 0x2A
#define QCOM_SCM_VMID_NAV 0x2B
+#define QCOM_SCM_VMID_TVM 0x2D
+#define QCOM_SCM_VMID_OEMVM 0x31
#endif
diff --git a/include/dt-bindings/gpio/amlogic,t7-periphs-pinctrl.h b/include/dt-bindings/gpio/amlogic,t7-periphs-pinctrl.h
new file mode 100644
index 000000000000..4e16d31a71c9
--- /dev/null
+++ b/include/dt-bindings/gpio/amlogic,t7-periphs-pinctrl.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2023 Amlogic, Inc. All rights reserved.
+ * Author: Huqiang Qin <huqiang.qin@amlogic.com>
+ */
+
+#ifndef _DT_BINDINGS_AMLOGIC_T7_GPIO_H
+#define _DT_BINDINGS_AMLOGIC_T7_GPIO_H
+
+#define GPIOB_0 0
+#define GPIOB_1 1
+#define GPIOB_2 2
+#define GPIOB_3 3
+#define GPIOB_4 4
+#define GPIOB_5 5
+#define GPIOB_6 6
+#define GPIOB_7 7
+#define GPIOB_8 8
+#define GPIOB_9 9
+#define GPIOB_10 10
+#define GPIOB_11 11
+#define GPIOB_12 12
+
+#define GPIOC_0 13
+#define GPIOC_1 14
+#define GPIOC_2 15
+#define GPIOC_3 16
+#define GPIOC_4 17
+#define GPIOC_5 18
+#define GPIOC_6 19
+
+#define GPIOX_0 20
+#define GPIOX_1 21
+#define GPIOX_2 22
+#define GPIOX_3 23
+#define GPIOX_4 24
+#define GPIOX_5 25
+#define GPIOX_6 26
+#define GPIOX_7 27
+#define GPIOX_8 28
+#define GPIOX_9 29
+#define GPIOX_10 30
+#define GPIOX_11 31
+#define GPIOX_12 32
+#define GPIOX_13 33
+#define GPIOX_14 34
+#define GPIOX_15 35
+#define GPIOX_16 36
+#define GPIOX_17 37
+#define GPIOX_18 38
+#define GPIOX_19 39
+
+#define GPIOW_0 40
+#define GPIOW_1 41
+#define GPIOW_2 42
+#define GPIOW_3 43
+#define GPIOW_4 44
+#define GPIOW_5 45
+#define GPIOW_6 46
+#define GPIOW_7 47
+#define GPIOW_8 48
+#define GPIOW_9 49
+#define GPIOW_10 50
+#define GPIOW_11 51
+#define GPIOW_12 52
+#define GPIOW_13 53
+#define GPIOW_14 54
+#define GPIOW_15 55
+#define GPIOW_16 56
+
+#define GPIOD_0 57
+#define GPIOD_1 58
+#define GPIOD_2 59
+#define GPIOD_3 60
+#define GPIOD_4 61
+#define GPIOD_5 62
+#define GPIOD_6 63
+#define GPIOD_7 64
+#define GPIOD_8 65
+#define GPIOD_9 66
+#define GPIOD_10 67
+#define GPIOD_11 68
+#define GPIOD_12 69
+
+#define GPIOE_0 70
+#define GPIOE_1 71
+#define GPIOE_2 72
+#define GPIOE_3 73
+#define GPIOE_4 74
+#define GPIOE_5 75
+#define GPIOE_6 76
+
+#define GPIOZ_0 77
+#define GPIOZ_1 78
+#define GPIOZ_2 79
+#define GPIOZ_3 80
+#define GPIOZ_4 81
+#define GPIOZ_5 82
+#define GPIOZ_6 83
+#define GPIOZ_7 84
+#define GPIOZ_8 85
+#define GPIOZ_9 86
+#define GPIOZ_10 87
+#define GPIOZ_11 88
+#define GPIOZ_12 89
+#define GPIOZ_13 90
+
+#define GPIOT_0 91
+#define GPIOT_1 92
+#define GPIOT_2 93
+#define GPIOT_3 94
+#define GPIOT_4 95
+#define GPIOT_5 96
+#define GPIOT_6 97
+#define GPIOT_7 98
+#define GPIOT_8 99
+#define GPIOT_9 100
+#define GPIOT_10 101
+#define GPIOT_11 102
+#define GPIOT_12 103
+#define GPIOT_13 104
+#define GPIOT_14 105
+#define GPIOT_15 106
+#define GPIOT_16 107
+#define GPIOT_17 108
+#define GPIOT_18 109
+#define GPIOT_19 110
+#define GPIOT_20 111
+#define GPIOT_21 112
+#define GPIOT_22 113
+#define GPIOT_23 114
+
+#define GPIOM_0 115
+#define GPIOM_1 116
+#define GPIOM_2 117
+#define GPIOM_3 118
+#define GPIOM_4 119
+#define GPIOM_5 120
+#define GPIOM_6 121
+#define GPIOM_7 122
+#define GPIOM_8 123
+#define GPIOM_9 124
+#define GPIOM_10 125
+#define GPIOM_11 126
+#define GPIOM_12 127
+#define GPIOM_13 128
+
+#define GPIOY_0 129
+#define GPIOY_1 130
+#define GPIOY_2 131
+#define GPIOY_3 132
+#define GPIOY_4 133
+#define GPIOY_5 134
+#define GPIOY_6 135
+#define GPIOY_7 136
+#define GPIOY_8 137
+#define GPIOY_9 138
+#define GPIOY_10 139
+#define GPIOY_11 140
+#define GPIOY_12 141
+#define GPIOY_13 142
+#define GPIOY_14 143
+#define GPIOY_15 144
+#define GPIOY_16 145
+#define GPIOY_17 146
+#define GPIOY_18 147
+
+#define GPIOH_0 148
+#define GPIOH_1 149
+#define GPIOH_2 150
+#define GPIOH_3 151
+#define GPIOH_4 152
+#define GPIOH_5 153
+#define GPIOH_6 154
+#define GPIOH_7 155
+
+#define GPIO_TEST_N 156
+
+#endif /* _DT_BINDINGS_AMLOGIC_T7_GPIO_H */
diff --git a/include/dt-bindings/gpio/amlogic-c3-gpio.h b/include/dt-bindings/gpio/amlogic-c3-gpio.h
new file mode 100644
index 000000000000..75c8da6f505f
--- /dev/null
+++ b/include/dt-bindings/gpio/amlogic-c3-gpio.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2021 Amlogic, Inc. All rights reserved.
+ * Author: Huqiang Qin <huqiang.qin@amlogic.com>
+ */
+
+#ifndef _DT_BINDINGS_AMLOGIC_C3_GPIO_H
+#define _DT_BINDINGS_AMLOGIC_C3_GPIO_H
+
+#define GPIOE_0 0
+#define GPIOE_1 1
+#define GPIOE_2 2
+#define GPIOE_3 3
+#define GPIOE_4 4
+
+#define GPIOB_0 5
+#define GPIOB_1 6
+#define GPIOB_2 7
+#define GPIOB_3 8
+#define GPIOB_4 9
+#define GPIOB_5 10
+#define GPIOB_6 11
+#define GPIOB_7 12
+#define GPIOB_8 13
+#define GPIOB_9 14
+#define GPIOB_10 15
+#define GPIOB_11 16
+#define GPIOB_12 17
+#define GPIOB_13 18
+#define GPIOB_14 19
+
+#define GPIOC_0 20
+#define GPIOC_1 21
+#define GPIOC_2 22
+#define GPIOC_3 23
+#define GPIOC_4 24
+#define GPIOC_5 25
+#define GPIOC_6 26
+
+#define GPIOX_0 27
+#define GPIOX_1 28
+#define GPIOX_2 29
+#define GPIOX_3 30
+#define GPIOX_4 31
+#define GPIOX_5 32
+#define GPIOX_6 33
+#define GPIOX_7 34
+#define GPIOX_8 35
+#define GPIOX_9 36
+#define GPIOX_10 37
+#define GPIOX_11 38
+#define GPIOX_12 39
+#define GPIOX_13 40
+
+#define GPIOD_0 41
+#define GPIOD_1 42
+#define GPIOD_2 43
+#define GPIOD_3 44
+#define GPIOD_4 45
+#define GPIOD_5 46
+#define GPIOD_6 47
+
+#define GPIOA_0 48
+#define GPIOA_1 49
+#define GPIOA_2 50
+#define GPIOA_3 51
+#define GPIOA_4 52
+#define GPIOA_5 53
+
+#define GPIO_TEST_N 54
+
+#endif /* _DT_BINDINGS_AMLOGIC_C3_GPIO_H */
diff --git a/include/dt-bindings/gpio/meson-g12a-gpio.h b/include/dt-bindings/gpio/meson-g12a-gpio.h
index f7bd69350d18..fa7bb0bbf010 100644
--- a/include/dt-bindings/gpio/meson-g12a-gpio.h
+++ b/include/dt-bindings/gpio/meson-g12a-gpio.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2018 Amlogic, Inc. All rights reserved.
* Author: Xingyu Chen <xingyu.chen@amlogic.com>
diff --git a/include/dt-bindings/gpio/tegra234-gpio.h b/include/dt-bindings/gpio/tegra234-gpio.h
index d7a1f2e298e8..784673c2c752 100644
--- a/include/dt-bindings/gpio/tegra234-gpio.h
+++ b/include/dt-bindings/gpio/tegra234-gpio.h
@@ -33,18 +33,14 @@
#define TEGRA234_MAIN_GPIO_PORT_P 14
#define TEGRA234_MAIN_GPIO_PORT_Q 15
#define TEGRA234_MAIN_GPIO_PORT_R 16
-#define TEGRA234_MAIN_GPIO_PORT_S 17
-#define TEGRA234_MAIN_GPIO_PORT_T 18
-#define TEGRA234_MAIN_GPIO_PORT_U 19
-#define TEGRA234_MAIN_GPIO_PORT_V 20
-#define TEGRA234_MAIN_GPIO_PORT_X 21
-#define TEGRA234_MAIN_GPIO_PORT_Y 22
-#define TEGRA234_MAIN_GPIO_PORT_Z 23
-#define TEGRA234_MAIN_GPIO_PORT_AC 24
-#define TEGRA234_MAIN_GPIO_PORT_AD 25
-#define TEGRA234_MAIN_GPIO_PORT_AE 26
-#define TEGRA234_MAIN_GPIO_PORT_AF 27
-#define TEGRA234_MAIN_GPIO_PORT_AG 28
+#define TEGRA234_MAIN_GPIO_PORT_X 17
+#define TEGRA234_MAIN_GPIO_PORT_Y 18
+#define TEGRA234_MAIN_GPIO_PORT_Z 19
+#define TEGRA234_MAIN_GPIO_PORT_AC 20
+#define TEGRA234_MAIN_GPIO_PORT_AD 21
+#define TEGRA234_MAIN_GPIO_PORT_AE 22
+#define TEGRA234_MAIN_GPIO_PORT_AF 23
+#define TEGRA234_MAIN_GPIO_PORT_AG 24
#define TEGRA234_MAIN_GPIO(port, offset) \
((TEGRA234_MAIN_GPIO_PORT_##port * 8) + offset)
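
With the compacted port numbers above, the pin-index macro itself is unchanged: it still computes port * 8 + offset, just against the new port values. A quick sanity check (illustration only, not part of the patch):

    #include <dt-bindings/gpio/tegra234-gpio.h>

    /* Port X is now 17, so port X offset 3 maps to (17 * 8) + 3 == 139. */
    _Static_assert(TEGRA234_MAIN_GPIO(X, 3) == 139, "compacted port numbering");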
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pm7325.h b/include/dt-bindings/iio/qcom,spmi-adc7-pm7325.h
new file mode 100644
index 000000000000..96908014e09e
--- /dev/null
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-pm7325.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_PM7325_H
+#define _DT_BINDINGS_QCOM_SPMI_VADC_PM7325_H
+
+#ifndef PM7325_SID
+#define PM7325_SID 1
+#endif
+
+#include <dt-bindings/iio/qcom,spmi-vadc.h>
+
+/* ADC channels for PM7325_ADC for PMIC7 */
+#define PM7325_ADC7_REF_GND (PM7325_SID << 8 | ADC7_REF_GND)
+#define PM7325_ADC7_1P25VREF (PM7325_SID << 8 | ADC7_1P25VREF)
+#define PM7325_ADC7_VREF_VADC (PM7325_SID << 8 | ADC7_VREF_VADC)
+#define PM7325_ADC7_DIE_TEMP (PM7325_SID << 8 | ADC7_DIE_TEMP)
+
+#define PM7325_ADC7_AMUX_THM1 (PM7325_SID << 8 | ADC7_AMUX_THM1)
+#define PM7325_ADC7_AMUX_THM2 (PM7325_SID << 8 | ADC7_AMUX_THM2)
+#define PM7325_ADC7_AMUX_THM3 (PM7325_SID << 8 | ADC7_AMUX_THM3)
+#define PM7325_ADC7_AMUX_THM4 (PM7325_SID << 8 | ADC7_AMUX_THM4)
+#define PM7325_ADC7_AMUX_THM5 (PM7325_SID << 8 | ADC7_AMUX_THM5)
+#define PM7325_ADC7_GPIO1 (PM7325_SID << 8 | ADC7_GPIO1)
+#define PM7325_ADC7_GPIO2 (PM7325_SID << 8 | ADC7_GPIO2)
+#define PM7325_ADC7_GPIO3 (PM7325_SID << 8 | ADC7_GPIO3)
+#define PM7325_ADC7_GPIO4 (PM7325_SID << 8 | ADC7_GPIO4)
+
+/* 30k pull-up1 */
+#define PM7325_ADC7_AMUX_THM1_30K_PU (PM7325_SID << 8 | ADC7_AMUX_THM1_30K_PU)
+#define PM7325_ADC7_AMUX_THM2_30K_PU (PM7325_SID << 8 | ADC7_AMUX_THM2_30K_PU)
+#define PM7325_ADC7_AMUX_THM3_30K_PU (PM7325_SID << 8 | ADC7_AMUX_THM3_30K_PU)
+#define PM7325_ADC7_AMUX_THM4_30K_PU (PM7325_SID << 8 | ADC7_AMUX_THM4_30K_PU)
+#define PM7325_ADC7_AMUX_THM5_30K_PU (PM7325_SID << 8 | ADC7_AMUX_THM5_30K_PU)
+#define PM7325_ADC7_GPIO1_30K_PU (PM7325_SID << 8 | ADC7_GPIO1_30K_PU)
+#define PM7325_ADC7_GPIO2_30K_PU (PM7325_SID << 8 | ADC7_GPIO2_30K_PU)
+#define PM7325_ADC7_GPIO3_30K_PU (PM7325_SID << 8 | ADC7_GPIO3_30K_PU)
+#define PM7325_ADC7_GPIO4_30K_PU (PM7325_SID << 8 | ADC7_GPIO4_30K_PU)
+
+/* 100k pull-up2 */
+#define PM7325_ADC7_AMUX_THM1_100K_PU (PM7325_SID << 8 | ADC7_AMUX_THM1_100K_PU)
+#define PM7325_ADC7_AMUX_THM2_100K_PU (PM7325_SID << 8 | ADC7_AMUX_THM2_100K_PU)
+#define PM7325_ADC7_AMUX_THM3_100K_PU (PM7325_SID << 8 | ADC7_AMUX_THM3_100K_PU)
+#define PM7325_ADC7_AMUX_THM4_100K_PU (PM7325_SID << 8 | ADC7_AMUX_THM4_100K_PU)
+#define PM7325_ADC7_AMUX_THM5_100K_PU (PM7325_SID << 8 | ADC7_AMUX_THM5_100K_PU)
+#define PM7325_ADC7_GPIO1_100K_PU (PM7325_SID << 8 | ADC7_GPIO1_100K_PU)
+#define PM7325_ADC7_GPIO2_100K_PU (PM7325_SID << 8 | ADC7_GPIO2_100K_PU)
+#define PM7325_ADC7_GPIO3_100K_PU (PM7325_SID << 8 | ADC7_GPIO3_100K_PU)
+#define PM7325_ADC7_GPIO4_100K_PU (PM7325_SID << 8 | ADC7_GPIO4_100K_PU)
+
+/* 400k pull-up3 */
+#define PM7325_ADC7_AMUX_THM1_400K_PU (PM7325_SID << 8 | ADC7_AMUX_THM1_400K_PU)
+#define PM7325_ADC7_AMUX_THM2_400K_PU (PM7325_SID << 8 | ADC7_AMUX_THM2_400K_PU)
+#define PM7325_ADC7_AMUX_THM3_400K_PU (PM7325_SID << 8 | ADC7_AMUX_THM3_400K_PU)
+#define PM7325_ADC7_AMUX_THM4_400K_PU (PM7325_SID << 8 | ADC7_AMUX_THM4_400K_PU)
+#define PM7325_ADC7_AMUX_THM5_400K_PU (PM7325_SID << 8 | ADC7_AMUX_THM5_400K_PU)
+#define PM7325_ADC7_GPIO1_400K_PU (PM7325_SID << 8 | ADC7_GPIO1_400K_PU)
+#define PM7325_ADC7_GPIO2_400K_PU (PM7325_SID << 8 | ADC7_GPIO2_400K_PU)
+#define PM7325_ADC7_GPIO3_400K_PU (PM7325_SID << 8 | ADC7_GPIO3_400K_PU)
+#define PM7325_ADC7_GPIO4_400K_PU (PM7325_SID << 8 | ADC7_GPIO4_400K_PU)
+
+/* 1/3 Divider */
+#define PM7325_ADC7_GPIO4_DIV3 (PM7325_SID << 8 | ADC7_GPIO4_DIV3)
+
+#define PM7325_ADC7_VPH_PWR (PM7325_SID << 8 | ADC7_VPH_PWR)
+
+#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PM7325_H */
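
The #ifndef guard at the top of the new header lets a platform override the default slave ID before including it; each channel macro then carries the SID in bits 15:8 and the ADC7 channel code in bits 7:0. A sketch, assuming a board that places the PM7325 at SID 2 (the SID value here is illustrative):

    /* Assumed board configuration: PM7325 at SID 2 instead of the default 1. */
    #define PM7325_SID 2
    #include <dt-bindings/iio/qcom,spmi-adc7-pm7325.h>

    /* ADC7_DIE_TEMP is 0x03, so the macro expands to (2 << 8) | 0x03 == 0x203. */
    _Static_assert(PM7325_ADC7_DIE_TEMP == 0x203, "SID in bits 15:8");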
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h b/include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h
index 09fd169ad18e..5d98f7d48a1e 100644
--- a/include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h
@@ -6,58 +6,60 @@
#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_PM8350_H
#define _DT_BINDINGS_QCOM_SPMI_VADC_PM8350_H
+#include <dt-bindings/iio/qcom,spmi-vadc.h>
+
/* ADC channels for PM8350_ADC for PMIC7 */
-#define PM8350_ADC7_REF_GND(sid) ((sid) << 8 | 0x0)
-#define PM8350_ADC7_1P25VREF(sid) ((sid) << 8 | 0x01)
-#define PM8350_ADC7_VREF_VADC(sid) ((sid) << 8 | 0x02)
-#define PM8350_ADC7_DIE_TEMP(sid) ((sid) << 8 | 0x03)
-
-#define PM8350_ADC7_AMUX_THM1(sid) ((sid) << 8 | 0x04)
-#define PM8350_ADC7_AMUX_THM2(sid) ((sid) << 8 | 0x05)
-#define PM8350_ADC7_AMUX_THM3(sid) ((sid) << 8 | 0x06)
-#define PM8350_ADC7_AMUX_THM4(sid) ((sid) << 8 | 0x07)
-#define PM8350_ADC7_AMUX_THM5(sid) ((sid) << 8 | 0x08)
-#define PM8350_ADC7_GPIO1(sid) ((sid) << 8 | 0x0a)
-#define PM8350_ADC7_GPIO2(sid) ((sid) << 8 | 0x0b)
-#define PM8350_ADC7_GPIO3(sid) ((sid) << 8 | 0x0c)
-#define PM8350_ADC7_GPIO4(sid) ((sid) << 8 | 0x0d)
+#define PM8350_ADC7_REF_GND(sid) ((sid) << 8 | ADC7_REF_GND)
+#define PM8350_ADC7_1P25VREF(sid) ((sid) << 8 | ADC7_1P25VREF)
+#define PM8350_ADC7_VREF_VADC(sid) ((sid) << 8 | ADC7_VREF_VADC)
+#define PM8350_ADC7_DIE_TEMP(sid) ((sid) << 8 | ADC7_DIE_TEMP)
+
+#define PM8350_ADC7_AMUX_THM1(sid) ((sid) << 8 | ADC7_AMUX_THM1)
+#define PM8350_ADC7_AMUX_THM2(sid) ((sid) << 8 | ADC7_AMUX_THM2)
+#define PM8350_ADC7_AMUX_THM3(sid) ((sid) << 8 | ADC7_AMUX_THM3)
+#define PM8350_ADC7_AMUX_THM4(sid) ((sid) << 8 | ADC7_AMUX_THM4)
+#define PM8350_ADC7_AMUX_THM5(sid) ((sid) << 8 | ADC7_AMUX_THM5)
+#define PM8350_ADC7_GPIO1(sid) ((sid) << 8 | ADC7_GPIO1)
+#define PM8350_ADC7_GPIO2(sid) ((sid) << 8 | ADC7_GPIO2)
+#define PM8350_ADC7_GPIO3(sid) ((sid) << 8 | ADC7_GPIO3)
+#define PM8350_ADC7_GPIO4(sid) ((sid) << 8 | ADC7_GPIO4)
/* 30k pull-up1 */
-#define PM8350_ADC7_AMUX_THM1_30K_PU(sid) ((sid) << 8 | 0x24)
-#define PM8350_ADC7_AMUX_THM2_30K_PU(sid) ((sid) << 8 | 0x25)
-#define PM8350_ADC7_AMUX_THM3_30K_PU(sid) ((sid) << 8 | 0x26)
-#define PM8350_ADC7_AMUX_THM4_30K_PU(sid) ((sid) << 8 | 0x27)
-#define PM8350_ADC7_AMUX_THM5_30K_PU(sid) ((sid) << 8 | 0x28)
-#define PM8350_ADC7_GPIO1_30K_PU(sid) ((sid) << 8 | 0x2a)
-#define PM8350_ADC7_GPIO2_30K_PU(sid) ((sid) << 8 | 0x2b)
-#define PM8350_ADC7_GPIO3_30K_PU(sid) ((sid) << 8 | 0x2c)
-#define PM8350_ADC7_GPIO4_30K_PU(sid) ((sid) << 8 | 0x2d)
+#define PM8350_ADC7_AMUX_THM1_30K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM1_30K_PU)
+#define PM8350_ADC7_AMUX_THM2_30K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM2_30K_PU)
+#define PM8350_ADC7_AMUX_THM3_30K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM3_30K_PU)
+#define PM8350_ADC7_AMUX_THM4_30K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM4_30K_PU)
+#define PM8350_ADC7_AMUX_THM5_30K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM5_30K_PU)
+#define PM8350_ADC7_GPIO1_30K_PU(sid) ((sid) << 8 | ADC7_GPIO1_30K_PU)
+#define PM8350_ADC7_GPIO2_30K_PU(sid) ((sid) << 8 | ADC7_GPIO2_30K_PU)
+#define PM8350_ADC7_GPIO3_30K_PU(sid) ((sid) << 8 | ADC7_GPIO3_30K_PU)
+#define PM8350_ADC7_GPIO4_30K_PU(sid) ((sid) << 8 | ADC7_GPIO4_30K_PU)
/* 100k pull-up2 */
-#define PM8350_ADC7_AMUX_THM1_100K_PU(sid) ((sid) << 8 | 0x44)
-#define PM8350_ADC7_AMUX_THM2_100K_PU(sid) ((sid) << 8 | 0x45)
-#define PM8350_ADC7_AMUX_THM3_100K_PU(sid) ((sid) << 8 | 0x46)
-#define PM8350_ADC7_AMUX_THM4_100K_PU(sid) ((sid) << 8 | 0x47)
-#define PM8350_ADC7_AMUX_THM5_100K_PU(sid) ((sid) << 8 | 0x48)
-#define PM8350_ADC7_GPIO1_100K_PU(sid) ((sid) << 8 | 0x4a)
-#define PM8350_ADC7_GPIO2_100K_PU(sid) ((sid) << 8 | 0x4b)
-#define PM8350_ADC7_GPIO3_100K_PU(sid) ((sid) << 8 | 0x4c)
-#define PM8350_ADC7_GPIO4_100K_PU(sid) ((sid) << 8 | 0x4d)
+#define PM8350_ADC7_AMUX_THM1_100K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM1_100K_PU)
+#define PM8350_ADC7_AMUX_THM2_100K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM2_100K_PU)
+#define PM8350_ADC7_AMUX_THM3_100K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM3_100K_PU)
+#define PM8350_ADC7_AMUX_THM4_100K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM4_100K_PU)
+#define PM8350_ADC7_AMUX_THM5_100K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM5_100K_PU)
+#define PM8350_ADC7_GPIO1_100K_PU(sid) ((sid) << 8 | ADC7_GPIO1_100K_PU)
+#define PM8350_ADC7_GPIO2_100K_PU(sid) ((sid) << 8 | ADC7_GPIO2_100K_PU)
+#define PM8350_ADC7_GPIO3_100K_PU(sid) ((sid) << 8 | ADC7_GPIO3_100K_PU)
+#define PM8350_ADC7_GPIO4_100K_PU(sid) ((sid) << 8 | ADC7_GPIO4_100K_PU)
/* 400k pull-up3 */
-#define PM8350_ADC7_AMUX_THM1_400K_PU(sid) ((sid) << 8 | 0x64)
-#define PM8350_ADC7_AMUX_THM2_400K_PU(sid) ((sid) << 8 | 0x65)
-#define PM8350_ADC7_AMUX_THM3_400K_PU(sid) ((sid) << 8 | 0x66)
-#define PM8350_ADC7_AMUX_THM4_400K_PU(sid) ((sid) << 8 | 0x67)
-#define PM8350_ADC7_AMUX_THM5_400K_PU(sid) ((sid) << 8 | 0x68)
-#define PM8350_ADC7_GPIO1_400K_PU(sid) ((sid) << 8 | 0x6a)
-#define PM8350_ADC7_GPIO2_400K_PU(sid) ((sid) << 8 | 0x6b)
-#define PM8350_ADC7_GPIO3_400K_PU(sid) ((sid) << 8 | 0x6c)
-#define PM8350_ADC7_GPIO4_400K_PU(sid) ((sid) << 8 | 0x6d)
+#define PM8350_ADC7_AMUX_THM1_400K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM1_400K_PU)
+#define PM8350_ADC7_AMUX_THM2_400K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM2_400K_PU)
+#define PM8350_ADC7_AMUX_THM3_400K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM3_400K_PU)
+#define PM8350_ADC7_AMUX_THM4_400K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM4_400K_PU)
+#define PM8350_ADC7_AMUX_THM5_400K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM5_400K_PU)
+#define PM8350_ADC7_GPIO1_400K_PU(sid) ((sid) << 8 | ADC7_GPIO1_400K_PU)
+#define PM8350_ADC7_GPIO2_400K_PU(sid) ((sid) << 8 | ADC7_GPIO2_400K_PU)
+#define PM8350_ADC7_GPIO3_400K_PU(sid) ((sid) << 8 | ADC7_GPIO3_400K_PU)
+#define PM8350_ADC7_GPIO4_400K_PU(sid) ((sid) << 8 | ADC7_GPIO4_400K_PU)
/* 1/3 Divider */
-#define PM8350_ADC7_GPIO4_DIV3(sid) ((sid) << 8 | 0x8d)
+#define PM8350_ADC7_GPIO4_DIV3(sid) ((sid) << 8 | ADC7_GPIO4_DIV3)
-#define PM8350_ADC7_VPH_PWR(sid) ((sid) << 8 | 0x8e)
+#define PM8350_ADC7_VPH_PWR(sid) ((sid) << 8 | ADC7_VPH_PWR)
#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PM8350_H */
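
Unlike the fixed-SID headers, PM8350 can sit at different slave IDs per board, so its macros take the SID as a parameter; switching from magic numbers to the symbolic channel IDs leaves the encoding byte-for-byte identical. For instance (illustration only):

    #include <dt-bindings/iio/qcom,spmi-adc7-pm8350.h>

    /* ADC7_AMUX_THM2 replaces the literal 0x05, so for SID 1 the value
     * is still (1 << 8) | 0x05 == 0x105. */
    _Static_assert(PM8350_ADC7_AMUX_THM2(1) == 0x105, "encoding unchanged");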
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h b/include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h
index dc2497c27e16..57c7977666d3 100644
--- a/include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h
@@ -10,79 +10,81 @@
#define PM8350B_SID 3
#endif
+#include <dt-bindings/iio/qcom,spmi-vadc.h>
+
/* ADC channels for PM8350B_ADC for PMIC7 */
-#define PM8350B_ADC7_REF_GND (PM8350B_SID << 8 | 0x0)
-#define PM8350B_ADC7_1P25VREF (PM8350B_SID << 8 | 0x01)
-#define PM8350B_ADC7_VREF_VADC (PM8350B_SID << 8 | 0x02)
-#define PM8350B_ADC7_DIE_TEMP (PM8350B_SID << 8 | 0x03)
+#define PM8350B_ADC7_REF_GND (PM8350B_SID << 8 | ADC7_REF_GND)
+#define PM8350B_ADC7_1P25VREF (PM8350B_SID << 8 | ADC7_1P25VREF)
+#define PM8350B_ADC7_VREF_VADC (PM8350B_SID << 8 | ADC7_VREF_VADC)
+#define PM8350B_ADC7_DIE_TEMP (PM8350B_SID << 8 | ADC7_DIE_TEMP)
-#define PM8350B_ADC7_AMUX_THM1 (PM8350B_SID << 8 | 0x04)
-#define PM8350B_ADC7_AMUX_THM2 (PM8350B_SID << 8 | 0x05)
-#define PM8350B_ADC7_AMUX_THM3 (PM8350B_SID << 8 | 0x06)
-#define PM8350B_ADC7_AMUX_THM4 (PM8350B_SID << 8 | 0x07)
-#define PM8350B_ADC7_AMUX_THM5 (PM8350B_SID << 8 | 0x08)
-#define PM8350B_ADC7_AMUX_THM6 (PM8350B_SID << 8 | 0x09)
-#define PM8350B_ADC7_GPIO1 (PM8350B_SID << 8 | 0x0a)
-#define PM8350B_ADC7_GPIO2 (PM8350B_SID << 8 | 0x0b)
-#define PM8350B_ADC7_GPIO3 (PM8350B_SID << 8 | 0x0c)
-#define PM8350B_ADC7_GPIO4 (PM8350B_SID << 8 | 0x0d)
+#define PM8350B_ADC7_AMUX_THM1 (PM8350B_SID << 8 | ADC7_AMUX_THM1)
+#define PM8350B_ADC7_AMUX_THM2 (PM8350B_SID << 8 | ADC7_AMUX_THM2)
+#define PM8350B_ADC7_AMUX_THM3 (PM8350B_SID << 8 | ADC7_AMUX_THM3)
+#define PM8350B_ADC7_AMUX_THM4 (PM8350B_SID << 8 | ADC7_AMUX_THM4)
+#define PM8350B_ADC7_AMUX_THM5 (PM8350B_SID << 8 | ADC7_AMUX_THM5)
+#define PM8350B_ADC7_AMUX_THM6 (PM8350B_SID << 8 | ADC7_AMUX_THM6)
+#define PM8350B_ADC7_GPIO1 (PM8350B_SID << 8 | ADC7_GPIO1)
+#define PM8350B_ADC7_GPIO2 (PM8350B_SID << 8 | ADC7_GPIO2)
+#define PM8350B_ADC7_GPIO3 (PM8350B_SID << 8 | ADC7_GPIO3)
+#define PM8350B_ADC7_GPIO4 (PM8350B_SID << 8 | ADC7_GPIO4)
-#define PM8350B_ADC7_CHG_TEMP (PM8350B_SID << 8 | 0x10)
-#define PM8350B_ADC7_USB_IN_V_16 (PM8350B_SID << 8 | 0x11)
-#define PM8350B_ADC7_VDC_16 (PM8350B_SID << 8 | 0x12)
-#define PM8350B_ADC7_CC1_ID (PM8350B_SID << 8 | 0x13)
-#define PM8350B_ADC7_VREF_BAT_THERM (PM8350B_SID << 8 | 0x15)
-#define PM8350B_ADC7_IIN_FB (PM8350B_SID << 8 | 0x17)
+#define PM8350B_ADC7_CHG_TEMP (PM8350B_SID << 8 | ADC7_CHG_TEMP)
+#define PM8350B_ADC7_USB_IN_V_16 (PM8350B_SID << 8 | ADC7_USB_IN_V_16)
+#define PM8350B_ADC7_VDC_16 (PM8350B_SID << 8 | ADC7_VDC_16)
+#define PM8350B_ADC7_CC1_ID (PM8350B_SID << 8 | ADC7_CC1_ID)
+#define PM8350B_ADC7_VREF_BAT_THERM (PM8350B_SID << 8 | ADC7_VREF_BAT_THERM)
+#define PM8350B_ADC7_IIN_FB (PM8350B_SID << 8 | ADC7_IIN_FB)
/* 30k pull-up1 */
-#define PM8350B_ADC7_AMUX_THM1_30K_PU (PM8350B_SID << 8 | 0x24)
-#define PM8350B_ADC7_AMUX_THM2_30K_PU (PM8350B_SID << 8 | 0x25)
-#define PM8350B_ADC7_AMUX_THM3_30K_PU (PM8350B_SID << 8 | 0x26)
-#define PM8350B_ADC7_AMUX_THM4_30K_PU (PM8350B_SID << 8 | 0x27)
-#define PM8350B_ADC7_AMUX_THM5_30K_PU (PM8350B_SID << 8 | 0x28)
-#define PM8350B_ADC7_AMUX_THM6_30K_PU (PM8350B_SID << 8 | 0x29)
-#define PM8350B_ADC7_GPIO1_30K_PU (PM8350B_SID << 8 | 0x2a)
-#define PM8350B_ADC7_GPIO2_30K_PU (PM8350B_SID << 8 | 0x2b)
-#define PM8350B_ADC7_GPIO3_30K_PU (PM8350B_SID << 8 | 0x2c)
-#define PM8350B_ADC7_GPIO4_30K_PU (PM8350B_SID << 8 | 0x2d)
-#define PM8350B_ADC7_CC1_ID_30K_PU (PM8350B_SID << 8 | 0x33)
+#define PM8350B_ADC7_AMUX_THM1_30K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM1_30K_PU)
+#define PM8350B_ADC7_AMUX_THM2_30K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM2_30K_PU)
+#define PM8350B_ADC7_AMUX_THM3_30K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM3_30K_PU)
+#define PM8350B_ADC7_AMUX_THM4_30K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM4_30K_PU)
+#define PM8350B_ADC7_AMUX_THM5_30K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM5_30K_PU)
+#define PM8350B_ADC7_AMUX_THM6_30K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM6_30K_PU)
+#define PM8350B_ADC7_GPIO1_30K_PU (PM8350B_SID << 8 | ADC7_GPIO1_30K_PU)
+#define PM8350B_ADC7_GPIO2_30K_PU (PM8350B_SID << 8 | ADC7_GPIO2_30K_PU)
+#define PM8350B_ADC7_GPIO3_30K_PU (PM8350B_SID << 8 | ADC7_GPIO3_30K_PU)
+#define PM8350B_ADC7_GPIO4_30K_PU (PM8350B_SID << 8 | ADC7_GPIO4_30K_PU)
+#define PM8350B_ADC7_CC1_ID_30K_PU (PM8350B_SID << 8 | ADC7_CC1_ID_30K_PU)
/* 100k pull-up2 */
-#define PM8350B_ADC7_AMUX_THM1_100K_PU (PM8350B_SID << 8 | 0x44)
-#define PM8350B_ADC7_AMUX_THM2_100K_PU (PM8350B_SID << 8 | 0x45)
-#define PM8350B_ADC7_AMUX_THM3_100K_PU (PM8350B_SID << 8 | 0x46)
-#define PM8350B_ADC7_AMUX_THM4_100K_PU (PM8350B_SID << 8 | 0x47)
-#define PM8350B_ADC7_AMUX_THM5_100K_PU (PM8350B_SID << 8 | 0x48)
-#define PM8350B_ADC7_AMUX_THM6_100K_PU (PM8350B_SID << 8 | 0x49)
-#define PM8350B_ADC7_GPIO1_100K_PU (PM8350B_SID << 8 | 0x4a)
-#define PM8350B_ADC7_GPIO2_100K_PU (PM8350B_SID << 8 | 0x4b)
-#define PM8350B_ADC7_GPIO3_100K_PU (PM8350B_SID << 8 | 0x4c)
-#define PM8350B_ADC7_GPIO4_100K_PU (PM8350B_SID << 8 | 0x4d)
-#define PM8350B_ADC7_CC1_ID_100K_PU (PM8350B_SID << 8 | 0x53)
+#define PM8350B_ADC7_AMUX_THM1_100K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM1_100K_PU)
+#define PM8350B_ADC7_AMUX_THM2_100K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM2_100K_PU)
+#define PM8350B_ADC7_AMUX_THM3_100K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM3_100K_PU)
+#define PM8350B_ADC7_AMUX_THM4_100K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM4_100K_PU)
+#define PM8350B_ADC7_AMUX_THM5_100K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM5_100K_PU)
+#define PM8350B_ADC7_AMUX_THM6_100K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM6_100K_PU)
+#define PM8350B_ADC7_GPIO1_100K_PU (PM8350B_SID << 8 | ADC7_GPIO1_100K_PU)
+#define PM8350B_ADC7_GPIO2_100K_PU (PM8350B_SID << 8 | ADC7_GPIO2_100K_PU)
+#define PM8350B_ADC7_GPIO3_100K_PU (PM8350B_SID << 8 | ADC7_GPIO3_100K_PU)
+#define PM8350B_ADC7_GPIO4_100K_PU (PM8350B_SID << 8 | ADC7_GPIO4_100K_PU)
+#define PM8350B_ADC7_CC1_ID_100K_PU (PM8350B_SID << 8 | ADC7_CC1_ID_100K_PU)
/* 400k pull-up3 */
-#define PM8350B_ADC7_AMUX_THM1_400K_PU (PM8350B_SID << 8 | 0x64)
-#define PM8350B_ADC7_AMUX_THM2_400K_PU (PM8350B_SID << 8 | 0x65)
-#define PM8350B_ADC7_AMUX_THM3_400K_PU (PM8350B_SID << 8 | 0x66)
-#define PM8350B_ADC7_AMUX_THM4_400K_PU (PM8350B_SID << 8 | 0x67)
-#define PM8350B_ADC7_AMUX_THM5_400K_PU (PM8350B_SID << 8 | 0x68)
-#define PM8350B_ADC7_AMUX_THM6_400K_PU (PM8350B_SID << 8 | 0x69)
-#define PM8350B_ADC7_GPIO1_400K_PU (PM8350B_SID << 8 | 0x6a)
-#define PM8350B_ADC7_GPIO2_400K_PU (PM8350B_SID << 8 | 0x6b)
-#define PM8350B_ADC7_GPIO3_400K_PU (PM8350B_SID << 8 | 0x6c)
-#define PM8350B_ADC7_GPIO4_400K_PU (PM8350B_SID << 8 | 0x6d)
-#define PM8350B_ADC7_CC1_ID_400K_PU (PM8350B_SID << 8 | 0x73)
+#define PM8350B_ADC7_AMUX_THM1_400K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM1_400K_PU)
+#define PM8350B_ADC7_AMUX_THM2_400K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM2_400K_PU)
+#define PM8350B_ADC7_AMUX_THM3_400K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM3_400K_PU)
+#define PM8350B_ADC7_AMUX_THM4_400K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM4_400K_PU)
+#define PM8350B_ADC7_AMUX_THM5_400K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM5_400K_PU)
+#define PM8350B_ADC7_AMUX_THM6_400K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM6_400K_PU)
+#define PM8350B_ADC7_GPIO1_400K_PU (PM8350B_SID << 8 | ADC7_GPIO1_400K_PU)
+#define PM8350B_ADC7_GPIO2_400K_PU (PM8350B_SID << 8 | ADC7_GPIO2_400K_PU)
+#define PM8350B_ADC7_GPIO3_400K_PU (PM8350B_SID << 8 | ADC7_GPIO3_400K_PU)
+#define PM8350B_ADC7_GPIO4_400K_PU (PM8350B_SID << 8 | ADC7_GPIO4_400K_PU)
+#define PM8350B_ADC7_CC1_ID_400K_PU (PM8350B_SID << 8 | ADC7_CC1_ID_400K_PU)
/* 1/3 Divider */
-#define PM8350B_ADC7_GPIO1_DIV3 (PM8350B_SID << 8 | 0x8a)
-#define PM8350B_ADC7_GPIO2_DIV3 (PM8350B_SID << 8 | 0x8b)
-#define PM8350B_ADC7_GPIO3_DIV3 (PM8350B_SID << 8 | 0x8c)
-#define PM8350B_ADC7_GPIO4_DIV3 (PM8350B_SID << 8 | 0x8d)
+#define PM8350B_ADC7_GPIO1_DIV3 (PM8350B_SID << 8 | ADC7_GPIO1_DIV3)
+#define PM8350B_ADC7_GPIO2_DIV3 (PM8350B_SID << 8 | ADC7_GPIO2_DIV3)
+#define PM8350B_ADC7_GPIO3_DIV3 (PM8350B_SID << 8 | ADC7_GPIO3_DIV3)
+#define PM8350B_ADC7_GPIO4_DIV3 (PM8350B_SID << 8 | ADC7_GPIO4_DIV3)
-#define PM8350B_ADC7_VPH_PWR (PM8350B_SID << 8 | 0x8e)
-#define PM8350B_ADC7_VBAT_SNS (PM8350B_SID << 8 | 0x8f)
+#define PM8350B_ADC7_VPH_PWR (PM8350B_SID << 8 | ADC7_VPH_PWR)
+#define PM8350B_ADC7_VBAT_SNS (PM8350B_SID << 8 | ADC7_VBAT_SNS)
-#define PM8350B_ADC7_SBUx (PM8350B_SID << 8 | 0x94)
-#define PM8350B_ADC7_VBAT_2S_MID (PM8350B_SID << 8 | 0x96)
+#define PM8350B_ADC7_SBUx (PM8350B_SID << 8 | ADC7_SBU)
+#define PM8350B_ADC7_VBAT_2S_MID (PM8350B_SID << 8 | ADC7_VBAT_2S_MID)
#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PM8350B_H */
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h b/include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h
index 6c296870e95b..3d1a41a22cef 100644
--- a/include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h
@@ -10,37 +10,39 @@
#define PMK8350_SID 0
#endif
+#include <dt-bindings/iio/qcom,spmi-vadc.h>
+
/* ADC channels for PMK8350_ADC for PMIC7 */
-#define PMK8350_ADC7_REF_GND (PMK8350_SID << 8 | 0x0)
-#define PMK8350_ADC7_1P25VREF (PMK8350_SID << 8 | 0x01)
-#define PMK8350_ADC7_VREF_VADC (PMK8350_SID << 8 | 0x02)
-#define PMK8350_ADC7_DIE_TEMP (PMK8350_SID << 8 | 0x03)
+#define PMK8350_ADC7_REF_GND (PMK8350_SID << 8 | ADC7_REF_GND)
+#define PMK8350_ADC7_1P25VREF (PMK8350_SID << 8 | ADC7_1P25VREF)
+#define PMK8350_ADC7_VREF_VADC (PMK8350_SID << 8 | ADC7_VREF_VADC)
+#define PMK8350_ADC7_DIE_TEMP (PMK8350_SID << 8 | ADC7_DIE_TEMP)
-#define PMK8350_ADC7_AMUX_THM1 (PMK8350_SID << 8 | 0x04)
-#define PMK8350_ADC7_AMUX_THM2 (PMK8350_SID << 8 | 0x05)
-#define PMK8350_ADC7_AMUX_THM3 (PMK8350_SID << 8 | 0x06)
-#define PMK8350_ADC7_AMUX_THM4 (PMK8350_SID << 8 | 0x07)
-#define PMK8350_ADC7_AMUX_THM5 (PMK8350_SID << 8 | 0x08)
+#define PMK8350_ADC7_AMUX_THM1 (PMK8350_SID << 8 | ADC7_AMUX_THM1)
+#define PMK8350_ADC7_AMUX_THM2 (PMK8350_SID << 8 | ADC7_AMUX_THM2)
+#define PMK8350_ADC7_AMUX_THM3 (PMK8350_SID << 8 | ADC7_AMUX_THM3)
+#define PMK8350_ADC7_AMUX_THM4 (PMK8350_SID << 8 | ADC7_AMUX_THM4)
+#define PMK8350_ADC7_AMUX_THM5 (PMK8350_SID << 8 | ADC7_AMUX_THM5)
/* 30k pull-up1 */
-#define PMK8350_ADC7_AMUX_THM1_30K_PU (PMK8350_SID << 8 | 0x24)
-#define PMK8350_ADC7_AMUX_THM2_30K_PU (PMK8350_SID << 8 | 0x25)
-#define PMK8350_ADC7_AMUX_THM3_30K_PU (PMK8350_SID << 8 | 0x26)
-#define PMK8350_ADC7_AMUX_THM4_30K_PU (PMK8350_SID << 8 | 0x27)
-#define PMK8350_ADC7_AMUX_THM5_30K_PU (PMK8350_SID << 8 | 0x28)
+#define PMK8350_ADC7_AMUX_THM1_30K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM1_30K_PU)
+#define PMK8350_ADC7_AMUX_THM2_30K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM2_30K_PU)
+#define PMK8350_ADC7_AMUX_THM3_30K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM3_30K_PU)
+#define PMK8350_ADC7_AMUX_THM4_30K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM4_30K_PU)
+#define PMK8350_ADC7_AMUX_THM5_30K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM5_30K_PU)
/* 100k pull-up2 */
-#define PMK8350_ADC7_AMUX_THM1_100K_PU (PMK8350_SID << 8 | 0x44)
-#define PMK8350_ADC7_AMUX_THM2_100K_PU (PMK8350_SID << 8 | 0x45)
-#define PMK8350_ADC7_AMUX_THM3_100K_PU (PMK8350_SID << 8 | 0x46)
-#define PMK8350_ADC7_AMUX_THM4_100K_PU (PMK8350_SID << 8 | 0x47)
-#define PMK8350_ADC7_AMUX_THM5_100K_PU (PMK8350_SID << 8 | 0x48)
+#define PMK8350_ADC7_AMUX_THM1_100K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM1_100K_PU)
+#define PMK8350_ADC7_AMUX_THM2_100K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM2_100K_PU)
+#define PMK8350_ADC7_AMUX_THM3_100K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM3_100K_PU)
+#define PMK8350_ADC7_AMUX_THM4_100K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM4_100K_PU)
+#define PMK8350_ADC7_AMUX_THM5_100K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM5_100K_PU)
/* 400k pull-up3 */
-#define PMK8350_ADC7_AMUX_THM1_400K_PU (PMK8350_SID << 8 | 0x64)
-#define PMK8350_ADC7_AMUX_THM2_400K_PU (PMK8350_SID << 8 | 0x65)
-#define PMK8350_ADC7_AMUX_THM3_400K_PU (PMK8350_SID << 8 | 0x66)
-#define PMK8350_ADC7_AMUX_THM4_400K_PU (PMK8350_SID << 8 | 0x67)
-#define PMK8350_ADC7_AMUX_THM5_400K_PU (PMK8350_SID << 8 | 0x68)
+#define PMK8350_ADC7_AMUX_THM1_400K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM1_400K_PU)
+#define PMK8350_ADC7_AMUX_THM2_400K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM2_400K_PU)
+#define PMK8350_ADC7_AMUX_THM3_400K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM3_400K_PU)
+#define PMK8350_ADC7_AMUX_THM4_400K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM4_400K_PU)
+#define PMK8350_ADC7_AMUX_THM5_400K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM5_400K_PU)
#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PMK8350_H */
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h b/include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h
index d6df1b19e5ff..c5adfa82b20d 100644
--- a/include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h
@@ -10,19 +10,21 @@
#define PMR735A_SID 4
#endif
+#include <dt-bindings/iio/qcom,spmi-vadc.h>
+
/* ADC channels for PMR735A_ADC for PMIC7 */
-#define PMR735A_ADC7_REF_GND (PMR735A_SID << 8 | 0x0)
-#define PMR735A_ADC7_1P25VREF (PMR735A_SID << 8 | 0x01)
-#define PMR735A_ADC7_VREF_VADC (PMR735A_SID << 8 | 0x02)
-#define PMR735A_ADC7_DIE_TEMP (PMR735A_SID << 8 | 0x03)
+#define PMR735A_ADC7_REF_GND (PMR735A_SID << 8 | ADC7_REF_GND)
+#define PMR735A_ADC7_1P25VREF (PMR735A_SID << 8 | ADC7_1P25VREF)
+#define PMR735A_ADC7_VREF_VADC (PMR735A_SID << 8 | ADC7_VREF_VADC)
+#define PMR735A_ADC7_DIE_TEMP (PMR735A_SID << 8 | ADC7_DIE_TEMP)
-#define PMR735A_ADC7_GPIO1 (PMR735A_SID << 8 | 0x0a)
-#define PMR735A_ADC7_GPIO2 (PMR735A_SID << 8 | 0x0b)
-#define PMR735A_ADC7_GPIO3 (PMR735A_SID << 8 | 0x0c)
+#define PMR735A_ADC7_GPIO1 (PMR735A_SID << 8 | ADC7_GPIO1)
+#define PMR735A_ADC7_GPIO2 (PMR735A_SID << 8 | ADC7_GPIO2)
+#define PMR735A_ADC7_GPIO3 (PMR735A_SID << 8 | ADC7_GPIO3)
/* 100k pull-up2 */
-#define PMR735A_ADC7_GPIO1_100K_PU (PMR735A_SID << 8 | 0x4a)
-#define PMR735A_ADC7_GPIO2_100K_PU (PMR735A_SID << 8 | 0x4b)
-#define PMR735A_ADC7_GPIO3_100K_PU (PMR735A_SID << 8 | 0x4c)
+#define PMR735A_ADC7_GPIO1_100K_PU (PMR735A_SID << 8 | ADC7_GPIO1_100K_PU)
+#define PMR735A_ADC7_GPIO2_100K_PU (PMR735A_SID << 8 | ADC7_GPIO2_100K_PU)
+#define PMR735A_ADC7_GPIO3_100K_PU (PMR735A_SID << 8 | ADC7_GPIO3_100K_PU)
#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PMR735A_H */
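All the per-PMIC channel macros above share one encoding: the SPMI slave ID sits in bits [15:8] and the common ADC7 channel number from qcom,spmi-vadc.h sits in bits [7:0]. A minimal C sketch of splitting the two fields, using only values visible in this patch (PMR735A_SID is 4; ADC7_DIE_TEMP was 0x03); the helper names are hypothetical, not part of the bindings:

#include <stdint.h>

/* Recover the two fields of a composite VADC channel identifier. */
static inline uint8_t vadc_sid(uint16_t chan)  { return chan >> 8; }
static inline uint8_t vadc_chan(uint16_t chan) { return chan & 0xff; }

/* Example: PMR735A_ADC7_DIE_TEMP == (4 << 8 | 0x03) == 0x403. */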
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h b/include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h
index 8da0e7dab315..fdb8dd9ae541 100644
--- a/include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h
@@ -10,19 +10,21 @@
#define PMR735B_SID 5
#endif
+#include <dt-bindings/iio/qcom,spmi-vadc.h>
+
/* ADC channels for PMR735B_ADC for PMIC7 */
-#define PMR735B_ADC7_REF_GND (PMR735B_SID << 8 | 0x0)
-#define PMR735B_ADC7_1P25VREF (PMR735B_SID << 8 | 0x01)
-#define PMR735B_ADC7_VREF_VADC (PMR735B_SID << 8 | 0x02)
-#define PMR735B_ADC7_DIE_TEMP (PMR735B_SID << 8 | 0x03)
+#define PMR735B_ADC7_REF_GND (PMR735B_SID << 8 | ADC7_REF_GND)
+#define PMR735B_ADC7_1P25VREF (PMR735B_SID << 8 | ADC7_1P25VREF)
+#define PMR735B_ADC7_VREF_VADC (PMR735B_SID << 8 | ADC7_VREF_VADC)
+#define PMR735B_ADC7_DIE_TEMP (PMR735B_SID << 8 | ADC7_DIE_TEMP)
-#define PMR735B_ADC7_GPIO1 (PMR735B_SID << 8 | 0x0a)
-#define PMR735B_ADC7_GPIO2 (PMR735B_SID << 8 | 0x0b)
-#define PMR735B_ADC7_GPIO3 (PMR735B_SID << 8 | 0x0c)
+#define PMR735B_ADC7_GPIO1 (PMR735B_SID << 8 | ADC7_GPIO1)
+#define PMR735B_ADC7_GPIO2 (PMR735B_SID << 8 | ADC7_GPIO2)
+#define PMR735B_ADC7_GPIO3 (PMR735B_SID << 8 | ADC7_GPIO3)
/* 100k pull-up2 */
-#define PMR735B_ADC7_GPIO1_100K_PU (PMR735B_SID << 8 | 0x4a)
-#define PMR735B_ADC7_GPIO2_100K_PU (PMR735B_SID << 8 | 0x4b)
-#define PMR735B_ADC7_GPIO3_100K_PU (PMR735B_SID << 8 | 0x4c)
+#define PMR735B_ADC7_GPIO1_100K_PU (PMR735B_SID << 8 | ADC7_GPIO1_100K_PU)
+#define PMR735B_ADC7_GPIO2_100K_PU (PMR735B_SID << 8 | ADC7_GPIO2_100K_PU)
+#define PMR735B_ADC7_GPIO3_100K_PU (PMR735B_SID << 8 | ADC7_GPIO3_100K_PU)
#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PMR735B_H */
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-smb139x.h b/include/dt-bindings/iio/qcom,spmi-adc7-smb139x.h
new file mode 100644
index 000000000000..c0680d1285cf
--- /dev/null
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-smb139x.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause */
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_SMB139X_H
+#define _DT_BINDINGS_QCOM_SPMI_VADC_SMB139X_H
+
+#include <dt-bindings/iio/qcom,spmi-vadc.h>
+
+#define SMB139x_1_ADC7_SMB_TEMP (SMB139x_1_SID << 8 | ADC7_SMB_TEMP)
+#define SMB139x_1_ADC7_ICHG_SMB (SMB139x_1_SID << 8 | ADC7_ICHG_SMB)
+#define SMB139x_1_ADC7_IIN_SMB (SMB139x_1_SID << 8 | ADC7_IIN_SMB)
+
+#define SMB139x_2_ADC7_SMB_TEMP (SMB139x_2_SID << 8 | ADC7_SMB_TEMP)
+#define SMB139x_2_ADC7_ICHG_SMB (SMB139x_2_SID << 8 | ADC7_ICHG_SMB)
+#define SMB139x_2_ADC7_IIN_SMB (SMB139x_2_SID << 8 | ADC7_IIN_SMB)
+
+#endif
diff --git a/include/dt-bindings/iio/qcom,spmi-vadc.h b/include/dt-bindings/iio/qcom,spmi-vadc.h
index 08adfe25964c..ef07ecd4d585 100644
--- a/include/dt-bindings/iio/qcom,spmi-vadc.h
+++ b/include/dt-bindings/iio/qcom,spmi-vadc.h
@@ -239,12 +239,15 @@
#define ADC7_GPIO3 0x0c
#define ADC7_GPIO4 0x0d
+#define ADC7_SMB_TEMP 0x06
#define ADC7_CHG_TEMP 0x10
#define ADC7_USB_IN_V_16 0x11
#define ADC7_VDC_16 0x12
#define ADC7_CC1_ID 0x13
#define ADC7_VREF_BAT_THERM 0x15
#define ADC7_IIN_FB 0x17
+#define ADC7_ICHG_SMB 0x18
+#define ADC7_IIN_SMB 0x19
/* 30k pull-up1 */
#define ADC7_AMUX_THM1_30K_PU 0x24
diff --git a/include/dt-bindings/interconnect/qcom,msm8996-cbf.h b/include/dt-bindings/interconnect/qcom,msm8996-cbf.h
new file mode 100644
index 000000000000..aac5e69f6bd5
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,msm8996-cbf.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2023 Linaro Ltd. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_MSM8996_CBF_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_MSM8996_CBF_H
+
+#define MASTER_CBF_M4M 0
+#define SLAVE_CBF_M4M 1
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,rpm-icc.h b/include/dt-bindings/interconnect/qcom,rpm-icc.h
new file mode 100644
index 000000000000..2cd56f91e5c5
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,rpm-icc.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_RPM_ICC_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_RPM_ICC_H
+
+#define RPM_ACTIVE_TAG (1 << 0)
+#define RPM_SLEEP_TAG (1 << 1)
+#define RPM_ALWAYS_TAG (RPM_ACTIVE_TAG | RPM_SLEEP_TAG)
+
+#endif
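RPM_ACTIVE_TAG and RPM_SLEEP_TAG are single-bit path tags, which is why RPM_ALWAYS_TAG can simply OR them together. As a hedged sketch (not part of this patch), a consumer could apply a tag through the standard in-kernel interconnect API before voting bandwidth:

#include <linux/interconnect.h>

/* Vote 800 MB/s peak that should only be honoured in the active set. */
static int tag_active_only(struct icc_path *path)
{
	icc_set_tag(path, RPM_ACTIVE_TAG);   /* tags all subsequent votes */
	return icc_set_bw(path, 0, 800000);  /* avg/peak in kBps */
}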
diff --git a/include/dt-bindings/interconnect/qcom,sdx75.h b/include/dt-bindings/interconnect/qcom,sdx75.h
new file mode 100644
index 000000000000..e903f5f3dd8f
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sdx75.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SDX75_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SDX75_H
+
+#define MASTER_QPIC_CORE 0
+#define MASTER_QUP_CORE_0 1
+#define SLAVE_QPIC_CORE 2
+#define SLAVE_QUP_CORE_0 3
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+#define MASTER_CNOC_DC_NOC 0
+#define SLAVE_LAGG_CFG 1
+#define SLAVE_MCCC_MASTER 2
+#define SLAVE_GEM_NOC_CFG 3
+#define SLAVE_SNOOP_BWMON 4
+
+#define MASTER_SYS_TCU 0
+#define MASTER_APPSS_PROC 1
+#define MASTER_GEM_NOC_CFG 2
+#define MASTER_MSS_PROC 3
+#define MASTER_ANOC_PCIE_GEM_NOC 4
+#define MASTER_SNOC_SF_MEM_NOC 5
+#define MASTER_GIC 6
+#define MASTER_IPA_PCIE 7
+#define SLAVE_GEM_NOC_CNOC 8
+#define SLAVE_LLCC 9
+#define SLAVE_MEM_NOC_PCIE_SNOC 10
+#define SLAVE_SERVICE_GEM_NOC 11
+
+#define MASTER_PCIE_0 0
+#define MASTER_PCIE_1 1
+#define MASTER_PCIE_2 2
+#define SLAVE_ANOC_PCIE_GEM_NOC 3
+
+#define MASTER_AUDIO 0
+#define MASTER_GIC_AHB 1
+#define MASTER_PCIE_RSCC 2
+#define MASTER_QDSS_BAM 3
+#define MASTER_QPIC 4
+#define MASTER_QUP_0 5
+#define MASTER_ANOC_SNOC 6
+#define MASTER_GEM_NOC_CNOC 7
+#define MASTER_GEM_NOC_PCIE_SNOC 8
+#define MASTER_SNOC_CFG 9
+#define MASTER_PCIE_ANOC_CFG 10
+#define MASTER_CRYPTO 11
+#define MASTER_IPA 12
+#define MASTER_MVMSS 13
+#define MASTER_EMAC_0 14
+#define MASTER_EMAC_1 15
+#define MASTER_QDSS_ETR 16
+#define MASTER_QDSS_ETR_1 17
+#define MASTER_SDCC_1 18
+#define MASTER_SDCC_4 19
+#define MASTER_USB3_0 20
+#define SLAVE_ETH0_CFG 21
+#define SLAVE_ETH1_CFG 22
+#define SLAVE_AUDIO 23
+#define SLAVE_CLK_CTL 24
+#define SLAVE_CRYPTO_0_CFG 25
+#define SLAVE_IMEM_CFG 26
+#define SLAVE_IPA_CFG 27
+#define SLAVE_IPC_ROUTER_CFG 28
+#define SLAVE_CNOC_MSS 29
+#define SLAVE_ICBDI_MVMSS_CFG 30
+#define SLAVE_PCIE_0_CFG 31
+#define SLAVE_PCIE_1_CFG 32
+#define SLAVE_PCIE_2_CFG 33
+#define SLAVE_PCIE_RSC_CFG 34
+#define SLAVE_PDM 35
+#define SLAVE_PRNG 36
+#define SLAVE_QDSS_CFG 37
+#define SLAVE_QPIC 38
+#define SLAVE_QUP_0 39
+#define SLAVE_SDCC_1 40
+#define SLAVE_SDCC_4 41
+#define SLAVE_SPMI_VGI_COEX 42
+#define SLAVE_TCSR 43
+#define SLAVE_TLMM 44
+#define SLAVE_USB3 45
+#define SLAVE_USB3_PHY_CFG 46
+#define SLAVE_A1NOC_CFG 47
+#define SLAVE_DDRSS_CFG 48
+#define SLAVE_SNOC_GEM_NOC_SF 49
+#define SLAVE_SNOC_CFG 50
+#define SLAVE_PCIE_ANOC_CFG 51
+#define SLAVE_IMEM 52
+#define SLAVE_SERVICE_PCIE_ANOC 53
+#define SLAVE_SERVICE_SNOC 54
+#define SLAVE_PCIE_0 55
+#define SLAVE_PCIE_1 56
+#define SLAVE_PCIE_2 57
+#define SLAVE_QDSS_STM 58
+#define SLAVE_TCU 59
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sm6115.h b/include/dt-bindings/interconnect/qcom,sm6115.h
new file mode 100644
index 000000000000..21090e585f05
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sm6115.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SM6115_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SM6115_H
+
+/* BIMC */
+#define MASTER_AMPSS_M0 0
+#define MASTER_SNOC_BIMC_RT 1
+#define MASTER_SNOC_BIMC_NRT 2
+#define SNOC_BIMC_MAS 3
+#define MASTER_GRAPHICS_3D 4
+#define MASTER_TCU_0 5
+#define SLAVE_EBI_CH0 6
+#define BIMC_SNOC_SLV 7
+
+/* CNOC */
+#define SNOC_CNOC_MAS 0
+#define MASTER_QDSS_DAP 1
+#define SLAVE_AHB2PHY_USB 2
+#define SLAVE_APSS_THROTTLE_CFG 3
+#define SLAVE_BIMC_CFG 4
+#define SLAVE_BOOT_ROM 5
+#define SLAVE_CAMERA_NRT_THROTTLE_CFG 6
+#define SLAVE_CAMERA_RT_THROTTLE_CFG 7
+#define SLAVE_CAMERA_CFG 8
+#define SLAVE_CLK_CTL 9
+#define SLAVE_RBCPR_CX_CFG 10
+#define SLAVE_RBCPR_MX_CFG 11
+#define SLAVE_CRYPTO_0_CFG 12
+#define SLAVE_DCC_CFG 13
+#define SLAVE_DDR_PHY_CFG 14
+#define SLAVE_DDR_SS_CFG 15
+#define SLAVE_DISPLAY_CFG 16
+#define SLAVE_DISPLAY_THROTTLE_CFG 17
+#define SLAVE_GPU_CFG 18
+#define SLAVE_GPU_THROTTLE_CFG 19
+#define SLAVE_HWKM_CORE 20
+#define SLAVE_IMEM_CFG 21
+#define SLAVE_IPA_CFG 22
+#define SLAVE_LPASS 23
+#define SLAVE_MAPSS 24
+#define SLAVE_MDSP_MPU_CFG 25
+#define SLAVE_MESSAGE_RAM 26
+#define SLAVE_CNOC_MSS 27
+#define SLAVE_PDM 28
+#define SLAVE_PIMEM_CFG 29
+#define SLAVE_PKA_CORE 30
+#define SLAVE_PMIC_ARB 31
+#define SLAVE_QDSS_CFG 32
+#define SLAVE_QM_CFG 33
+#define SLAVE_QM_MPU_CFG 34
+#define SLAVE_QPIC 35
+#define SLAVE_QUP_0 36
+#define SLAVE_RPM 37
+#define SLAVE_SDCC_1 38
+#define SLAVE_SDCC_2 39
+#define SLAVE_SECURITY 40
+#define SLAVE_SNOC_CFG 41
+#define SLAVE_TCSR 42
+#define SLAVE_TLMM 43
+#define SLAVE_USB3 44
+#define SLAVE_VENUS_CFG 45
+#define SLAVE_VENUS_THROTTLE_CFG 46
+#define SLAVE_VSENSE_CTRL_CFG 47
+#define SLAVE_SERVICE_CNOC 48
+
+/* SNOC */
+#define MASTER_CRYPTO_CORE0 0
+#define MASTER_SNOC_CFG 1
+#define MASTER_TIC 2
+#define MASTER_ANOC_SNOC 3
+#define BIMC_SNOC_MAS 4
+#define MASTER_PIMEM 5
+#define MASTER_QDSS_BAM 6
+#define MASTER_QPIC 7
+#define MASTER_QUP_0 8
+#define MASTER_IPA 9
+#define MASTER_QDSS_ETR 10
+#define MASTER_SDCC_1 11
+#define MASTER_SDCC_2 12
+#define MASTER_USB3 13
+#define SLAVE_APPSS 14
+#define SNOC_CNOC_SLV 15
+#define SLAVE_OCIMEM 16
+#define SLAVE_PIMEM 17
+#define SNOC_BIMC_SLV 18
+#define SLAVE_SERVICE_SNOC 19
+#define SLAVE_QDSS_STM 20
+#define SLAVE_TCU 21
+#define SLAVE_ANOC_SNOC 22
+
+/* CLK Virtual */
+#define MASTER_QUP_CORE_0 0
+#define SLAVE_QUP_CORE_0 1
+
+/* MMRT Virtual */
+#define MASTER_CAMNOC_HF 0
+#define MASTER_MDP_PORT0 1
+#define SLAVE_SNOC_BIMC_RT 2
+
+/* MMNRT Virtual */
+#define MASTER_CAMNOC_SF 0
+#define MASTER_VIDEO_P0 1
+#define MASTER_VIDEO_PROC 2
+#define SLAVE_SNOC_BIMC_NRT 3
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sm8250.h b/include/dt-bindings/interconnect/qcom,sm8250.h
index a4af5cc19271..2a656c02df4b 100644
--- a/include/dt-bindings/interconnect/qcom,sm8250.h
+++ b/include/dt-bindings/interconnect/qcom,sm8250.h
@@ -166,4 +166,11 @@
#define SLAVE_QDSS_STM 17
#define SLAVE_TCU 18
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define MASTER_QUP_CORE_2 2
+#define SLAVE_QUP_CORE_0 3
+#define SLAVE_QUP_CORE_1 4
+#define SLAVE_QUP_CORE_2 5
+
#endif
diff --git a/include/dt-bindings/interconnect/qcom,sm8650-rpmh.h b/include/dt-bindings/interconnect/qcom,sm8650-rpmh.h
new file mode 100644
index 000000000000..6c1eaf04e241
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sm8650-rpmh.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SM8650_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SM8650_H
+
+#define MASTER_QSPI_0 0
+#define MASTER_QUP_1 1
+#define MASTER_QUP_3 2
+#define MASTER_SDCC_4 3
+#define MASTER_UFS_MEM 4
+#define MASTER_USB3_0 5
+#define SLAVE_A1NOC_SNOC 6
+
+#define MASTER_QDSS_BAM 0
+#define MASTER_QUP_2 1
+#define MASTER_CRYPTO 2
+#define MASTER_IPA 3
+#define MASTER_SP 4
+#define MASTER_QDSS_ETR 5
+#define MASTER_QDSS_ETR_1 6
+#define MASTER_SDCC_2 7
+#define SLAVE_A2NOC_SNOC 8
+
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define MASTER_QUP_CORE_2 2
+#define SLAVE_QUP_CORE_0 3
+#define SLAVE_QUP_CORE_1 4
+#define SLAVE_QUP_CORE_2 5
+
+#define MASTER_CNOC_CFG 0
+#define SLAVE_AHB2PHY_SOUTH 1
+#define SLAVE_AHB2PHY_NORTH 2
+#define SLAVE_CAMERA_CFG 3
+#define SLAVE_CLK_CTL 4
+#define SLAVE_RBCPR_CX_CFG 5
+#define SLAVE_CPR_HMX 6
+#define SLAVE_RBCPR_MMCX_CFG 7
+#define SLAVE_RBCPR_MXA_CFG 8
+#define SLAVE_RBCPR_MXC_CFG 9
+#define SLAVE_CPR_NSPCX 10
+#define SLAVE_CRYPTO_0_CFG 11
+#define SLAVE_CX_RDPM 12
+#define SLAVE_DISPLAY_CFG 13
+#define SLAVE_GFX3D_CFG 14
+#define SLAVE_I2C 15
+#define SLAVE_I3C_IBI0_CFG 16
+#define SLAVE_I3C_IBI1_CFG 17
+#define SLAVE_IMEM_CFG 18
+#define SLAVE_CNOC_MSS 19
+#define SLAVE_MX_2_RDPM 20
+#define SLAVE_MX_RDPM 21
+#define SLAVE_PCIE_0_CFG 22
+#define SLAVE_PCIE_1_CFG 23
+#define SLAVE_PCIE_RSCC 24
+#define SLAVE_PDM 25
+#define SLAVE_PRNG 26
+#define SLAVE_QDSS_CFG 27
+#define SLAVE_QSPI_0 28
+#define SLAVE_QUP_3 29
+#define SLAVE_QUP_1 30
+#define SLAVE_QUP_2 31
+#define SLAVE_SDCC_2 32
+#define SLAVE_SDCC_4 33
+#define SLAVE_SPSS_CFG 34
+#define SLAVE_TCSR 35
+#define SLAVE_TLMM 36
+#define SLAVE_UFS_MEM_CFG 37
+#define SLAVE_USB3_0 38
+#define SLAVE_VENUS_CFG 39
+#define SLAVE_VSENSE_CTRL_CFG 40
+#define SLAVE_CNOC_MNOC_CFG 41
+#define SLAVE_NSP_QTB_CFG 42
+#define SLAVE_PCIE_ANOC_CFG 43
+#define SLAVE_SERVICE_CNOC_CFG 44
+#define SLAVE_QDSS_STM 45
+#define SLAVE_TCU 46
+
+#define MASTER_GEM_NOC_CNOC 0
+#define MASTER_GEM_NOC_PCIE_SNOC 1
+#define SLAVE_AOSS 2
+#define SLAVE_IPA_CFG 3
+#define SLAVE_IPC_ROUTER_CFG 4
+#define SLAVE_TME_CFG 5
+#define SLAVE_APPSS 6
+#define SLAVE_CNOC_CFG 7
+#define SLAVE_DDRSS_CFG 8
+#define SLAVE_IMEM 9
+#define SLAVE_SERVICE_CNOC 10
+#define SLAVE_PCIE_0 11
+#define SLAVE_PCIE_1 12
+
+#define MASTER_GPU_TCU 0
+#define MASTER_SYS_TCU 1
+#define MASTER_UBWC_P_TCU 2
+#define MASTER_APPSS_PROC 3
+#define MASTER_GFX3D 4
+#define MASTER_LPASS_GEM_NOC 5
+#define MASTER_MSS_PROC 6
+#define MASTER_MNOC_HF_MEM_NOC 7
+#define MASTER_MNOC_SF_MEM_NOC 8
+#define MASTER_COMPUTE_NOC 9
+#define MASTER_ANOC_PCIE_GEM_NOC 10
+#define MASTER_SNOC_SF_MEM_NOC 11
+#define MASTER_UBWC_P 12
+#define MASTER_GIC 13
+#define SLAVE_GEM_NOC_CNOC 14
+#define SLAVE_LLCC 15
+#define SLAVE_MEM_NOC_PCIE_SNOC 16
+
+#define MASTER_LPIAON_NOC 0
+#define SLAVE_LPASS_GEM_NOC 1
+
+#define MASTER_LPASS_LPINOC 0
+#define SLAVE_LPIAON_NOC_LPASS_AG_NOC 1
+
+#define MASTER_LPASS_PROC 0
+#define SLAVE_LPICX_NOC_LPIAON_NOC 1
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+#define MASTER_CAMNOC_HF 0
+#define MASTER_CAMNOC_ICP 1
+#define MASTER_CAMNOC_SF 2
+#define MASTER_MDP 3
+#define MASTER_CDSP_HCP 4
+#define MASTER_VIDEO 5
+#define MASTER_VIDEO_CV_PROC 6
+#define MASTER_VIDEO_PROC 7
+#define MASTER_VIDEO_V_PROC 8
+#define MASTER_CNOC_MNOC_CFG 9
+#define SLAVE_MNOC_HF_MEM_NOC 10
+#define SLAVE_MNOC_SF_MEM_NOC 11
+#define SLAVE_SERVICE_MNOC 12
+
+#define MASTER_CDSP_PROC 0
+#define SLAVE_CDSP_MEM_NOC 1
+
+#define MASTER_PCIE_ANOC_CFG 0
+#define MASTER_PCIE_0 1
+#define MASTER_PCIE_1 2
+#define SLAVE_ANOC_PCIE_GEM_NOC 3
+#define SLAVE_SERVICE_PCIE_ANOC 4
+
+#define MASTER_A1NOC_SNOC 0
+#define MASTER_A2NOC_SNOC 1
+#define SLAVE_SNOC_GEM_NOC_SF 2
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,x1e80100-rpmh.h b/include/dt-bindings/interconnect/qcom,x1e80100-rpmh.h
new file mode 100644
index 000000000000..a38c3472698a
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,x1e80100-rpmh.h
@@ -0,0 +1,207 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_X1E80100_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_X1E80100_H
+
+#define MASTER_QSPI_0 0
+#define MASTER_QUP_1 1
+#define MASTER_SDCC_4 2
+#define MASTER_UFS_MEM 3
+#define SLAVE_A1NOC_SNOC 4
+
+#define MASTER_QUP_0 0
+#define MASTER_QUP_2 1
+#define MASTER_CRYPTO 2
+#define MASTER_SP 3
+#define MASTER_QDSS_ETR 4
+#define MASTER_QDSS_ETR_1 5
+#define MASTER_SDCC_2 6
+#define SLAVE_A2NOC_SNOC 7
+
+#define MASTER_DDR_PERF_MODE 0
+#define MASTER_QUP_CORE_0 1
+#define MASTER_QUP_CORE_1 2
+#define MASTER_QUP_CORE_2 3
+#define SLAVE_DDR_PERF_MODE 4
+#define SLAVE_QUP_CORE_0 5
+#define SLAVE_QUP_CORE_1 6
+#define SLAVE_QUP_CORE_2 7
+
+#define MASTER_CNOC_CFG 0
+#define SLAVE_AHB2PHY_SOUTH 1
+#define SLAVE_AHB2PHY_NORTH 2
+#define SLAVE_AHB2PHY_2 3
+#define SLAVE_AV1_ENC_CFG 4
+#define SLAVE_CAMERA_CFG 5
+#define SLAVE_CLK_CTL 6
+#define SLAVE_CRYPTO_0_CFG 7
+#define SLAVE_DISPLAY_CFG 8
+#define SLAVE_GFX3D_CFG 9
+#define SLAVE_IMEM_CFG 10
+#define SLAVE_IPC_ROUTER_CFG 11
+#define SLAVE_PCIE_0_CFG 12
+#define SLAVE_PCIE_1_CFG 13
+#define SLAVE_PCIE_2_CFG 14
+#define SLAVE_PCIE_3_CFG 15
+#define SLAVE_PCIE_4_CFG 16
+#define SLAVE_PCIE_5_CFG 17
+#define SLAVE_PCIE_6A_CFG 18
+#define SLAVE_PCIE_6B_CFG 19
+#define SLAVE_PCIE_RSC_CFG 20
+#define SLAVE_PDM 21
+#define SLAVE_PRNG 22
+#define SLAVE_QDSS_CFG 23
+#define SLAVE_QSPI_0 24
+#define SLAVE_QUP_0 25
+#define SLAVE_QUP_1 26
+#define SLAVE_QUP_2 27
+#define SLAVE_SDCC_2 28
+#define SLAVE_SDCC_4 29
+#define SLAVE_SMMUV3_CFG 30
+#define SLAVE_TCSR 31
+#define SLAVE_TLMM 32
+#define SLAVE_UFS_MEM_CFG 33
+#define SLAVE_USB2 34
+#define SLAVE_USB3_0 35
+#define SLAVE_USB3_1 36
+#define SLAVE_USB3_2 37
+#define SLAVE_USB3_MP 38
+#define SLAVE_USB4_0 39
+#define SLAVE_USB4_1 40
+#define SLAVE_USB4_2 41
+#define SLAVE_VENUS_CFG 42
+#define SLAVE_LPASS_QTB_CFG 43
+#define SLAVE_CNOC_MNOC_CFG 44
+#define SLAVE_NSP_QTB_CFG 45
+#define SLAVE_QDSS_STM 46
+#define SLAVE_TCU 47
+
+#define MASTER_GEM_NOC_CNOC 0
+#define MASTER_GEM_NOC_PCIE_SNOC 1
+#define SLAVE_AOSS 2
+#define SLAVE_TME_CFG 3
+#define SLAVE_APPSS 4
+#define SLAVE_CNOC_CFG 5
+#define SLAVE_BOOT_IMEM 6
+#define SLAVE_IMEM 7
+#define SLAVE_PCIE_0 8
+#define SLAVE_PCIE_1 9
+#define SLAVE_PCIE_2 10
+#define SLAVE_PCIE_3 11
+#define SLAVE_PCIE_4 12
+#define SLAVE_PCIE_5 13
+#define SLAVE_PCIE_6A 14
+#define SLAVE_PCIE_6B 15
+
+#define MASTER_GPU_TCU 0
+#define MASTER_PCIE_TCU 1
+#define MASTER_SYS_TCU 2
+#define MASTER_APPSS_PROC 3
+#define MASTER_GFX3D 4
+#define MASTER_LPASS_GEM_NOC 5
+#define MASTER_MNOC_HF_MEM_NOC 6
+#define MASTER_MNOC_SF_MEM_NOC 7
+#define MASTER_COMPUTE_NOC 8
+#define MASTER_ANOC_PCIE_GEM_NOC 9
+#define MASTER_SNOC_SF_MEM_NOC 10
+#define MASTER_GIC2 11
+#define SLAVE_GEM_NOC_CNOC 12
+#define SLAVE_LLCC 13
+#define SLAVE_MEM_NOC_PCIE_SNOC 14
+#define MASTER_MNOC_HF_MEM_NOC_DISP 15
+#define MASTER_ANOC_PCIE_GEM_NOC_DISP 16
+#define SLAVE_LLCC_DISP 17
+#define MASTER_ANOC_PCIE_GEM_NOC_PCIE 18
+#define SLAVE_LLCC_PCIE 19
+
+#define MASTER_LPIAON_NOC 0
+#define SLAVE_LPASS_GEM_NOC 1
+
+#define MASTER_LPASS_LPINOC 0
+#define SLAVE_LPIAON_NOC_LPASS_AG_NOC 1
+
+#define MASTER_LPASS_PROC 0
+#define SLAVE_LPICX_NOC_LPIAON_NOC 1
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+#define MASTER_LLCC_DISP 2
+#define SLAVE_EBI1_DISP 3
+#define MASTER_LLCC_PCIE 4
+#define SLAVE_EBI1_PCIE 5
+
+#define MASTER_AV1_ENC 0
+#define MASTER_CAMNOC_HF 1
+#define MASTER_CAMNOC_ICP 2
+#define MASTER_CAMNOC_SF 3
+#define MASTER_EVA 4
+#define MASTER_MDP 5
+#define MASTER_VIDEO 6
+#define MASTER_VIDEO_CV_PROC 7
+#define MASTER_VIDEO_V_PROC 8
+#define MASTER_CNOC_MNOC_CFG 9
+#define SLAVE_MNOC_HF_MEM_NOC 10
+#define SLAVE_MNOC_SF_MEM_NOC 11
+#define SLAVE_SERVICE_MNOC 12
+#define MASTER_MDP_DISP 13
+#define SLAVE_MNOC_HF_MEM_NOC_DISP 14
+
+#define MASTER_CDSP_PROC 0
+#define SLAVE_CDSP_MEM_NOC 1
+
+#define MASTER_PCIE_NORTH 0
+#define MASTER_PCIE_SOUTH 1
+#define SLAVE_ANOC_PCIE_GEM_NOC 2
+#define MASTER_PCIE_NORTH_PCIE 3
+#define MASTER_PCIE_SOUTH_PCIE 4
+#define SLAVE_ANOC_PCIE_GEM_NOC_PCIE 5
+
+#define MASTER_PCIE_3 0
+#define MASTER_PCIE_4 1
+#define MASTER_PCIE_5 2
+#define SLAVE_PCIE_NORTH 3
+#define MASTER_PCIE_3_PCIE 4
+#define MASTER_PCIE_4_PCIE 5
+#define MASTER_PCIE_5_PCIE 6
+#define SLAVE_PCIE_NORTH_PCIE 7
+
+#define MASTER_PCIE_0 0
+#define MASTER_PCIE_1 1
+#define MASTER_PCIE_2 2
+#define MASTER_PCIE_6A 3
+#define MASTER_PCIE_6B 4
+#define SLAVE_PCIE_SOUTH 5
+#define MASTER_PCIE_0_PCIE 6
+#define MASTER_PCIE_1_PCIE 7
+#define MASTER_PCIE_2_PCIE 8
+#define MASTER_PCIE_6A_PCIE 9
+#define MASTER_PCIE_6B_PCIE 10
+#define SLAVE_PCIE_SOUTH_PCIE 11
+
+#define MASTER_A1NOC_SNOC 0
+#define MASTER_A2NOC_SNOC 1
+#define MASTER_GIC1 2
+#define MASTER_USB_NOC_SNOC 3
+#define SLAVE_SNOC_GEM_NOC_SF 4
+
+#define MASTER_AGGRE_USB_NORTH 0
+#define MASTER_AGGRE_USB_SOUTH 1
+#define SLAVE_USB_NOC_SNOC 2
+
+#define MASTER_USB2 0
+#define MASTER_USB3_MP 1
+#define SLAVE_AGGRE_USB_NORTH 2
+
+#define MASTER_USB3_0 0
+#define MASTER_USB3_1 1
+#define MASTER_USB3_2 2
+#define MASTER_USB4_0 3
+#define MASTER_USB4_1 4
+#define MASTER_USB4_2 5
+#define SLAVE_AGGRE_USB_SOUTH 6
+
+#endif
diff --git a/include/dt-bindings/interrupt-controller/amlogic,meson-g12a-gpio-intc.h b/include/dt-bindings/interrupt-controller/amlogic,meson-g12a-gpio-intc.h
new file mode 100644
index 000000000000..bd415cb7b669
--- /dev/null
+++ b/include/dt-bindings/interrupt-controller/amlogic,meson-g12a-gpio-intc.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2023 Amlogic, Inc. All rights reserved.
+ * Author: Huqiang Qin <huqiang.qin@amlogic.com>
+ */
+
+#ifndef _DT_BINDINGS_IRQ_MESON_G12A_GPIO_H
+#define _DT_BINDINGS_IRQ_MESON_G12A_GPIO_H
+
+/* IRQID[11:0] - GPIOAO[11:0] */
+#define IRQID_GPIOAO_0 0
+#define IRQID_GPIOAO_1 1
+#define IRQID_GPIOAO_2 2
+#define IRQID_GPIOAO_3 3
+#define IRQID_GPIOAO_4 4
+#define IRQID_GPIOAO_5 5
+#define IRQID_GPIOAO_6 6
+#define IRQID_GPIOAO_7 7
+#define IRQID_GPIOAO_8 8
+#define IRQID_GPIOAO_9 9
+#define IRQID_GPIOAO_10 10
+#define IRQID_GPIOAO_11 11
+
+/* IRQID[27:12] - GPIOZ[15:0] */
+#define IRQID_GPIOZ_0 12
+#define IRQID_GPIOZ_1 13
+#define IRQID_GPIOZ_2 14
+#define IRQID_GPIOZ_3 15
+#define IRQID_GPIOZ_4 16
+#define IRQID_GPIOZ_5 17
+#define IRQID_GPIOZ_6 18
+#define IRQID_GPIOZ_7 19
+#define IRQID_GPIOZ_8 20
+#define IRQID_GPIOZ_9 21
+#define IRQID_GPIOZ_10 22
+#define IRQID_GPIOZ_11 23
+#define IRQID_GPIOZ_12 24
+#define IRQID_GPIOZ_13 25
+#define IRQID_GPIOZ_14 26
+#define IRQID_GPIOZ_15 27
+
+/* IRQID[36:28] - GPIOH[8:0] */
+#define IRQID_GPIOH_0 28
+#define IRQID_GPIOH_1 29
+#define IRQID_GPIOH_2 30
+#define IRQID_GPIOH_3 31
+#define IRQID_GPIOH_4 32
+#define IRQID_GPIOH_5 33
+#define IRQID_GPIOH_6 34
+#define IRQID_GPIOH_7 35
+#define IRQID_GPIOH_8 36
+
+/* IRQID[52:37] - BOOT[15:0] */
+#define IRQID_BOOT_0 37
+#define IRQID_BOOT_1 38
+#define IRQID_BOOT_2 39
+#define IRQID_BOOT_3 40
+#define IRQID_BOOT_4 41
+#define IRQID_BOOT_5 42
+#define IRQID_BOOT_6 43
+#define IRQID_BOOT_7 44
+#define IRQID_BOOT_8 45
+#define IRQID_BOOT_9 46
+#define IRQID_BOOT_10 47
+#define IRQID_BOOT_11 48
+#define IRQID_BOOT_12 49
+#define IRQID_BOOT_13 50
+#define IRQID_BOOT_14 51
+#define IRQID_BOOT_15 52
+
+/* IRQID[60:53] - GPIOC[7:0] */
+#define IRQID_GPIOC_0 53
+#define IRQID_GPIOC_1 54
+#define IRQID_GPIOC_2 55
+#define IRQID_GPIOC_3 56
+#define IRQID_GPIOC_4 57
+#define IRQID_GPIOC_5 58
+#define IRQID_GPIOC_6 59
+#define IRQID_GPIOC_7 60
+
+/* IRQID[76:61] - GPIOA[15:0] */
+#define IRQID_GPIOA_0 61
+#define IRQID_GPIOA_1 62
+#define IRQID_GPIOA_2 63
+#define IRQID_GPIOA_3 64
+#define IRQID_GPIOA_4 65
+#define IRQID_GPIOA_5 66
+#define IRQID_GPIOA_6 67
+#define IRQID_GPIOA_7 68
+#define IRQID_GPIOA_8 69
+#define IRQID_GPIOA_9 70
+#define IRQID_GPIOA_10 71
+#define IRQID_GPIOA_11 72
+#define IRQID_GPIOA_12 73
+#define IRQID_GPIOA_13 74
+#define IRQID_GPIOA_14 75
+#define IRQID_GPIOA_15 76
+
+/* IRQID[96:77] - GPIOX[19:0] */
+#define IRQID_GPIOX_0 77
+#define IRQID_GPIOX_1 78
+#define IRQID_GPIOX_2 79
+#define IRQID_GPIOX_3 80
+#define IRQID_GPIOX_4 81
+#define IRQID_GPIOX_5 82
+#define IRQID_GPIOX_6 83
+#define IRQID_GPIOX_7 84
+#define IRQID_GPIOX_8 85
+#define IRQID_GPIOX_9 86
+#define IRQID_GPIOX_10 87
+#define IRQID_GPIOX_11 88
+#define IRQID_GPIOX_12 89
+#define IRQID_GPIOX_13 90
+#define IRQID_GPIOX_14 91
+#define IRQID_GPIOX_15 92
+#define IRQID_GPIOX_16 93
+#define IRQID_GPIOX_17 94
+#define IRQID_GPIOX_18 95
+#define IRQID_GPIOX_19 96
+
+/* IRQID[99:97] - GPIOE[2:0] */
+#define IRQID_GPIOE_0 97
+#define IRQID_GPIOE_1 98
+#define IRQID_GPIOE_2 99
+
+#endif /* _DT_BINDINGS_IRQ_MESON_G12A_GPIO_H */
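Each IRQID_* group above is consecutive, so a pad's hwirq number is just the group's base plus the pad offset (GPIOAO starts at 0, GPIOZ at 12, GPIOH at 28, and so on). Purely as an illustration, the same tables could be expressed as base-plus-offset macros; these names are hypothetical and not part of the header:

/* Illustrative only -- equivalent to the IRQID_* tables above. */
#define IRQID_GPIOAO(n)  (0 + (n))   /* n = 0..11 */
#define IRQID_GPIOZ(n)   (12 + (n))  /* n = 0..15 */
#define IRQID_GPIOH(n)   (28 + (n))  /* n = 0..8  */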
diff --git a/include/dt-bindings/leds/leds-lp55xx.h b/include/dt-bindings/leds/leds-lp55xx.h
new file mode 100644
index 000000000000..a4fb4567715d
--- /dev/null
+++ b/include/dt-bindings/leds/leds-lp55xx.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+#ifndef _DT_BINDINGS_LEDS_LP55XX_H
+#define _DT_BINDINGS_LEDS_LP55XX_H
+
+#define LP55XX_CP_OFF 0
+#define LP55XX_CP_BYPASS 1
+#define LP55XX_CP_BOOST 2
+#define LP55XX_CP_AUTO 3
+
+#endif /* _DT_BINDINGS_LEDS_LP55XX_H */
diff --git a/include/dt-bindings/memory/mediatek,mt8188-memory-port.h b/include/dt-bindings/memory/mediatek,mt8188-memory-port.h
new file mode 100644
index 000000000000..337ab11262af
--- /dev/null
+++ b/include/dt-bindings/memory/mediatek,mt8188-memory-port.h
@@ -0,0 +1,489 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Chengci Xu <chengci.xu@mediatek.com>
+ */
+#ifndef _DT_BINDINGS_MEMORY_MEDIATEK_MT8188_LARB_PORT_H_
+#define _DT_BINDINGS_MEMORY_MEDIATEK_MT8188_LARB_PORT_H_
+
+#include <dt-bindings/memory/mtk-memory-port.h>
+
+/*
+ * MM IOMMU larbs:
+ * The hardware larb indexes are not contiguous; larb11, for example,
+ * consists of larb11a/larb11b/larb11c. The larbs below are therefore
+ * re-indexed from a software point of view.
+ */
+#define SMI_L0_ID 0
+#define SMI_L1_ID 1
+#define SMI_L2_ID 2
+#define SMI_L3_ID 3
+#define SMI_L4_ID 4
+#define SMI_L5_ID 5
+#define SMI_L6_ID 6
+#define SMI_L7_ID 7
+#define SMI_L9_ID 8
+#define SMI_L10_ID 9
+#define SMI_L11A_ID 10
+#define SMI_L11B_ID 11
+#define SMI_L11C_ID 12
+#define SMI_L12_ID 13
+#define SMI_L13_ID 14
+#define SMI_L14_ID 15
+#define SMI_L15_ID 16
+#define SMI_L16A_ID 17
+#define SMI_L16B_ID 18
+#define SMI_L17A_ID 19
+#define SMI_L17B_ID 20
+#define SMI_L19_ID 21
+#define SMI_L21_ID 22
+#define SMI_L23_ID 23
+#define SMI_L27_ID 24
+#define SMI_L28_ID 25
+
+/*
+ * The MM IOMMU supports a 16GB DMA address space. We split it into four
+ * ranges: 0 ~ 4G, 4G ~ 8G, 8G ~ 12G and 12G ~ 16G, and the masters may
+ * be placed in any of these regions, BUT:
+ * a) all the ports inside one larb must be in the same range;
+ * b) the iova of a master can NOT cross a 4G/8G/12G boundary.
+ *
+ * This is the suggested mapping in this SoC:
+ *
+ * modules    dma-address-region            larbs-ports
+ * disp       0 ~ 4G                        larb0/1/2/3
+ * vcodec     4G ~ 8G                       larb19(21)[1]/21(22)/23
+ * cam/mdp    8G ~ 12G                      the other larbs
+ * N/A        12G ~ 16G
+ * CCU0       0x24000_0000 ~ 0x243ff_ffff   larb27(24): port 0/1
+ * CCU1       0x24400_0000 ~ 0x247ff_ffff   larb27(24): port 2/3
+ *
+ * This SoC has two MM IOMMU HWs; they are connected as follows:
+ * iommu-vdo: larb0/2/5/9/10/11A/11C/13/16B/17B/19/21
+ * iommu-vpp: larb1/3/4/6/7/11B/12/14/15/16A/17A/23/27
+ *
+ * [1]: This is larb19, but its index is 21 from the SW view.
+ */
+
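Every M4U_PORT_* below combines a software larb index with a port number. A brief sketch of the decode, assuming the usual MTK_M4U_ID() layout from mtk-memory-port.h (a 5-bit port field below the larb index; ports here top out at 29, which fits):

#include <stdint.h>

/* Assumed inverse of MTK_M4U_ID(larb, port) == ((larb) << 5) | (port). */
static inline uint32_t m4u_to_larb(uint32_t id) { return id >> 5; }
static inline uint32_t m4u_to_port(uint32_t id) { return id & 0x1f; }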
+/* MM IOMMU ports */
+/* LARB 0 -- VDO-0 */
+#define M4U_PORT_L0_DISP_RDMA1 MTK_M4U_ID(SMI_L0_ID, 0)
+#define M4U_PORT_L0_DISP_WDMA0 MTK_M4U_ID(SMI_L0_ID, 1)
+#define M4U_PORT_L0_DISP_OVL0_RDMA0 MTK_M4U_ID(SMI_L0_ID, 2)
+#define M4U_PORT_L0_DISP_OVL0_RDMA1 MTK_M4U_ID(SMI_L0_ID, 3)
+#define M4U_PORT_L0_DISP_OVL0_HDR MTK_M4U_ID(SMI_L0_ID, 4)
+#define M4U_PORT_L0_DISP_POSTMASK0 MTK_M4U_ID(SMI_L0_ID, 5)
+#define M4U_PORT_L0_DISP_FAKE_ENG0 MTK_M4U_ID(SMI_L0_ID, 6)
+
+/* LARB 1 -- VD0-0 */
+#define M4U_PORT_L1_DISP_RDMA0 MTK_M4U_ID(SMI_L1_ID, 0)
+#define M4U_PORT_L1_DISP_WDMA1 MTK_M4U_ID(SMI_L1_ID, 1)
+#define M4U_PORT_L1_DISP_OVL1_RDMA0 MTK_M4U_ID(SMI_L1_ID, 2)
+#define M4U_PORT_L1_DISP_OVL1_RDMA1 MTK_M4U_ID(SMI_L1_ID, 3)
+#define M4U_PORT_L1_DISP_OVL1_HDR MTK_M4U_ID(SMI_L1_ID, 4)
+#define M4U_PORT_L1_DISP_WROT0 MTK_M4U_ID(SMI_L1_ID, 5)
+#define M4U_PORT_L1_DISP_FAKE_ENG1 MTK_M4U_ID(SMI_L1_ID, 6)
+
+/* LARB 2 -- VDO-1 */
+#define M4U_PORT_L2_MDP_RDMA0 MTK_M4U_ID(SMI_L2_ID, 0)
+#define M4U_PORT_L2_MDP_RDMA2 MTK_M4U_ID(SMI_L2_ID, 1)
+#define M4U_PORT_L2_MDP_RDMA4 MTK_M4U_ID(SMI_L2_ID, 2)
+#define M4U_PORT_L2_MDP_RDMA6 MTK_M4U_ID(SMI_L2_ID, 3)
+#define M4U_PORT_L2_DISP_FAKE1 MTK_M4U_ID(SMI_L2_ID, 4)
+
+/* LARB 3 -- VDO-1 */
+#define M4U_PORT_L3_MDP_RDMA1 MTK_M4U_ID(SMI_L3_ID, 0)
+#define M4U_PORT_L3_MDP_RDMA3 MTK_M4U_ID(SMI_L3_ID, 1)
+#define M4U_PORT_L3_MDP_RDMA5 MTK_M4U_ID(SMI_L3_ID, 2)
+#define M4U_PORT_L3_MDP_RDMA7 MTK_M4U_ID(SMI_L3_ID, 3)
+#define M4U_PORT_L3_HDR_DS_SMI MTK_M4U_ID(SMI_L3_ID, 4)
+#define M4U_PORT_L3_HDR_ADL_SMI MTK_M4U_ID(SMI_L3_ID, 5)
+#define M4U_PORT_L3_DISP_FAKE1 MTK_M4U_ID(SMI_L3_ID, 6)
+
+/* LARB 4 -- VPP-0 */
+#define M4U_PORT_L4_MDP_RDMA MTK_M4U_ID(SMI_L4_ID, 0)
+#define M4U_PORT_L4_MDP_FG MTK_M4U_ID(SMI_L4_ID, 1)
+#define M4U_PORT_L4_MDP_OVL MTK_M4U_ID(SMI_L4_ID, 2)
+#define M4U_PORT_L4_MDP_WROT MTK_M4U_ID(SMI_L4_ID, 3)
+#define M4U_PORT_L4_FAKE_ENG MTK_M4U_ID(SMI_L4_ID, 4)
+#define M4U_PORT_L4_DISP_RDMA MTK_M4U_ID(SMI_L4_ID, 5)
+#define M4U_PORT_L4_DISP_WDMA MTK_M4U_ID(SMI_L4_ID, 6)
+
+/* LARB 5 -- VPP-1 */
+#define M4U_PORT_L5_SVPP1_MDP_RDMA MTK_M4U_ID(SMI_L5_ID, 0)
+#define M4U_PORT_L5_SVPP1_MDP_FG MTK_M4U_ID(SMI_L5_ID, 1)
+#define M4U_PORT_L5_SVPP1_MDP_OVL MTK_M4U_ID(SMI_L5_ID, 2)
+#define M4U_PORT_L5_SVPP1_MDP_WROT MTK_M4U_ID(SMI_L5_ID, 3)
+#define M4U_PORT_L5_SVPP2_MDP_RDMA MTK_M4U_ID(SMI_L5_ID, 4)
+#define M4U_PORT_L5_SVPP2_MDP_FG MTK_M4U_ID(SMI_L5_ID, 5)
+#define M4U_PORT_L5_SVPP2_MDP_WROT MTK_M4U_ID(SMI_L5_ID, 6)
+#define M4U_PORT_L5_LARB5_FAKE_ENG MTK_M4U_ID(SMI_L5_ID, 7)
+
+/* LARB 6 -- VPP-1 */
+#define M4U_PORT_L6_SVPP3_MDP_RDMA MTK_M4U_ID(SMI_L6_ID, 0)
+#define M4U_PORT_L6_SVPP3_MDP_FG MTK_M4U_ID(SMI_L6_ID, 1)
+#define M4U_PORT_L6_SVPP3_MDP_WROT MTK_M4U_ID(SMI_L6_ID, 2)
+#define M4U_PORT_L6_LARB6_FAKE_ENG MTK_M4U_ID(SMI_L6_ID, 3)
+
+/* LARB 7 -- WPE */
+#define M4U_PORT_L7_WPE_RDMA_0 MTK_M4U_ID(SMI_L7_ID, 0)
+#define M4U_PORT_L7_WPE_RDMA_1 MTK_M4U_ID(SMI_L7_ID, 1)
+#define M4U_PORT_L7_WPE_WDMA_0 MTK_M4U_ID(SMI_L7_ID, 2)
+
+/* LARB 9 -- IMG-M */
+#define M4U_PORT_L9_IMGI_T1_A MTK_M4U_ID(SMI_L9_ID, 0)
+#define M4U_PORT_L9_UFDI_T1_A MTK_M4U_ID(SMI_L9_ID, 1)
+#define M4U_PORT_L9_IMGBI_T1_A MTK_M4U_ID(SMI_L9_ID, 2)
+#define M4U_PORT_L9_IMGCI_T1_A MTK_M4U_ID(SMI_L9_ID, 3)
+#define M4U_PORT_L9_SMTI_T1_A MTK_M4U_ID(SMI_L9_ID, 4)
+#define M4U_PORT_L9_SMTI_T4_A MTK_M4U_ID(SMI_L9_ID, 5)
+#define M4U_PORT_L9_TNCSTI_T1_A MTK_M4U_ID(SMI_L9_ID, 6)
+#define M4U_PORT_L9_TNCSTI_T4_A MTK_M4U_ID(SMI_L9_ID, 7)
+#define M4U_PORT_L9_YUVO_T1_A MTK_M4U_ID(SMI_L9_ID, 8)
+#define M4U_PORT_L9_YUVBO_T1_A MTK_M4U_ID(SMI_L9_ID, 9)
+#define M4U_PORT_L9_YUVCO_T1_A MTK_M4U_ID(SMI_L9_ID, 10)
+#define M4U_PORT_L9_TIMGO_T1_A MTK_M4U_ID(SMI_L9_ID, 11)
+#define M4U_PORT_L9_YUVO_T2_A MTK_M4U_ID(SMI_L9_ID, 12)
+#define M4U_PORT_L9_YUVO_T5_A MTK_M4U_ID(SMI_L9_ID, 13)
+#define M4U_PORT_L9_IMGI_T1_B MTK_M4U_ID(SMI_L9_ID, 14)
+#define M4U_PORT_L9_IMGBI_T1_B MTK_M4U_ID(SMI_L9_ID, 15)
+#define M4U_PORT_L9_IMGCI_T1_B MTK_M4U_ID(SMI_L9_ID, 16)
+#define M4U_PORT_L9_SMTI_T4_B MTK_M4U_ID(SMI_L9_ID, 17)
+#define M4U_PORT_L9_TNCSO_T1_A MTK_M4U_ID(SMI_L9_ID, 18)
+#define M4U_PORT_L9_SMTO_T1_A MTK_M4U_ID(SMI_L9_ID, 19)
+#define M4U_PORT_L9_SMTO_T4_A MTK_M4U_ID(SMI_L9_ID, 20)
+#define M4U_PORT_L9_TNCSTO_T1_A MTK_M4U_ID(SMI_L9_ID, 21)
+#define M4U_PORT_L9_YUVO_T2_B MTK_M4U_ID(SMI_L9_ID, 22)
+#define M4U_PORT_L9_YUVO_T5_B MTK_M4U_ID(SMI_L9_ID, 23)
+#define M4U_PORT_L9_SMTO_T4_B MTK_M4U_ID(SMI_L9_ID, 24)
+
+/* LARB 10 -- IMG-D */
+#define M4U_PORT_L10_IMGI_D1 MTK_M4U_ID(SMI_L10_ID, 0)
+#define M4U_PORT_L10_IMGBI_D1 MTK_M4U_ID(SMI_L10_ID, 1)
+#define M4U_PORT_L10_IMGCI_D1 MTK_M4U_ID(SMI_L10_ID, 2)
+#define M4U_PORT_L10_IMGDI_D1 MTK_M4U_ID(SMI_L10_ID, 3)
+#define M4U_PORT_L10_DEPI_D1 MTK_M4U_ID(SMI_L10_ID, 4)
+#define M4U_PORT_L10_DMGI_D1 MTK_M4U_ID(SMI_L10_ID, 5)
+#define M4U_PORT_L10_SMTI_D1 MTK_M4U_ID(SMI_L10_ID, 6)
+#define M4U_PORT_L10_RECI_D1 MTK_M4U_ID(SMI_L10_ID, 7)
+#define M4U_PORT_L10_RECI_D1_N MTK_M4U_ID(SMI_L10_ID, 8)
+#define M4U_PORT_L10_TNRWI_D1 MTK_M4U_ID(SMI_L10_ID, 9)
+#define M4U_PORT_L10_TNRCI_D1 MTK_M4U_ID(SMI_L10_ID, 10)
+#define M4U_PORT_L10_TNRCI_D1_N MTK_M4U_ID(SMI_L10_ID, 11)
+#define M4U_PORT_L10_IMG4O_D1 MTK_M4U_ID(SMI_L10_ID, 12)
+#define M4U_PORT_L10_IMG4BO_D1 MTK_M4U_ID(SMI_L10_ID, 13)
+#define M4U_PORT_L10_SMTI_D8 MTK_M4U_ID(SMI_L10_ID, 14)
+#define M4U_PORT_L10_SMTO_D1 MTK_M4U_ID(SMI_L10_ID, 15)
+#define M4U_PORT_L10_TNRMO_D1 MTK_M4U_ID(SMI_L10_ID, 16)
+#define M4U_PORT_L10_TNRMO_D1_N MTK_M4U_ID(SMI_L10_ID, 17)
+#define M4U_PORT_L10_SMTO_D8 MTK_M4U_ID(SMI_L10_ID, 18)
+#define M4U_PORT_L10_DBGO_D1 MTK_M4U_ID(SMI_L10_ID, 19)
+
+/* LARB 11A -- IMG-D */
+#define M4U_PORT_L11A_WPE_RDMA_0 MTK_M4U_ID(SMI_L11A_ID, 0)
+#define M4U_PORT_L11A_WPE_RDMA_1 MTK_M4U_ID(SMI_L11A_ID, 1)
+#define M4U_PORT_L11A_WPE_RDMA_4P_0 MTK_M4U_ID(SMI_L11A_ID, 2)
+#define M4U_PORT_L11A_WPE_RDMA_4P_1 MTK_M4U_ID(SMI_L11A_ID, 3)
+#define M4U_PORT_L11A_WPE_CQ0 MTK_M4U_ID(SMI_L11A_ID, 4)
+#define M4U_PORT_L11A_WPE_CQ1 MTK_M4U_ID(SMI_L11A_ID, 5)
+#define M4U_PORT_L11A_PIMGI_P1 MTK_M4U_ID(SMI_L11A_ID, 6)
+#define M4U_PORT_L11A_PIMGBI_P1 MTK_M4U_ID(SMI_L11A_ID, 7)
+#define M4U_PORT_L11A_PIMGCI_P1 MTK_M4U_ID(SMI_L11A_ID, 8)
+#define M4U_PORT_L11A_IMGI_T1_C MTK_M4U_ID(SMI_L11A_ID, 9)
+#define M4U_PORT_L11A_IMGBI_T1_C MTK_M4U_ID(SMI_L11A_ID, 10)
+#define M4U_PORT_L11A_IMGCI_T1_C MTK_M4U_ID(SMI_L11A_ID, 11)
+#define M4U_PORT_L11A_SMTI_T1_C MTK_M4U_ID(SMI_L11A_ID, 12)
+#define M4U_PORT_L11A_SMTI_T4_C MTK_M4U_ID(SMI_L11A_ID, 13)
+#define M4U_PORT_L11A_SMTI_T6_C MTK_M4U_ID(SMI_L11A_ID, 14)
+#define M4U_PORT_L11A_YUVO_T1_C MTK_M4U_ID(SMI_L11A_ID, 15)
+#define M4U_PORT_L11A_YUVBO_T1_C MTK_M4U_ID(SMI_L11A_ID, 16)
+#define M4U_PORT_L11A_YUVCO_T1_C MTK_M4U_ID(SMI_L11A_ID, 17)
+#define M4U_PORT_L11A_WPE_WDMA_0 MTK_M4U_ID(SMI_L11A_ID, 18)
+#define M4U_PORT_L11A_WPE_WDMA_4P_0 MTK_M4U_ID(SMI_L11A_ID, 19)
+#define M4U_PORT_L11A_WROT_P1 MTK_M4U_ID(SMI_L11A_ID, 20)
+#define M4U_PORT_L11A_TCCSO_P1 MTK_M4U_ID(SMI_L11A_ID, 21)
+#define M4U_PORT_L11A_TCCSI_P1 MTK_M4U_ID(SMI_L11A_ID, 22)
+#define M4U_PORT_L11A_TIMGO_T1_C MTK_M4U_ID(SMI_L11A_ID, 23)
+#define M4U_PORT_L11A_YUVO_T2_C MTK_M4U_ID(SMI_L11A_ID, 24)
+#define M4U_PORT_L11A_YUVO_T5_C MTK_M4U_ID(SMI_L11A_ID, 25)
+#define M4U_PORT_L11A_SMTO_T1_C MTK_M4U_ID(SMI_L11A_ID, 26)
+#define M4U_PORT_L11A_SMTO_T4_C MTK_M4U_ID(SMI_L11A_ID, 27)
+#define M4U_PORT_L11A_SMTO_T6_C MTK_M4U_ID(SMI_L11A_ID, 28)
+#define M4U_PORT_L11A_DBGO_T1_C MTK_M4U_ID(SMI_L11A_ID, 29)
+
+/* LARB 11B -- IMG-D */
+#define M4U_PORT_L11B_WPE_RDMA_0 MTK_M4U_ID(SMI_L11B_ID, 0)
+#define M4U_PORT_L11B_WPE_RDMA_1 MTK_M4U_ID(SMI_L11B_ID, 1)
+#define M4U_PORT_L11B_WPE_RDMA_4P_0 MTK_M4U_ID(SMI_L11B_ID, 2)
+#define M4U_PORT_L11B_WPE_RDMA_4P_1 MTK_M4U_ID(SMI_L11B_ID, 3)
+#define M4U_PORT_L11B_WPE_CQ0 MTK_M4U_ID(SMI_L11B_ID, 4)
+#define M4U_PORT_L11B_WPE_CQ1 MTK_M4U_ID(SMI_L11B_ID, 5)
+#define M4U_PORT_L11B_PIMGI_P1 MTK_M4U_ID(SMI_L11B_ID, 6)
+#define M4U_PORT_L11B_PIMGBI_P1 MTK_M4U_ID(SMI_L11B_ID, 7)
+#define M4U_PORT_L11B_PIMGCI_P1 MTK_M4U_ID(SMI_L11B_ID, 8)
+#define M4U_PORT_L11B_IMGI_T1_C MTK_M4U_ID(SMI_L11B_ID, 9)
+#define M4U_PORT_L11B_IMGBI_T1_C MTK_M4U_ID(SMI_L11B_ID, 10)
+#define M4U_PORT_L11B_IMGCI_T1_C MTK_M4U_ID(SMI_L11B_ID, 11)
+#define M4U_PORT_L11B_SMTI_T1_C MTK_M4U_ID(SMI_L11B_ID, 12)
+#define M4U_PORT_L11B_SMTI_T4_C MTK_M4U_ID(SMI_L11B_ID, 13)
+#define M4U_PORT_L11B_SMTI_T6_C MTK_M4U_ID(SMI_L11B_ID, 14)
+#define M4U_PORT_L11B_YUVO_T1_C MTK_M4U_ID(SMI_L11B_ID, 15)
+#define M4U_PORT_L11B_YUVBO_T1_C MTK_M4U_ID(SMI_L11B_ID, 16)
+#define M4U_PORT_L11B_YUVCO_T1_C MTK_M4U_ID(SMI_L11B_ID, 17)
+#define M4U_PORT_L11B_WPE_WDMA_0 MTK_M4U_ID(SMI_L11B_ID, 18)
+#define M4U_PORT_L11B_WPE_WDMA_4P_0 MTK_M4U_ID(SMI_L11B_ID, 19)
+#define M4U_PORT_L11B_WROT_P1 MTK_M4U_ID(SMI_L11B_ID, 20)
+#define M4U_PORT_L11B_TCCSO_P1 MTK_M4U_ID(SMI_L11B_ID, 21)
+#define M4U_PORT_L11B_TCCSI_P1 MTK_M4U_ID(SMI_L11B_ID, 22)
+#define M4U_PORT_L11B_TIMGO_T1_C MTK_M4U_ID(SMI_L11B_ID, 23)
+#define M4U_PORT_L11B_YUVO_T2_C MTK_M4U_ID(SMI_L11B_ID, 24)
+#define M4U_PORT_L11B_YUVO_T5_C MTK_M4U_ID(SMI_L11B_ID, 25)
+#define M4U_PORT_L11B_SMTO_T1_C MTK_M4U_ID(SMI_L11B_ID, 26)
+#define M4U_PORT_L11B_SMTO_T4_C MTK_M4U_ID(SMI_L11B_ID, 27)
+#define M4U_PORT_L11B_SMTO_T6_C MTK_M4U_ID(SMI_L11B_ID, 28)
+#define M4U_PORT_L11B_DBGO_T1_C MTK_M4U_ID(SMI_L11B_ID, 29)
+
+/* LARB 11C -- IMG-D */
+#define M4U_PORT_L11C_WPE_RDMA_0 MTK_M4U_ID(SMI_L11C_ID, 0)
+#define M4U_PORT_L11C_WPE_RDMA_1 MTK_M4U_ID(SMI_L11C_ID, 1)
+#define M4U_PORT_L11C_WPE_RDMA_4P_0 MTK_M4U_ID(SMI_L11C_ID, 2)
+#define M4U_PORT_L11C_WPE_RDMA_4P_1 MTK_M4U_ID(SMI_L11C_ID, 3)
+#define M4U_PORT_L11C_WPE_CQ0 MTK_M4U_ID(SMI_L11C_ID, 4)
+#define M4U_PORT_L11C_WPE_CQ1 MTK_M4U_ID(SMI_L11C_ID, 5)
+#define M4U_PORT_L11C_PIMGI_P1 MTK_M4U_ID(SMI_L11C_ID, 6)
+#define M4U_PORT_L11C_PIMGBI_P1 MTK_M4U_ID(SMI_L11C_ID, 7)
+#define M4U_PORT_L11C_PIMGCI_P1 MTK_M4U_ID(SMI_L11C_ID, 8)
+#define M4U_PORT_L11C_IMGI_T1_C MTK_M4U_ID(SMI_L11C_ID, 9)
+#define M4U_PORT_L11C_IMGBI_T1_C MTK_M4U_ID(SMI_L11C_ID, 10)
+#define M4U_PORT_L11C_IMGCI_T1_C MTK_M4U_ID(SMI_L11C_ID, 11)
+#define M4U_PORT_L11C_SMTI_T1_C MTK_M4U_ID(SMI_L11C_ID, 12)
+#define M4U_PORT_L11C_SMTI_T4_C MTK_M4U_ID(SMI_L11C_ID, 13)
+#define M4U_PORT_L11C_SMTI_T6_C MTK_M4U_ID(SMI_L11C_ID, 14)
+#define M4U_PORT_L11C_YUVO_T1_C MTK_M4U_ID(SMI_L11C_ID, 15)
+#define M4U_PORT_L11C_YUVBO_T1_C MTK_M4U_ID(SMI_L11C_ID, 16)
+#define M4U_PORT_L11C_YUVCO_T1_C MTK_M4U_ID(SMI_L11C_ID, 17)
+#define M4U_PORT_L11C_WPE_WDMA_0 MTK_M4U_ID(SMI_L11C_ID, 18)
+#define M4U_PORT_L11C_WPE_WDMA_4P_0 MTK_M4U_ID(SMI_L11C_ID, 19)
+#define M4U_PORT_L11C_WROT_P1 MTK_M4U_ID(SMI_L11C_ID, 20)
+#define M4U_PORT_L11C_TCCSO_P1 MTK_M4U_ID(SMI_L11C_ID, 21)
+#define M4U_PORT_L11C_TCCSI_P1 MTK_M4U_ID(SMI_L11C_ID, 22)
+#define M4U_PORT_L11C_TIMGO_T1_C MTK_M4U_ID(SMI_L11C_ID, 23)
+#define M4U_PORT_L11C_YUVO_T2_C MTK_M4U_ID(SMI_L11C_ID, 24)
+#define M4U_PORT_L11C_YUVO_T5_C MTK_M4U_ID(SMI_L11C_ID, 25)
+#define M4U_PORT_L11C_SMTO_T1_C MTK_M4U_ID(SMI_L11C_ID, 26)
+#define M4U_PORT_L11C_SMTO_T4_C MTK_M4U_ID(SMI_L11C_ID, 27)
+#define M4U_PORT_L11C_SMTO_T6_C MTK_M4U_ID(SMI_L11C_ID, 28)
+#define M4U_PORT_L11C_DBGO_T1_C MTK_M4U_ID(SMI_L11C_ID, 29)
+
+/* LARB 12 -- IPE */
+#define M4U_PORT_L12_FDVT_RDA_0 MTK_M4U_ID(SMI_L12_ID, 0)
+#define M4U_PORT_L12_FDVT_RDB_0 MTK_M4U_ID(SMI_L12_ID, 1)
+#define M4U_PORT_L12_FDVT_WRA_0 MTK_M4U_ID(SMI_L12_ID, 2)
+#define M4U_PORT_L12_FDVT_WRB_0 MTK_M4U_ID(SMI_L12_ID, 3)
+#define M4U_PORT_L12_ME_RDMA MTK_M4U_ID(SMI_L12_ID, 4)
+#define M4U_PORT_L12_ME_WDMA MTK_M4U_ID(SMI_L12_ID, 5)
+#define M4U_PORT_L12_DVS_RDMA MTK_M4U_ID(SMI_L12_ID, 6)
+#define M4U_PORT_L12_DVS_WDMA MTK_M4U_ID(SMI_L12_ID, 7)
+#define M4U_PORT_L12_DVP_RDMA MTK_M4U_ID(SMI_L12_ID, 8)
+#define M4U_PORT_L12_DVP_WDMA MTK_M4U_ID(SMI_L12_ID, 9)
+#define M4U_PORT_L12_FDVT_2ND_RDA_0 MTK_M4U_ID(SMI_L12_ID, 10)
+#define M4U_PORT_L12_FDVT_2ND_RDB_0 MTK_M4U_ID(SMI_L12_ID, 11)
+#define M4U_PORT_L12_FDVT_2ND_WRA_0 MTK_M4U_ID(SMI_L12_ID, 12)
+#define M4U_PORT_L12_FDVT_2ND_WRB_0 MTK_M4U_ID(SMI_L12_ID, 13)
+#define M4U_PORT_L12_DHZEI_E1 MTK_M4U_ID(SMI_L12_ID, 14)
+#define M4U_PORT_L12_DHZEO_E1 MTK_M4U_ID(SMI_L12_ID, 15)
+
+/* LARB 13 -- CAM-1 */
+#define M4U_PORT_L13_CAMSV_CQI_E1 MTK_M4U_ID(SMI_L13_ID, 0)
+#define M4U_PORT_L13_CAMSV_CQI_E2 MTK_M4U_ID(SMI_L13_ID, 1)
+#define M4U_PORT_L13_GCAMSV_A_IMGO_1 MTK_M4U_ID(SMI_L13_ID, 2)
+#define M4U_PORT_L13_GCAMSV_C_IMGO_1 MTK_M4U_ID(SMI_L13_ID, 3)
+#define M4U_PORT_L13_GCAMSV_A_IMGO_2 MTK_M4U_ID(SMI_L13_ID, 4)
+#define M4U_PORT_L13_GCAMSV_C_IMGO_2 MTK_M4U_ID(SMI_L13_ID, 5)
+#define M4U_PORT_L13_PDAI_A_0 MTK_M4U_ID(SMI_L13_ID, 6)
+#define M4U_PORT_L13_PDAI_A_1 MTK_M4U_ID(SMI_L13_ID, 7)
+#define M4U_PORT_L13_CAMSV_CQI_B_E1 MTK_M4U_ID(SMI_L13_ID, 8)
+#define M4U_PORT_L13_CAMSV_CQI_B_E2 MTK_M4U_ID(SMI_L13_ID, 9)
+#define M4U_PORT_L13_CAMSV_CQI_C_E1 MTK_M4U_ID(SMI_L13_ID, 10)
+#define M4U_PORT_L13_CAMSV_CQI_C_E2 MTK_M4U_ID(SMI_L13_ID, 11)
+#define M4U_PORT_L13_GCAMSV_E_IMGO_1 MTK_M4U_ID(SMI_L13_ID, 12)
+#define M4U_PORT_L13_GCAMSV_E_IMGO_2 MTK_M4U_ID(SMI_L13_ID, 13)
+#define M4U_PORT_L13_GCAMSV_A_UFEO_1 MTK_M4U_ID(SMI_L13_ID, 14)
+#define M4U_PORT_L13_GCAMSV_C_UFEO_1 MTK_M4U_ID(SMI_L13_ID, 15)
+#define M4U_PORT_L13_GCAMSV_A_UFEO_2 MTK_M4U_ID(SMI_L13_ID, 16)
+#define M4U_PORT_L13_GCAMSV_C_UFEO_2 MTK_M4U_ID(SMI_L13_ID, 17)
+#define M4U_PORT_L13_GCAMSV_E_UFEO_1 MTK_M4U_ID(SMI_L13_ID, 18)
+#define M4U_PORT_L13_GCAMSV_E_UFEO_2 MTK_M4U_ID(SMI_L13_ID, 19)
+#define M4U_PORT_L13_GCAMSV_G_IMGO_1 MTK_M4U_ID(SMI_L13_ID, 20)
+#define M4U_PORT_L13_GCAMSV_G_IMGO_2 MTK_M4U_ID(SMI_L13_ID, 21)
+#define M4U_PORT_L13_PDAO_A MTK_M4U_ID(SMI_L13_ID, 22)
+#define M4U_PORT_L13_PDAO_C MTK_M4U_ID(SMI_L13_ID, 23)
+
+/* LARB 14 -- CAM-1 */
+#define M4U_PORT_L14_GCAMSV_B_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 0)
+#define M4U_PORT_L14_GCAMSV_B_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 1)
+#define M4U_PORT_L14_SCAMSV_A_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 2)
+#define M4U_PORT_L14_SCAMSV_A_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 3)
+#define M4U_PORT_L14_SCAMSV_B_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 4)
+#define M4U_PORT_L14_SCAMSV_B_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 5)
+#define M4U_PORT_L14_PDAI_B_0 MTK_M4U_ID(SMI_L14_ID, 6)
+#define M4U_PORT_L14_PDAI_B_1 MTK_M4U_ID(SMI_L14_ID, 7)
+#define M4U_PORT_L14_GCAMSV_D_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 8)
+#define M4U_PORT_L14_GCAMSV_D_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 9)
+#define M4U_PORT_L14_GCAMSV_F_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 10)
+#define M4U_PORT_L14_GCAMSV_F_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 11)
+#define M4U_PORT_L14_GCAMSV_H_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 12)
+#define M4U_PORT_L14_GCAMSV_H_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 13)
+#define M4U_PORT_L14_GCAMSV_B_UFEO_1 MTK_M4U_ID(SMI_L14_ID, 14)
+#define M4U_PORT_L14_GCAMSV_B_UFEO_2 MTK_M4U_ID(SMI_L14_ID, 15)
+#define M4U_PORT_L14_GCAMSV_D_UFEO_1 MTK_M4U_ID(SMI_L14_ID, 16)
+#define M4U_PORT_L14_GCAMSV_D_UFEO_2 MTK_M4U_ID(SMI_L14_ID, 17)
+#define M4U_PORT_L14_PDAO_B MTK_M4U_ID(SMI_L14_ID, 18)
+#define M4U_PORT_L14_IPUI MTK_M4U_ID(SMI_L14_ID, 19)
+#define M4U_PORT_L14_IPUO MTK_M4U_ID(SMI_L14_ID, 20)
+#define M4U_PORT_L14_IPU3O MTK_M4U_ID(SMI_L14_ID, 21)
+#define M4U_PORT_L14_FAKE MTK_M4U_ID(SMI_L14_ID, 22)
+
+/* LARB 15 -- IMG-D */
+#define M4U_PORT_L15_VIPI_D1 MTK_M4U_ID(SMI_L15_ID, 0)
+#define M4U_PORT_L15_VIPBI_D1 MTK_M4U_ID(SMI_L15_ID, 1)
+#define M4U_PORT_L15_SMTI_D6 MTK_M4U_ID(SMI_L15_ID, 2)
+#define M4U_PORT_L15_TNCSTI_D1 MTK_M4U_ID(SMI_L15_ID, 3)
+#define M4U_PORT_L15_TNCSTI_D4 MTK_M4U_ID(SMI_L15_ID, 4)
+#define M4U_PORT_L15_SMTI_D4 MTK_M4U_ID(SMI_L15_ID, 5)
+#define M4U_PORT_L15_IMG3O_D1 MTK_M4U_ID(SMI_L15_ID, 6)
+#define M4U_PORT_L15_IMG3BO_D1 MTK_M4U_ID(SMI_L15_ID, 7)
+#define M4U_PORT_L15_IMG3CO_D1 MTK_M4U_ID(SMI_L15_ID, 8)
+#define M4U_PORT_L15_IMG2O_D1 MTK_M4U_ID(SMI_L15_ID, 9)
+#define M4U_PORT_L15_SMTI_D9 MTK_M4U_ID(SMI_L15_ID, 10)
+#define M4U_PORT_L15_SMTO_D4 MTK_M4U_ID(SMI_L15_ID, 11)
+#define M4U_PORT_L15_FEO_D1 MTK_M4U_ID(SMI_L15_ID, 12)
+#define M4U_PORT_L15_TNCSO_D1 MTK_M4U_ID(SMI_L15_ID, 13)
+#define M4U_PORT_L15_TNCSTO_D1 MTK_M4U_ID(SMI_L15_ID, 14)
+#define M4U_PORT_L15_SMTO_D6 MTK_M4U_ID(SMI_L15_ID, 15)
+#define M4U_PORT_L15_SMTO_D9 MTK_M4U_ID(SMI_L15_ID, 16)
+#define M4U_PORT_L15_TNCO_D1 MTK_M4U_ID(SMI_L15_ID, 17)
+#define M4U_PORT_L15_TNCO_D1_N MTK_M4U_ID(SMI_L15_ID, 18)
+
+/* LARB 16A -- CAM */
+#define M4U_PORT_L16A_IMGO_R1 MTK_M4U_ID(SMI_L16A_ID, 0)
+#define M4U_PORT_L16A_CQI_R1 MTK_M4U_ID(SMI_L16A_ID, 1)
+#define M4U_PORT_L16A_CQI_R2 MTK_M4U_ID(SMI_L16A_ID, 2)
+#define M4U_PORT_L16A_BPCI_R1 MTK_M4U_ID(SMI_L16A_ID, 3)
+#define M4U_PORT_L16A_LSCI_R1 MTK_M4U_ID(SMI_L16A_ID, 4)
+#define M4U_PORT_L16A_RAWI_R2 MTK_M4U_ID(SMI_L16A_ID, 5)
+#define M4U_PORT_L16A_RAWI_R3 MTK_M4U_ID(SMI_L16A_ID, 6)
+#define M4U_PORT_L16A_UFDI_R2 MTK_M4U_ID(SMI_L16A_ID, 7)
+#define M4U_PORT_L16A_UFDI_R3 MTK_M4U_ID(SMI_L16A_ID, 8)
+#define M4U_PORT_L16A_RAWI_R4 MTK_M4U_ID(SMI_L16A_ID, 9)
+#define M4U_PORT_L16A_RAWI_R5 MTK_M4U_ID(SMI_L16A_ID, 10)
+#define M4U_PORT_L16A_AAI_R1 MTK_M4U_ID(SMI_L16A_ID, 11)
+#define M4U_PORT_L16A_UFDI_R5 MTK_M4U_ID(SMI_L16A_ID, 12)
+#define M4U_PORT_L16A_FHO_R1 MTK_M4U_ID(SMI_L16A_ID, 13)
+#define M4U_PORT_L16A_AAO_R1 MTK_M4U_ID(SMI_L16A_ID, 14)
+#define M4U_PORT_L16A_TSFSO_R1 MTK_M4U_ID(SMI_L16A_ID, 15)
+#define M4U_PORT_L16A_FLKO_R1 MTK_M4U_ID(SMI_L16A_ID, 16)
+
+/* LARB 16B -- CAM */
+#define M4U_PORT_L16B_IMGO_R1 MTK_M4U_ID(SMI_L16B_ID, 0)
+#define M4U_PORT_L16B_CQI_R1 MTK_M4U_ID(SMI_L16B_ID, 1)
+#define M4U_PORT_L16B_CQI_R2 MTK_M4U_ID(SMI_L16B_ID, 2)
+#define M4U_PORT_L16B_BPCI_R1 MTK_M4U_ID(SMI_L16B_ID, 3)
+#define M4U_PORT_L16B_LSCI_R1 MTK_M4U_ID(SMI_L16B_ID, 4)
+#define M4U_PORT_L16B_RAWI_R2 MTK_M4U_ID(SMI_L16B_ID, 5)
+#define M4U_PORT_L16B_RAWI_R3 MTK_M4U_ID(SMI_L16B_ID, 6)
+#define M4U_PORT_L16B_UFDI_R2 MTK_M4U_ID(SMI_L16B_ID, 7)
+#define M4U_PORT_L16B_UFDI_R3 MTK_M4U_ID(SMI_L16B_ID, 8)
+#define M4U_PORT_L16B_RAWI_R4 MTK_M4U_ID(SMI_L16B_ID, 9)
+#define M4U_PORT_L16B_RAWI_R5 MTK_M4U_ID(SMI_L16B_ID, 10)
+#define M4U_PORT_L16B_AAI_R1 MTK_M4U_ID(SMI_L16B_ID, 11)
+#define M4U_PORT_L16B_UFDI_R5 MTK_M4U_ID(SMI_L16B_ID, 12)
+#define M4U_PORT_L16B_FHO_R1 MTK_M4U_ID(SMI_L16B_ID, 13)
+#define M4U_PORT_L16B_AAO_R1 MTK_M4U_ID(SMI_L16B_ID, 14)
+#define M4U_PORT_L16B_TSFSO_R1 MTK_M4U_ID(SMI_L16B_ID, 15)
+#define M4U_PORT_L16B_FLKO_R1 MTK_M4U_ID(SMI_L16B_ID, 16)
+
+/* LARB 17A -- CAM */
+#define M4U_PORT_L17A_YUVO_R1 MTK_M4U_ID(SMI_L17A_ID, 0)
+#define M4U_PORT_L17A_YUVO_R3 MTK_M4U_ID(SMI_L17A_ID, 1)
+#define M4U_PORT_L17A_YUVCO_R1 MTK_M4U_ID(SMI_L17A_ID, 2)
+#define M4U_PORT_L17A_YUVO_R2 MTK_M4U_ID(SMI_L17A_ID, 3)
+#define M4U_PORT_L17A_RZH1N2TO_R1 MTK_M4U_ID(SMI_L17A_ID, 4)
+#define M4U_PORT_L17A_DRZS4NO_R1 MTK_M4U_ID(SMI_L17A_ID, 5)
+#define M4U_PORT_L17A_TNCSO_R1 MTK_M4U_ID(SMI_L17A_ID, 6)
+
+/* LARB 17B -- CAM */
+#define M4U_PORT_L17B_YUVO_R1 MTK_M4U_ID(SMI_L17B_ID, 0)
+#define M4U_PORT_L17B_YUVO_R3 MTK_M4U_ID(SMI_L17B_ID, 1)
+#define M4U_PORT_L17B_YUVCO_R1 MTK_M4U_ID(SMI_L17B_ID, 2)
+#define M4U_PORT_L17B_YUVO_R2 MTK_M4U_ID(SMI_L17B_ID, 3)
+#define M4U_PORT_L17B_RZH1N2TO_R1 MTK_M4U_ID(SMI_L17B_ID, 4)
+#define M4U_PORT_L17B_DRZS4NO_R1 MTK_M4U_ID(SMI_L17B_ID, 5)
+#define M4U_PORT_L17B_TNCSO_R1 MTK_M4U_ID(SMI_L17B_ID, 6)
+
+/* LARB 19 -- VENC */
+#define M4U_PORT_L19_VENC_RCPU MTK_M4U_ID(SMI_L19_ID, 0)
+#define M4U_PORT_L19_VENC_REC MTK_M4U_ID(SMI_L19_ID, 1)
+#define M4U_PORT_L19_VENC_BSDMA MTK_M4U_ID(SMI_L19_ID, 2)
+#define M4U_PORT_L19_VENC_SV_COMV MTK_M4U_ID(SMI_L19_ID, 3)
+#define M4U_PORT_L19_VENC_RD_COMV MTK_M4U_ID(SMI_L19_ID, 4)
+#define M4U_PORT_L19_VENC_NBM_RDMA MTK_M4U_ID(SMI_L19_ID, 5)
+#define M4U_PORT_L19_VENC_NBM_RDMA_LITE MTK_M4U_ID(SMI_L19_ID, 6)
+#define M4U_PORT_L19_JPGENC_Y_RDMA MTK_M4U_ID(SMI_L19_ID, 7)
+#define M4U_PORT_L19_JPGENC_C_RDMA MTK_M4U_ID(SMI_L19_ID, 8)
+#define M4U_PORT_L19_JPGENC_Q_TABLE MTK_M4U_ID(SMI_L19_ID, 9)
+#define M4U_PORT_L19_VENC_SUB_W_LUMA MTK_M4U_ID(SMI_L19_ID, 10)
+#define M4U_PORT_L19_VENC_FCS_NBM_RDMA MTK_M4U_ID(SMI_L19_ID, 11)
+#define M4U_PORT_L19_JPGENC_BSDMA MTK_M4U_ID(SMI_L19_ID, 12)
+#define M4U_PORT_L19_JPGDEC_WDMA_0 MTK_M4U_ID(SMI_L19_ID, 13)
+#define M4U_PORT_L19_JPGDEC_BSDMA_0 MTK_M4U_ID(SMI_L19_ID, 14)
+#define M4U_PORT_L19_VENC_NBM_WDMA MTK_M4U_ID(SMI_L19_ID, 15)
+#define M4U_PORT_L19_VENC_NBM_WDMA_LITE MTK_M4U_ID(SMI_L19_ID, 16)
+#define M4U_PORT_L19_VENC_FCS_NBM_WDMA MTK_M4U_ID(SMI_L19_ID, 17)
+#define M4U_PORT_L19_JPGDEC_WDMA_1 MTK_M4U_ID(SMI_L19_ID, 18)
+#define M4U_PORT_L19_JPGDEC_BSDMA_1 MTK_M4U_ID(SMI_L19_ID, 19)
+#define M4U_PORT_L19_JPGDEC_HUFF_OFFSET_1 MTK_M4U_ID(SMI_L19_ID, 20)
+#define M4U_PORT_L19_JPGDEC_HUFF_OFFSET_0 MTK_M4U_ID(SMI_L19_ID, 21)
+#define M4U_PORT_L19_VENC_CUR_LUMA MTK_M4U_ID(SMI_L19_ID, 22)
+#define M4U_PORT_L19_VENC_CUR_CHROMA MTK_M4U_ID(SMI_L19_ID, 23)
+#define M4U_PORT_L19_VENC_REF_LUMA MTK_M4U_ID(SMI_L19_ID, 24)
+#define M4U_PORT_L19_VENC_REF_CHROMA MTK_M4U_ID(SMI_L19_ID, 25)
+#define M4U_PORT_L19_VENC_SUB_R_LUMA MTK_M4U_ID(SMI_L19_ID, 26)
+
+/* LARB 21 -- VDEC-CORE0 */
+#define M4U_PORT_L21_HW_VDEC_MC_EXT MTK_M4U_ID(SMI_L21_ID, 0)
+#define M4U_PORT_L21_HW_VDEC_UFO_EXT MTK_M4U_ID(SMI_L21_ID, 1)
+#define M4U_PORT_L21_HW_VDEC_PP_EXT MTK_M4U_ID(SMI_L21_ID, 2)
+#define M4U_PORT_L21_HW_VDEC_PRED_RD_EXT MTK_M4U_ID(SMI_L21_ID, 3)
+#define M4U_PORT_L21_HW_VDEC_PRED_WR_EXT MTK_M4U_ID(SMI_L21_ID, 4)
+#define M4U_PORT_L21_HW_VDEC_PPWRAP_EXT MTK_M4U_ID(SMI_L21_ID, 5)
+#define M4U_PORT_L21_HW_VDEC_TILE_EXT MTK_M4U_ID(SMI_L21_ID, 6)
+#define M4U_PORT_L21_HW_VDEC_VLD_EXT MTK_M4U_ID(SMI_L21_ID, 7)
+#define M4U_PORT_L21_HW_VDEC_VLD2_EXT MTK_M4U_ID(SMI_L21_ID, 8)
+#define M4U_PORT_L21_HW_VDEC_AVC_MV_EXT MTK_M4U_ID(SMI_L21_ID, 9)
+#define M4U_PORT_L21_HW_VDEC_UFO_EXT_C MTK_M4U_ID(SMI_L21_ID, 10)
+
+/* LARB 23 -- VDEC-SOC */
+#define M4U_PORT_L23_HW_VDEC_LAT0_VLD_EXT MTK_M4U_ID(SMI_L23_ID, 0)
+#define M4U_PORT_L23_HW_VDEC_LAT0_VLD2_EXT MTK_M4U_ID(SMI_L23_ID, 1)
+#define M4U_PORT_L23_HW_VDEC_LAT0_AVC_MV_EXT MTK_M4U_ID(SMI_L23_ID, 2)
+#define M4U_PORT_L23_HW_VDEC_LAT0_PRED_RD_EXT MTK_M4U_ID(SMI_L23_ID, 3)
+#define M4U_PORT_L23_HW_VDEC_LAT0_TILE_EXT MTK_M4U_ID(SMI_L23_ID, 4)
+#define M4U_PORT_L23_HW_VDEC_LAT0_WDMA_EXT MTK_M4U_ID(SMI_L23_ID, 5)
+#define M4U_PORT_L23_HW_VDEC_UFO_ENC_EXT MTK_M4U_ID(SMI_L23_ID, 6)
+#define M4U_PORT_L23_HW_VDEC_UFO_ENC_EXT_C MTK_M4U_ID(SMI_L23_ID, 7)
+#define M4U_PORT_L23_HW_VDEC_MC_EXT_C MTK_M4U_ID(SMI_L23_ID, 8)
+
+/* LARB 27 -- CCU */
+#define M4U_PORT_L27_CCUI MTK_M4U_ID(SMI_L27_ID, 0)
+#define M4U_PORT_L27_CCUO MTK_M4U_ID(SMI_L27_ID, 1)
+#define M4U_PORT_L27_CCUI2 MTK_M4U_ID(SMI_L27_ID, 2)
+#define M4U_PORT_L27_CCUO2 MTK_M4U_ID(SMI_L27_ID, 3)
+
+/* LARB 28 -- AXI-CCU */
+#define M4U_PORT_L28_CCU_AXI_0 MTK_M4U_ID(SMI_L28_ID, 0)
+
+/* infra/peri */
+#define IFR_IOMMU_PORT_PCIE_0 MTK_IFAIOMMU_PERI_ID(0)
+
+#endif
diff --git a/include/dt-bindings/memory/tegra234-mc.h b/include/dt-bindings/memory/tegra234-mc.h
index 347e55e89a2a..6e60d55491b3 100644
--- a/include/dt-bindings/memory/tegra234-mc.h
+++ b/include/dt-bindings/memory/tegra234-mc.h
@@ -536,4 +536,9 @@
#define TEGRA234_MEMORY_CLIENT_NVJPG1SRD 0x123
#define TEGRA234_MEMORY_CLIENT_NVJPG1SWR 0x124
+/* ICC IDs for dummy MC clients used to represent CPU clusters */
+#define TEGRA_ICC_MC_CPU_CLUSTER0 1003
+#define TEGRA_ICC_MC_CPU_CLUSTER1 1004
+#define TEGRA_ICC_MC_CPU_CLUSTER2 1005
+
#endif
diff --git a/include/dt-bindings/mfd/stm32f7-rcc.h b/include/dt-bindings/mfd/stm32f7-rcc.h
index a90f3613c584..8d73a9c51e2b 100644
--- a/include/dt-bindings/mfd/stm32f7-rcc.h
+++ b/include/dt-bindings/mfd/stm32f7-rcc.h
@@ -64,6 +64,7 @@
#define STM32F7_RCC_APB1_TIM14 8
#define STM32F7_RCC_APB1_LPTIM1 9
#define STM32F7_RCC_APB1_WWDG 11
+#define STM32F7_RCC_APB1_CAN3 13
#define STM32F7_RCC_APB1_SPI2 14
#define STM32F7_RCC_APB1_SPI3 15
#define STM32F7_RCC_APB1_SPDIFRX 16
diff --git a/include/dt-bindings/mux/ti-serdes.h b/include/dt-bindings/mux/ti-serdes.h
index d3116c52ab72..b0b1091aad6d 100644
--- a/include/dt-bindings/mux/ti-serdes.h
+++ b/include/dt-bindings/mux/ti-serdes.h
@@ -6,6 +6,14 @@
#ifndef _DT_BINDINGS_MUX_TI_SERDES
#define _DT_BINDINGS_MUX_TI_SERDES
+/*
+ * These bindings are deprecated, because they do not match the actual
+ * concept of bindings but rather contain pure constant values used only
+ * in DTS board files.
+ * Instead include the header in the DTS source directory.
+ */
+#warning "These bindings are deprecated. Instead, use the header in the DTS source directory."
+
/* J721E */
#define J721E_SERDES0_LANE0_QSGMII_LANE1 0x0
@@ -117,4 +125,66 @@
#define J721S2_SERDES0_LANE3_USB 0x2
#define J721S2_SERDES0_LANE3_IP4_UNUSED 0x3
+/* J784S4 */
+
+#define J784S4_SERDES0_LANE0_IP1_UNUSED 0x0
+#define J784S4_SERDES0_LANE0_PCIE1_LANE0 0x1
+#define J784S4_SERDES0_LANE0_IP3_UNUSED 0x2
+#define J784S4_SERDES0_LANE0_IP4_UNUSED 0x3
+
+#define J784S4_SERDES0_LANE1_IP1_UNUSED 0x0
+#define J784S4_SERDES0_LANE1_PCIE1_LANE1 0x1
+#define J784S4_SERDES0_LANE1_IP3_UNUSED 0x2
+#define J784S4_SERDES0_LANE1_IP4_UNUSED 0x3
+
+#define J784S4_SERDES0_LANE2_PCIE3_LANE0 0x0
+#define J784S4_SERDES0_LANE2_PCIE1_LANE2 0x1
+#define J784S4_SERDES0_LANE2_IP3_UNUSED 0x2
+#define J784S4_SERDES0_LANE2_IP4_UNUSED 0x3
+
+#define J784S4_SERDES0_LANE3_PCIE3_LANE1 0x0
+#define J784S4_SERDES0_LANE3_PCIE1_LANE3 0x1
+#define J784S4_SERDES0_LANE3_USB 0x2
+#define J784S4_SERDES0_LANE3_IP4_UNUSED 0x3
+
+#define J784S4_SERDES1_LANE0_QSGMII_LANE3 0x0
+#define J784S4_SERDES1_LANE0_PCIE0_LANE0 0x1
+#define J784S4_SERDES1_LANE0_IP3_UNUSED 0x2
+#define J784S4_SERDES1_LANE0_IP4_UNUSED 0x3
+
+#define J784S4_SERDES1_LANE1_QSGMII_LANE4 0x0
+#define J784S4_SERDES1_LANE1_PCIE0_LANE1 0x1
+#define J784S4_SERDES1_LANE1_IP3_UNUSED 0x2
+#define J784S4_SERDES1_LANE1_IP4_UNUSED 0x3
+
+#define J784S4_SERDES1_LANE2_QSGMII_LANE1 0x0
+#define J784S4_SERDES1_LANE2_PCIE0_LANE2 0x1
+#define J784S4_SERDES1_LANE2_PCIE2_LANE0 0x2
+#define J784S4_SERDES1_LANE2_IP4_UNUSED 0x3
+
+#define J784S4_SERDES1_LANE3_QSGMII_LANE2 0x0
+#define J784S4_SERDES1_LANE3_PCIE0_LANE3 0x1
+#define J784S4_SERDES1_LANE3_PCIE2_LANE1 0x2
+#define J784S4_SERDES1_LANE3_IP4_UNUSED 0x3
+
+#define J784S4_SERDES2_LANE0_QSGMII_LANE5 0x0
+#define J784S4_SERDES2_LANE0_IP2_UNUSED 0x1
+#define J784S4_SERDES2_LANE0_IP3_UNUSED 0x2
+#define J784S4_SERDES2_LANE0_IP4_UNUSED 0x3
+
+#define J784S4_SERDES2_LANE1_QSGMII_LANE6 0x0
+#define J784S4_SERDES2_LANE1_IP2_UNUSED 0x1
+#define J784S4_SERDES2_LANE1_IP3_UNUSED 0x2
+#define J784S4_SERDES2_LANE1_IP4_UNUSED 0x3
+
+#define J784S4_SERDES2_LANE2_QSGMII_LANE7 0x0
+#define J784S4_SERDES2_LANE2_QSGMII_LANE1 0x1
+#define J784S4_SERDES2_LANE2_IP3_UNUSED 0x2
+#define J784S4_SERDES2_LANE2_IP4_UNUSED 0x3
+
+#define J784S4_SERDES2_LANE3_QSGMII_LANE8 0x0
+#define J784S4_SERDES2_LANE3_QSGMII_LANE2 0x1
+#define J784S4_SERDES2_LANE3_IP3_UNUSED 0x2
+#define J784S4_SERDES2_LANE3_IP4_UNUSED 0x3
+
#endif /* _DT_BINDINGS_MUX_TI_SERDES */
diff --git a/include/dt-bindings/pinctrl/k3.h b/include/dt-bindings/pinctrl/k3.h
deleted file mode 100644
index b5aca149664e..000000000000
--- a/include/dt-bindings/pinctrl/k3.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This header provides constants for pinctrl bindings for TI's K3 SoC
- * family.
- *
- * Copyright (C) 2018-2021 Texas Instruments Incorporated - https://www.ti.com/
- */
-#ifndef _DT_BINDINGS_PINCTRL_TI_K3_H
-#define _DT_BINDINGS_PINCTRL_TI_K3_H
-
-/*
- * These bindings are deprecated, because they do not match the actual
- * concept of bindings but rather contain pure register values.
- * Instead include the header in the DTS source directory.
- */
-#warning "These bindings are deprecated. Instead, use the header in the DTS source directory."
-
-#define PULLUDEN_SHIFT (16)
-#define PULLTYPESEL_SHIFT (17)
-#define RXACTIVE_SHIFT (18)
-
-#define PULL_DISABLE (1 << PULLUDEN_SHIFT)
-#define PULL_ENABLE (0 << PULLUDEN_SHIFT)
-
-#define PULL_UP (1 << PULLTYPESEL_SHIFT | PULL_ENABLE)
-#define PULL_DOWN (0 << PULLTYPESEL_SHIFT | PULL_ENABLE)
-
-#define INPUT_EN (1 << RXACTIVE_SHIFT)
-#define INPUT_DISABLE (0 << RXACTIVE_SHIFT)
-
-/* Only these macros are expected to be used directly in device tree files */
-#define PIN_OUTPUT (INPUT_DISABLE | PULL_DISABLE)
-#define PIN_OUTPUT_PULLUP (INPUT_DISABLE | PULL_UP)
-#define PIN_OUTPUT_PULLDOWN (INPUT_DISABLE | PULL_DOWN)
-#define PIN_INPUT (INPUT_EN | PULL_DISABLE)
-#define PIN_INPUT_PULLUP (INPUT_EN | PULL_UP)
-#define PIN_INPUT_PULLDOWN (INPUT_EN | PULL_DOWN)
-
-#define AM62AX_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
-#define AM62AX_MCU_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
-
-#define AM62X_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
-#define AM62X_MCU_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
-
-#define AM64X_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
-#define AM64X_MCU_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
-
-#define AM65X_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
-#define AM65X_WKUP_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
-
-#define J721E_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
-#define J721E_WKUP_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
-
-#define J721S2_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
-#define J721S2_WKUP_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
-
-#define J784S4_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
-#define J784S4_WKUP_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
-
-#endif
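The deleted header packed pad configuration into bits 16-18 of the padconf register value, and each *_IOPAD() macro deliberately expands to two device-tree cells, (offset) (value), which is what pinctrl-single consumes. A standalone sketch of the bit arithmetic being removed; this just mirrors the definitions above and is not a kernel API:

#include <stdio.h>

#define PULLUDEN_SHIFT    (16)
#define PULLTYPESEL_SHIFT (17)
#define RXACTIVE_SHIFT    (18)

#define PULL_DISABLE (1 << PULLUDEN_SHIFT)
#define PULL_ENABLE  (0 << PULLUDEN_SHIFT)
#define PULL_UP      (1 << PULLTYPESEL_SHIFT | PULL_ENABLE)
#define INPUT_EN     (1 << RXACTIVE_SHIFT)

#define PIN_INPUT_PULLUP (INPUT_EN | PULL_UP)

int main(void)
{
	/* Prints 0x60000: RX active (bit 18) plus pull-up selected (bit 17),
	 * with pulls enabled (bit 16 clear). */
	printf("PIN_INPUT_PULLUP = 0x%x\n", PIN_INPUT_PULLUP);
	return 0;
}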
diff --git a/include/dt-bindings/pinctrl/stm32-pinfunc.h b/include/dt-bindings/pinctrl/stm32-pinfunc.h
index e6fb8ada3f4d..28ad0235086a 100644
--- a/include/dt-bindings/pinctrl/stm32-pinfunc.h
+++ b/include/dt-bindings/pinctrl/stm32-pinfunc.h
@@ -37,6 +37,9 @@
#define STM32MP_PKG_AB 0x2
#define STM32MP_PKG_AC 0x4
#define STM32MP_PKG_AD 0x8
+#define STM32MP_PKG_AI 0x100
+#define STM32MP_PKG_AK 0x400
+#define STM32MP_PKG_AL 0x800
#endif /* _DT_BINDINGS_STM32_PINFUNC_H */
diff --git a/include/dt-bindings/power/amlogic,c3-pwrc.h b/include/dt-bindings/power/amlogic,c3-pwrc.h
new file mode 100644
index 000000000000..1d98a25b08a4
--- /dev/null
+++ b/include/dt-bindings/power/amlogic,c3-pwrc.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2023 Amlogic, Inc.
+ * Author: hongyu chen1 <hongyu.chen1@amlogic.com>
+ */
+#ifndef _DT_BINDINGS_AMLOGIC_C3_POWER_H
+#define _DT_BINDINGS_AMLOGIC_C3_POWER_H
+
+#define PWRC_C3_NNA_ID 0
+#define PWRC_C3_AUDIO_ID 1
+#define PWRC_C3_RESV_SEC_ID 2
+#define PWRC_C3_SDIOA_ID 3
+#define PWRC_C3_EMMC_ID 4
+#define PWRC_C3_USB_COMB_ID 5
+#define PWRC_C3_SDCARD_ID 6
+#define PWRC_C3_ETH_ID 7
+#define PWRC_C3_RESV0_ID 8
+#define PWRC_C3_GE2D_ID 9
+#define PWRC_C3_CVE_ID 10
+#define PWRC_C3_GDC_WRAP_ID 11
+#define PWRC_C3_ISP_TOP_ID 12
+#define PWRC_C3_MIPI_ISP_WRAP_ID 13
+#define PWRC_C3_VCODEC_ID 14
+
+#endif
diff --git a/include/dt-bindings/power/amlogic,t7-pwrc.h b/include/dt-bindings/power/amlogic,t7-pwrc.h
new file mode 100644
index 000000000000..1f1f2739cc26
--- /dev/null
+++ b/include/dt-bindings/power/amlogic,t7-pwrc.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2023 Amlogic, Inc.
+ * Author: Hongyu Chen <hongyu.chen1@amlogic.com>
+ */
+#ifndef _DT_BINDINGS_AMLOGIC_T7_POWER_H
+#define _DT_BINDINGS_AMLOGIC_T7_POWER_H
+
+#define PWRC_T7_DSPA_ID 0
+#define PWRC_T7_DSPB_ID 1
+#define PWRC_T7_DOS_HCODEC_ID 2
+#define PWRC_T7_DOS_HEVC_ID 3
+#define PWRC_T7_DOS_VDEC_ID 4
+#define PWRC_T7_DOS_WAVE_ID 5
+#define PWRC_T7_VPU_HDMI_ID 6
+#define PWRC_T7_USB_COMB_ID 7
+#define PWRC_T7_PCIE_ID 8
+#define PWRC_T7_GE2D_ID 9
+#define PWRC_T7_SRAMA_ID 10
+#define PWRC_T7_SRAMB_ID 11
+#define PWRC_T7_HDMIRX_ID 12
+#define PWRC_T7_VI_CLK1_ID 13
+#define PWRC_T7_VI_CLK2_ID 14
+#define PWRC_T7_ETH_ID 15
+#define PWRC_T7_ISP_ID 16
+#define PWRC_T7_MIPI_ISP_ID 17
+#define PWRC_T7_GDC_ID 18
+#define PWRC_T7_DEWARP_ID 19
+#define PWRC_T7_SDIO_A_ID 20
+#define PWRC_T7_SDIO_B_ID 21
+#define PWRC_T7_EMMC_ID 22
+#define PWRC_T7_MALI_SC0_ID 23
+#define PWRC_T7_MALI_SC1_ID 24
+#define PWRC_T7_MALI_SC2_ID 25
+#define PWRC_T7_MALI_SC3_ID 26
+#define PWRC_T7_MALI_TOP_ID 27
+#define PWRC_T7_NNA_CORE0_ID 28
+#define PWRC_T7_NNA_CORE1_ID 29
+#define PWRC_T7_NNA_CORE2_ID 30
+#define PWRC_T7_NNA_CORE3_ID 31
+#define PWRC_T7_NNA_TOP_ID 32
+#define PWRC_T7_DDR0_ID 33
+#define PWRC_T7_DDR1_ID 34
+#define PWRC_T7_DMC0_ID 35
+#define PWRC_T7_DMC1_ID 36
+#define PWRC_T7_NOC_ID 37
+#define PWRC_T7_NIC2_ID 38
+#define PWRC_T7_NIC3_ID 39
+#define PWRC_T7_CCI_ID 40
+#define PWRC_T7_MIPI_DSI0_ID 41
+#define PWRC_T7_SPICC0_ID 42
+#define PWRC_T7_SPICC1_ID 43
+#define PWRC_T7_SPICC2_ID 44
+#define PWRC_T7_SPICC3_ID 45
+#define PWRC_T7_SPICC4_ID 46
+#define PWRC_T7_SPICC5_ID 47
+#define PWRC_T7_EDP0_ID 48
+#define PWRC_T7_EDP1_ID 49
+#define PWRC_T7_MIPI_DSI1_ID 50
+#define PWRC_T7_AUDIO_ID 51
+
+#endif
diff --git a/include/dt-bindings/power/mediatek,mt8365-power.h b/include/dt-bindings/power/mediatek,mt8365-power.h
new file mode 100644
index 000000000000..e6cfd0ec7871
--- /dev/null
+++ b/include/dt-bindings/power/mediatek,mt8365-power.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ */
+
+#ifndef _DT_BINDINGS_POWER_MT8365_POWER_H
+#define _DT_BINDINGS_POWER_MT8365_POWER_H
+
+#define MT8365_POWER_DOMAIN_MM 0
+#define MT8365_POWER_DOMAIN_CONN 1
+#define MT8365_POWER_DOMAIN_MFG 2
+#define MT8365_POWER_DOMAIN_AUDIO 3
+#define MT8365_POWER_DOMAIN_CAM 4
+#define MT8365_POWER_DOMAIN_DSP 5
+#define MT8365_POWER_DOMAIN_VDEC 6
+#define MT8365_POWER_DOMAIN_VENC 7
+#define MT8365_POWER_DOMAIN_APU 8
+
+#endif /* _DT_BINDINGS_POWER_MT8365_POWER_H */
diff --git a/include/dt-bindings/power/meson-a1-power.h b/include/dt-bindings/power/meson-a1-power.h
index 6cf50bfb8ccf..724c370d6853 100644
--- a/include/dt-bindings/power/meson-a1-power.h
+++ b/include/dt-bindings/power/meson-a1-power.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2019 Amlogic, Inc.
* Author: Jianxin Pan <jianxin.pan@amlogic.com>
diff --git a/include/dt-bindings/power/meson-axg-power.h b/include/dt-bindings/power/meson-axg-power.h
index e5243884b249..ace0e468ce21 100644
--- a/include/dt-bindings/power/meson-axg-power.h
+++ b/include/dt-bindings/power/meson-axg-power.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2020 BayLibre, SAS
* Author: Neil Armstrong <narmstrong@baylibre.com>
diff --git a/include/dt-bindings/power/meson-g12a-power.h b/include/dt-bindings/power/meson-g12a-power.h
index 93b03bdd60b7..01fd0ac4dd08 100644
--- a/include/dt-bindings/power/meson-g12a-power.h
+++ b/include/dt-bindings/power/meson-g12a-power.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2019 BayLibre, SAS
* Author: Neil Armstrong <narmstrong@baylibre.com>
@@ -10,5 +10,6 @@
#define PWRC_G12A_VPU_ID 0
#define PWRC_G12A_ETH_ID 1
#define PWRC_G12A_NNA_ID 2
+#define PWRC_G12A_ISP_ID 3
#endif
diff --git a/include/dt-bindings/power/meson-gxbb-power.h b/include/dt-bindings/power/meson-gxbb-power.h
index 1262dac696c0..8d0b32b6c02c 100644
--- a/include/dt-bindings/power/meson-gxbb-power.h
+++ b/include/dt-bindings/power/meson-gxbb-power.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2019 BayLibre, SAS
* Author: Neil Armstrong <narmstrong@baylibre.com>
diff --git a/include/dt-bindings/power/meson-s4-power.h b/include/dt-bindings/power/meson-s4-power.h
index 462dd2cb938b..f210a524a592 100644
--- a/include/dt-bindings/power/meson-s4-power.h
+++ b/include/dt-bindings/power/meson-s4-power.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2021 Amlogic, Inc.
* Author: Shunzhou Jiang <shunzhou.jiang@amlogic.com>
diff --git a/include/dt-bindings/power/meson-sm1-power.h b/include/dt-bindings/power/meson-sm1-power.h
index a020ab00c134..d78e710dbfff 100644
--- a/include/dt-bindings/power/meson-sm1-power.h
+++ b/include/dt-bindings/power/meson-sm1-power.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2019 BayLibre, SAS
* Author: Neil Armstrong <narmstrong@baylibre.com>
diff --git a/include/dt-bindings/power/meson8-power.h b/include/dt-bindings/power/meson8-power.h
index dd8b2ddb82a7..7a55ba2cd22e 100644
--- a/include/dt-bindings/power/meson8-power.h
+++ b/include/dt-bindings/power/meson8-power.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2019 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
*/
diff --git a/include/dt-bindings/power/qcom,rpmhpd.h b/include/dt-bindings/power/qcom,rpmhpd.h
new file mode 100644
index 000000000000..e54ffa361451
--- /dev/null
+++ b/include/dt-bindings/power/qcom,rpmhpd.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_POWER_QCOM_RPMHPD_H
+#define _DT_BINDINGS_POWER_QCOM_RPMHPD_H
+
+/* Generic RPMH Power Domain Indexes */
+#define RPMHPD_CX 0
+#define RPMHPD_CX_AO 1
+#define RPMHPD_EBI 2
+#define RPMHPD_GFX 3
+#define RPMHPD_LCX 4
+#define RPMHPD_LMX 5
+#define RPMHPD_MMCX 6
+#define RPMHPD_MMCX_AO 7
+#define RPMHPD_MX 8
+#define RPMHPD_MX_AO 9
+#define RPMHPD_MXC 10
+#define RPMHPD_MXC_AO 11
+#define RPMHPD_MSS 12
+#define RPMHPD_NSP 13
+#define RPMHPD_NSP0 14
+#define RPMHPD_NSP1 15
+#define RPMHPD_QPHY 16
+#define RPMHPD_DDR 17
+#define RPMHPD_XO 18
+#define RPMHPD_NSP2 19
+#define RPMHPD_GMXC 20
+
+#endif
diff --git a/include/dt-bindings/power/qcom-rpmpd.h b/include/dt-bindings/power/qcom-rpmpd.h
index 867b18e041ea..7f4e2983a4c5 100644
--- a/include/dt-bindings/power/qcom-rpmpd.h
+++ b/include/dt-bindings/power/qcom-rpmpd.h
@@ -216,20 +216,30 @@
#define SC8280XP_XO 15
/* SDM845 Power Domain performance levels */
-#define RPMH_REGULATOR_LEVEL_RETENTION 16
-#define RPMH_REGULATOR_LEVEL_MIN_SVS 48
-#define RPMH_REGULATOR_LEVEL_LOW_SVS_D1 56
-#define RPMH_REGULATOR_LEVEL_LOW_SVS 64
-#define RPMH_REGULATOR_LEVEL_LOW_SVS_L1 80
-#define RPMH_REGULATOR_LEVEL_SVS 128
-#define RPMH_REGULATOR_LEVEL_SVS_L0 144
-#define RPMH_REGULATOR_LEVEL_SVS_L1 192
-#define RPMH_REGULATOR_LEVEL_SVS_L2 224
-#define RPMH_REGULATOR_LEVEL_NOM 256
-#define RPMH_REGULATOR_LEVEL_NOM_L1 320
-#define RPMH_REGULATOR_LEVEL_NOM_L2 336
-#define RPMH_REGULATOR_LEVEL_TURBO 384
-#define RPMH_REGULATOR_LEVEL_TURBO_L1 416
+#define RPMH_REGULATOR_LEVEL_RETENTION 16
+#define RPMH_REGULATOR_LEVEL_MIN_SVS 48
+#define RPMH_REGULATOR_LEVEL_LOW_SVS_D2 52
+#define RPMH_REGULATOR_LEVEL_LOW_SVS_D1 56
+#define RPMH_REGULATOR_LEVEL_LOW_SVS_D0 60
+#define RPMH_REGULATOR_LEVEL_LOW_SVS 64
+#define RPMH_REGULATOR_LEVEL_LOW_SVS_P1 72
+#define RPMH_REGULATOR_LEVEL_LOW_SVS_L1 80
+#define RPMH_REGULATOR_LEVEL_LOW_SVS_L2 96
+#define RPMH_REGULATOR_LEVEL_SVS 128
+#define RPMH_REGULATOR_LEVEL_SVS_L0 144
+#define RPMH_REGULATOR_LEVEL_SVS_L1 192
+#define RPMH_REGULATOR_LEVEL_SVS_L2 224
+#define RPMH_REGULATOR_LEVEL_NOM 256
+#define RPMH_REGULATOR_LEVEL_NOM_L0 288
+#define RPMH_REGULATOR_LEVEL_NOM_L1 320
+#define RPMH_REGULATOR_LEVEL_NOM_L2 336
+#define RPMH_REGULATOR_LEVEL_TURBO 384
+#define RPMH_REGULATOR_LEVEL_TURBO_L0 400
+#define RPMH_REGULATOR_LEVEL_TURBO_L1 416
+#define RPMH_REGULATOR_LEVEL_TURBO_L2 432
+#define RPMH_REGULATOR_LEVEL_TURBO_L3 448
+#define RPMH_REGULATOR_LEVEL_SUPER_TURBO 464
+#define RPMH_REGULATOR_LEVEL_SUPER_TURBO_NO_CPR 480
/* MDM9607 Power Domains */
#define MDM9607_VDDCX 0
@@ -268,6 +278,27 @@
#define MSM8909_VDDMX MSM8916_VDDMX
#define MSM8909_VDDMX_AO MSM8916_VDDMX_AO
+/* MSM8917 Power Domain Indexes */
+#define MSM8917_VDDCX 0
+#define MSM8917_VDDCX_AO 1
+#define MSM8917_VDDCX_VFL 2
+#define MSM8917_VDDMX 3
+#define MSM8917_VDDMX_AO 4
+
+/* MSM8937 Power Domain Indexes */
+#define MSM8937_VDDCX MSM8917_VDDCX
+#define MSM8937_VDDCX_AO MSM8917_VDDCX_AO
+#define MSM8937_VDDCX_VFL MSM8917_VDDCX_VFL
+#define MSM8937_VDDMX MSM8917_VDDMX
+#define MSM8937_VDDMX_AO MSM8917_VDDMX_AO
+
+/* QM215 Power Domain Indexes */
+#define QM215_VDDCX MSM8917_VDDCX
+#define QM215_VDDCX_AO MSM8917_VDDCX_AO
+#define QM215_VDDCX_VFL MSM8917_VDDCX_VFL
+#define QM215_VDDMX MSM8917_VDDMX
+#define QM215_VDDMX_AO MSM8917_VDDMX_AO
+
/* MSM8953 Power Domain Indexes */
#define MSM8953_VDDMD 0
#define MSM8953_VDDMD_AO 1
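The RPMH_REGULATOR_LEVEL_* values above are abstract performance levels, not voltages; RPMh firmware maps each level to a voltage corner. Device trees usually request them via required-opps in an OPP table, but a driver can also vote directly through the generic power-domain API. A hedged sketch, assuming the device is already attached to an rpmhpd power domain:

#include <linux/pm_domain.h>
#include <dt-bindings/power/qcom-rpmpd.h>

/* Sketch: raise this device's power-domain vote to the nominal corner. */
static int vote_nominal(struct device *dev)
{
	return dev_pm_genpd_set_performance_state(dev, RPMH_REGULATOR_LEVEL_NOM);
}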
diff --git a/include/dt-bindings/power/r8a779f0-sysc.h b/include/dt-bindings/power/r8a779f0-sysc.h
index 0ec8ad727ed9..cde1536e9ed0 100644
--- a/include/dt-bindings/power/r8a779f0-sysc.h
+++ b/include/dt-bindings/power/r8a779f0-sysc.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
/*
* Copyright (C) 2021 Renesas Electronics Corp.
*/
diff --git a/include/dt-bindings/power/rk3588-power.h b/include/dt-bindings/power/rk3588-power.h
index 1b92fec013cb..6b91a50cc6d6 100644
--- a/include/dt-bindings/power/rk3588-power.h
+++ b/include/dt-bindings/power/rk3588-power.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
#ifndef __DT_BINDINGS_POWER_RK3588_POWER_H__
#define __DT_BINDINGS_POWER_RK3588_POWER_H__
diff --git a/include/dt-bindings/power/starfive,jh7110-pmu.h b/include/dt-bindings/power/starfive,jh7110-pmu.h
index 132bfe401fc8..7b4f24927dee 100644
--- a/include/dt-bindings/power/starfive,jh7110-pmu.h
+++ b/include/dt-bindings/power/starfive,jh7110-pmu.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/*
- * Copyright (C) 2022 StarFive Technology Co., Ltd.
+ * Copyright (C) 2022-2023 StarFive Technology Co., Ltd.
* Author: Walker Chen <walker.chen@starfivetech.com>
*/
#ifndef __DT_BINDINGS_POWER_JH7110_POWER_H__
@@ -14,4 +14,8 @@
#define JH7110_PD_ISP 5
#define JH7110_PD_VENC 6
+/* AON Power Domain */
+#define JH7110_AON_PD_DPHY_TX 0
+#define JH7110_AON_PD_DPHY_RX 1
+
#endif
diff --git a/include/dt-bindings/power/summit,smb347-charger.h b/include/dt-bindings/power/summit,smb347-charger.h
index 3205699b5e41..14f2f9cf2020 100644
--- a/include/dt-bindings/power/summit,smb347-charger.h
+++ b/include/dt-bindings/power/summit,smb347-charger.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0-or-later or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0-or-later OR MIT) */
/*
* Author: David Heidelberg <david@ixit.cz>
*/
diff --git a/include/dt-bindings/regulator/st,stm32mp13-regulator.h b/include/dt-bindings/regulator/st,stm32mp13-regulator.h
new file mode 100644
index 000000000000..b3a974dfc585
--- /dev/null
+++ b/include/dt-bindings/regulator/st,stm32mp13-regulator.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2022, STMicroelectronics - All Rights Reserved
+ */
+
+#ifndef __DT_BINDINGS_REGULATOR_ST_STM32MP13_REGULATOR_H
+#define __DT_BINDINGS_REGULATOR_ST_STM32MP13_REGULATOR_H
+
+/* SCMI voltage domains identifiers */
+
+/* SOC Internal regulators */
+#define VOLTD_SCMI_REG11 0
+#define VOLTD_SCMI_REG18 1
+#define VOLTD_SCMI_USB33 2
+#define VOLTD_SCMI_SDMMC1_IO 3
+#define VOLTD_SCMI_SDMMC2_IO 4
+#define VOLTD_SCMI_VREFBUF 5
+
+/* STPMIC1 regulators */
+#define VOLTD_SCMI_STPMIC1_BUCK1 6
+#define VOLTD_SCMI_STPMIC1_BUCK2 7
+#define VOLTD_SCMI_STPMIC1_BUCK3 8
+#define VOLTD_SCMI_STPMIC1_BUCK4 9
+#define VOLTD_SCMI_STPMIC1_LDO1 10
+#define VOLTD_SCMI_STPMIC1_LDO2 11
+#define VOLTD_SCMI_STPMIC1_LDO3 12
+#define VOLTD_SCMI_STPMIC1_LDO4 13
+#define VOLTD_SCMI_STPMIC1_LDO5 14
+#define VOLTD_SCMI_STPMIC1_LDO6 15
+#define VOLTD_SCMI_STPMIC1_VREFDDR 16
+#define VOLTD_SCMI_STPMIC1_BOOST 17
+#define VOLTD_SCMI_STPMIC1_PWR_SW1 18
+#define VOLTD_SCMI_STPMIC1_PWR_SW2 19
+
+/* External regulators */
+#define VOLTD_SCMI_REGU0 20
+#define VOLTD_SCMI_REGU1 21
+#define VOLTD_SCMI_REGU2 22
+#define VOLTD_SCMI_REGU3 23
+#define VOLTD_SCMI_REGU4 24
+
+#endif /*__DT_BINDINGS_REGULATOR_ST_STM32MP13_REGULATOR_H */
diff --git a/include/dt-bindings/reset/altr,rst-mgr-s10.h b/include/dt-bindings/reset/altr,rst-mgr-s10.h
index 70ea3a09dbe1..04c4d0c6fd34 100644
--- a/include/dt-bindings/reset/altr,rst-mgr-s10.h
+++ b/include/dt-bindings/reset/altr,rst-mgr-s10.h
@@ -63,12 +63,15 @@
#define I2C2_RESET 74
#define I2C3_RESET 75
#define I2C4_RESET 76
-/* 77-79 is empty */
+#define I3C0_RESET 77
+#define I3C1_RESET 78
+/* 79 is empty */
#define UART0_RESET 80
#define UART1_RESET 81
/* 82-87 is empty */
#define GPIO0_RESET 88
#define GPIO1_RESET 89
+#define WATCHDOG4_RESET 90
/* BRGMODRST */
#define SOC2FPGA_RESET 96
diff --git a/include/dt-bindings/reset/amlogic,c3-reset.h b/include/dt-bindings/reset/amlogic,c3-reset.h
new file mode 100644
index 000000000000..d9127863f603
--- /dev/null
+++ b/include/dt-bindings/reset/amlogic,c3-reset.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2023 Amlogic, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_AMLOGIC_C3_RESET_H
+#define _DT_BINDINGS_AMLOGIC_C3_RESET_H
+
+/* RESET0 */
+/* 0-3 */
+#define RESET_USBCTRL 4
+/* 5-7 */
+#define RESET_USBPHY20 8
+/* 9 */
+#define RESET_USB2DRD 10
+#define RESET_MIPI_DSI_HOST 11
+#define RESET_MIPI_DSI_PHY 12
+/* 13-20 */
+#define RESET_GE2D 21
+#define RESET_DWAP 22
+/* 23-31 */
+
+/* RESET1 */
+#define RESET_AUDIO 32
+/* 33-34 */
+#define RESET_DDRAPB 35
+#define RESET_DDR 36
+#define RESET_DOS_CAPB3 37
+#define RESET_DOS 38
+/* 39-46 */
+#define RESET_NNA 47
+#define RESET_ETHERNET 48
+#define RESET_ISP 49
+#define RESET_VC9000E_APB 50
+#define RESET_VC9000E_A 51
+/* 52 */
+#define RESET_VC9000E_CORE 53
+/* 54-63 */
+
+/* RESET2 */
+#define RESET_ABUS_ARB 64
+#define RESET_IRCTRL 65
+/* 66 */
+#define RESET_TEMP_PII 67
+/* 68-72 */
+#define RESET_SPICC_0 73
+#define RESET_SPICC_1 74
+#define RESET_RSA 75
+
+/* 76-79 */
+#define RESET_MSR_CLK 80
+#define RESET_SPIFC 81
+#define RESET_SAR_ADC 82
+/* 83-87 */
+#define RESET_ACODEC 88
+/* 89-90 */
+#define RESET_WATCHDOG 91
+/* 92-95 */
+
+/* RESET3 */
+#define RESET_ISP_NIC_GPV 96
+#define RESET_ISP_NIC_MAIN 97
+#define RESET_ISP_NIC_VCLK 98
+#define RESET_ISP_NIC_VOUT 99
+#define RESET_ISP_NIC_ALL 100
+#define RESET_VOUT 101
+#define RESET_VOUT_VENC 102
+/* 103 */
+#define RESET_CVE_NIC_GPV 104
+#define RESET_CVE_NIC_MAIN 105
+#define RESET_CVE_NIC_GE2D 106
+#define RESET_CVE_NIC_DW 107
+#define RESET_CVE_NIC_CVE 108
+#define RESET_CVE_NIC_ALL 109
+#define RESET_CVE 110
+/* 111-127 */
+
+/* RESET4 */
+#define RESET_RTC 128
+#define RESET_PWM_AB 129
+#define RESET_PWM_CD 130
+#define RESET_PWM_EF 131
+#define RESET_PWM_GH 132
+#define RESET_PWM_IJ 133
+#define RESET_PWM_KL 134
+#define RESET_PWM_MN 135
+/* 136-137 */
+#define RESET_UART_A 138
+#define RESET_UART_B 139
+#define RESET_UART_C 140
+#define RESET_UART_D 141
+#define RESET_UART_E 142
+#define RESET_UART_F 143
+#define RESET_I2C_S_A 144
+#define RESET_I2C_M_A 145
+#define RESET_I2C_M_B 146
+#define RESET_I2C_M_C 147
+#define RESET_I2C_M_D 148
+/* 149-151 */
+#define RESET_SD_EMMC_A 152
+#define RESET_SD_EMMC_B 153
+#define RESET_SD_EMMC_C 154
+
+/* RESET5 */
+/* 160-172 */
+#define RESET_BRG_NIC_NNA 173
+#define RESET_BRG_MUX_NIC_MAIN 174
+#define RESET_BRG_AO_NIC_ALL 175
+/* 176-183 */
+#define RESET_BRG_NIC_VAPB 184
+#define RESET_BRG_NIC_SDIO_B 185
+#define RESET_BRG_NIC_SDIO_A 186
+#define RESET_BRG_NIC_EMMC 187
+#define RESET_BRG_NIC_DSU 188
+#define RESET_BRG_NIC_SYSCLK 189
+#define RESET_BRG_NIC_MAIN 190
+#define RESET_BRG_NIC_ALL 191
+
+#endif
diff --git a/include/dt-bindings/reset/mediatek,mt7988-resets.h b/include/dt-bindings/reset/mediatek,mt7988-resets.h
new file mode 100644
index 000000000000..493301971367
--- /dev/null
+++ b/include/dt-bindings/reset/mediatek,mt7988-resets.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023 Daniel Golle <daniel@makrotopia.org>
+ * Author: Daniel Golle <daniel@makrotopia.org>
+ */
+
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT7988
+#define _DT_BINDINGS_RESET_CONTROLLER_MT7988
+
+/* ETHWARP resets */
+#define MT7988_ETHWARP_RST_SWITCH 0
+
+#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT7988 */
diff --git a/include/dt-bindings/reset/mt8188-resets.h b/include/dt-bindings/reset/mt8188-resets.h
index 377cdfda82a9..5a58c54e7d20 100644
--- a/include/dt-bindings/reset/mt8188-resets.h
+++ b/include/dt-bindings/reset/mt8188-resets.h
@@ -33,4 +33,84 @@
#define MT8188_TOPRGU_SW_RST_NUM 24
+/* INFRA resets */
+#define MT8188_INFRA_RST1_THERMAL_MCU_RST 0
+#define MT8188_INFRA_RST1_THERMAL_CTRL_RST 1
+#define MT8188_INFRA_RST3_PTP_CTRL_RST 2
+
+#define MT8188_VDO0_RST_DISP_OVL0 0
+#define MT8188_VDO0_RST_FAKE_ENG0 1
+#define MT8188_VDO0_RST_DISP_CCORR0 2
+#define MT8188_VDO0_RST_DISP_MUTEX0 3
+#define MT8188_VDO0_RST_DISP_GAMMA0 4
+#define MT8188_VDO0_RST_DISP_DITHER0 5
+#define MT8188_VDO0_RST_DISP_WDMA0 6
+#define MT8188_VDO0_RST_DISP_RDMA0 7
+#define MT8188_VDO0_RST_DSI0 8
+#define MT8188_VDO0_RST_DSI1 9
+#define MT8188_VDO0_RST_DSC_WRAP0 10
+#define MT8188_VDO0_RST_VPP_MERGE0 11
+#define MT8188_VDO0_RST_DP_INTF0 12
+#define MT8188_VDO0_RST_DISP_AAL0 13
+#define MT8188_VDO0_RST_INLINEROT0 14
+#define MT8188_VDO0_RST_APB_BUS 15
+#define MT8188_VDO0_RST_DISP_COLOR0 16
+#define MT8188_VDO0_RST_MDP_WROT0 17
+#define MT8188_VDO0_RST_DISP_RSZ0 18
+
+#define MT8188_VDO1_RST_SMI_LARB2 0
+#define MT8188_VDO1_RST_SMI_LARB3 1
+#define MT8188_VDO1_RST_GALS 2
+#define MT8188_VDO1_RST_FAKE_ENG0 3
+#define MT8188_VDO1_RST_FAKE_ENG1 4
+#define MT8188_VDO1_RST_MDP_RDMA0 5
+#define MT8188_VDO1_RST_MDP_RDMA1 6
+#define MT8188_VDO1_RST_MDP_RDMA2 7
+#define MT8188_VDO1_RST_MDP_RDMA3 8
+#define MT8188_VDO1_RST_VPP_MERGE0 9
+#define MT8188_VDO1_RST_VPP_MERGE1 10
+#define MT8188_VDO1_RST_VPP_MERGE2 11
+#define MT8188_VDO1_RST_VPP_MERGE3 12
+#define MT8188_VDO1_RST_VPP_MERGE4 13
+#define MT8188_VDO1_RST_VPP2_TO_VDO1_DL_ASYNC 14
+#define MT8188_VDO1_RST_VPP3_TO_VDO1_DL_ASYNC 15
+#define MT8188_VDO1_RST_DISP_MUTEX 16
+#define MT8188_VDO1_RST_MDP_RDMA4 17
+#define MT8188_VDO1_RST_MDP_RDMA5 18
+#define MT8188_VDO1_RST_MDP_RDMA6 19
+#define MT8188_VDO1_RST_MDP_RDMA7 20
+#define MT8188_VDO1_RST_DP_INTF1_MMCK 21
+#define MT8188_VDO1_RST_DPI0_MM_CK 22
+#define MT8188_VDO1_RST_DPI1_MM_CK 23
+#define MT8188_VDO1_RST_MERGE0_DL_ASYNC 24
+#define MT8188_VDO1_RST_MERGE1_DL_ASYNC 25
+#define MT8188_VDO1_RST_MERGE2_DL_ASYNC 26
+#define MT8188_VDO1_RST_MERGE3_DL_ASYNC 27
+#define MT8188_VDO1_RST_MERGE4_DL_ASYNC 28
+#define MT8188_VDO1_RST_VDO0_DSC_TO_VDO1_DL_ASYNC 29
+#define MT8188_VDO1_RST_VDO0_MERGE_TO_VDO1_DL_ASYNC 30
+#define MT8188_VDO1_RST_PADDING0 31
+#define MT8188_VDO1_RST_PADDING1 32
+#define MT8188_VDO1_RST_PADDING2 33
+#define MT8188_VDO1_RST_PADDING3 34
+#define MT8188_VDO1_RST_PADDING4 35
+#define MT8188_VDO1_RST_PADDING5 36
+#define MT8188_VDO1_RST_PADDING6 37
+#define MT8188_VDO1_RST_PADDING7 38
+#define MT8188_VDO1_RST_DISP_RSZ0 39
+#define MT8188_VDO1_RST_DISP_RSZ1 40
+#define MT8188_VDO1_RST_DISP_RSZ2 41
+#define MT8188_VDO1_RST_DISP_RSZ3 42
+#define MT8188_VDO1_RST_HDR_VDO_FE0 43
+#define MT8188_VDO1_RST_HDR_GFX_FE0 44
+#define MT8188_VDO1_RST_HDR_VDO_BE 45
+#define MT8188_VDO1_RST_HDR_VDO_FE1 46
+#define MT8188_VDO1_RST_HDR_GFX_FE1 47
+#define MT8188_VDO1_RST_DISP_MIXER 48
+#define MT8188_VDO1_RST_HDR_VDO_FE0_DL_ASYNC 49
+#define MT8188_VDO1_RST_HDR_VDO_FE1_DL_ASYNC 50
+#define MT8188_VDO1_RST_HDR_GFX_FE0_DL_ASYNC 51
+#define MT8188_VDO1_RST_HDR_GFX_FE1_DL_ASYNC 52
+#define MT8188_VDO1_RST_HDR_VDO_BE_DL_ASYNC 53
+
#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8188 */
diff --git a/include/dt-bindings/reset/nuvoton,ma35d1-reset.h b/include/dt-bindings/reset/nuvoton,ma35d1-reset.h
new file mode 100644
index 000000000000..2e99ee0d68c5
--- /dev/null
+++ b/include/dt-bindings/reset/nuvoton,ma35d1-reset.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2023 Nuvoton Technologies.
+ * Author: Chi-Fen Li <cfli0@nuvoton.com>
+ *
+ * Device Tree binding constants for MA35D1 reset controller.
+ */
+
+#ifndef __DT_BINDINGS_RESET_MA35D1_H
+#define __DT_BINDINGS_RESET_MA35D1_H
+
+#define MA35D1_RESET_CHIP 0
+#define MA35D1_RESET_CA35CR0 1
+#define MA35D1_RESET_CA35CR1 2
+#define MA35D1_RESET_CM4 3
+#define MA35D1_RESET_PDMA0 4
+#define MA35D1_RESET_PDMA1 5
+#define MA35D1_RESET_PDMA2 6
+#define MA35D1_RESET_PDMA3 7
+#define MA35D1_RESET_DISP 8
+#define MA35D1_RESET_VCAP0 9
+#define MA35D1_RESET_VCAP1 10
+#define MA35D1_RESET_GFX 11
+#define MA35D1_RESET_VDEC 12
+#define MA35D1_RESET_WHC0 13
+#define MA35D1_RESET_WHC1 14
+#define MA35D1_RESET_GMAC0 15
+#define MA35D1_RESET_GMAC1 16
+#define MA35D1_RESET_HWSEM 17
+#define MA35D1_RESET_EBI 18
+#define MA35D1_RESET_HSUSBH0 19
+#define MA35D1_RESET_HSUSBH1 20
+#define MA35D1_RESET_HSUSBD 21
+#define MA35D1_RESET_USBHL 22
+#define MA35D1_RESET_SDH0 23
+#define MA35D1_RESET_SDH1 24
+#define MA35D1_RESET_NAND 25
+#define MA35D1_RESET_GPIO 26
+#define MA35D1_RESET_MCTLP 27
+#define MA35D1_RESET_MCTLC 28
+#define MA35D1_RESET_DDRPUB 29
+#define MA35D1_RESET_TMR0 30
+#define MA35D1_RESET_TMR1 31
+#define MA35D1_RESET_TMR2 32
+#define MA35D1_RESET_TMR3 33
+#define MA35D1_RESET_I2C0 34
+#define MA35D1_RESET_I2C1 35
+#define MA35D1_RESET_I2C2 36
+#define MA35D1_RESET_I2C3 37
+#define MA35D1_RESET_QSPI0 38
+#define MA35D1_RESET_SPI0 39
+#define MA35D1_RESET_SPI1 40
+#define MA35D1_RESET_SPI2 41
+#define MA35D1_RESET_UART0 42
+#define MA35D1_RESET_UART1 43
+#define MA35D1_RESET_UART2 44
+#define MA35D1_RESET_UART3 45
+#define MA35D1_RESET_UART4 46
+#define MA35D1_RESET_UART5 47
+#define MA35D1_RESET_UART6 48
+#define MA35D1_RESET_UART7 49
+#define MA35D1_RESET_CANFD0 50
+#define MA35D1_RESET_CANFD1 51
+#define MA35D1_RESET_EADC0 52
+#define MA35D1_RESET_I2S0 53
+#define MA35D1_RESET_SC0 54
+#define MA35D1_RESET_SC1 55
+#define MA35D1_RESET_QSPI1 56
+#define MA35D1_RESET_SPI3 57
+#define MA35D1_RESET_EPWM0 58
+#define MA35D1_RESET_EPWM1 59
+#define MA35D1_RESET_QEI0 60
+#define MA35D1_RESET_QEI1 61
+#define MA35D1_RESET_ECAP0 62
+#define MA35D1_RESET_ECAP1 63
+#define MA35D1_RESET_CANFD2 64
+#define MA35D1_RESET_ADC0 65
+#define MA35D1_RESET_TMR4 66
+#define MA35D1_RESET_TMR5 67
+#define MA35D1_RESET_TMR6 68
+#define MA35D1_RESET_TMR7 69
+#define MA35D1_RESET_TMR8 70
+#define MA35D1_RESET_TMR9 71
+#define MA35D1_RESET_TMR10 72
+#define MA35D1_RESET_TMR11 73
+#define MA35D1_RESET_UART8 74
+#define MA35D1_RESET_UART9 75
+#define MA35D1_RESET_UART10 76
+#define MA35D1_RESET_UART11 77
+#define MA35D1_RESET_UART12 78
+#define MA35D1_RESET_UART13 79
+#define MA35D1_RESET_UART14 80
+#define MA35D1_RESET_UART15 81
+#define MA35D1_RESET_UART16 82
+#define MA35D1_RESET_I2S1 83
+#define MA35D1_RESET_I2C4 84
+#define MA35D1_RESET_I2C5 85
+#define MA35D1_RESET_EPWM2 86
+#define MA35D1_RESET_ECAP2 87
+#define MA35D1_RESET_QEI2 88
+#define MA35D1_RESET_CANFD3 89
+#define MA35D1_RESET_KPI 90
+#define MA35D1_RESET_GIC 91
+#define MA35D1_RESET_SSMCC 92
+#define MA35D1_RESET_SSPCC 93
+#define MA35D1_RESET_COUNT 94
+
+#endif
diff --git a/include/dt-bindings/reset/qcom,gcc-ipq5018.h b/include/dt-bindings/reset/qcom,gcc-ipq5018.h
new file mode 100644
index 000000000000..8f03c92fc23b
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,gcc-ipq5018.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_RESET_IPQ_GCC_5018_H
+#define _DT_BINDINGS_RESET_IPQ_GCC_5018_H
+
+#define GCC_APC0_VOLTAGE_DROOP_DETECTOR_BCR 0
+#define GCC_BLSP1_BCR 1
+#define GCC_BLSP1_QUP1_BCR 2
+#define GCC_BLSP1_QUP2_BCR 3
+#define GCC_BLSP1_QUP3_BCR 4
+#define GCC_BLSP1_UART1_BCR 5
+#define GCC_BLSP1_UART2_BCR 6
+#define GCC_BOOT_ROM_BCR 7
+#define GCC_BTSS_BCR 8
+#define GCC_CMN_BLK_BCR 9
+#define GCC_CMN_LDO_BCR 10
+#define GCC_CE_BCR 11
+#define GCC_CRYPTO_BCR 12
+#define GCC_DCC_BCR 13
+#define GCC_DCD_BCR 14
+#define GCC_DDRSS_BCR 15
+#define GCC_EDPD_BCR 16
+#define GCC_GEPHY_BCR 17
+#define GCC_GEPHY_MDC_SW_ARES 18
+#define GCC_GEPHY_DSP_HW_ARES 19
+#define GCC_GEPHY_RX_ARES 20
+#define GCC_GEPHY_TX_ARES 21
+#define GCC_GMAC0_BCR 22
+#define GCC_GMAC0_CFG_ARES 23
+#define GCC_GMAC0_SYS_ARES 24
+#define GCC_GMAC1_BCR 25
+#define GCC_GMAC1_CFG_ARES 26
+#define GCC_GMAC1_SYS_ARES 27
+#define GCC_IMEM_BCR 28
+#define GCC_LPASS_BCR 29
+#define GCC_MDIO0_BCR 30
+#define GCC_MDIO1_BCR 31
+#define GCC_MPM_BCR 32
+#define GCC_PCIE0_BCR 33
+#define GCC_PCIE0_LINK_DOWN_BCR 34
+#define GCC_PCIE0_PHY_BCR 35
+#define GCC_PCIE0PHY_PHY_BCR 36
+#define GCC_PCIE0_PIPE_ARES 37
+#define GCC_PCIE0_SLEEP_ARES 38
+#define GCC_PCIE0_CORE_STICKY_ARES 39
+#define GCC_PCIE0_AXI_MASTER_ARES 40
+#define GCC_PCIE0_AXI_SLAVE_ARES 41
+#define GCC_PCIE0_AHB_ARES 42
+#define GCC_PCIE0_AXI_MASTER_STICKY_ARES 43
+#define GCC_PCIE0_AXI_SLAVE_STICKY_ARES 44
+#define GCC_PCIE1_BCR 45
+#define GCC_PCIE1_LINK_DOWN_BCR 46
+#define GCC_PCIE1_PHY_BCR 47
+#define GCC_PCIE1PHY_PHY_BCR 48
+#define GCC_PCIE1_PIPE_ARES 49
+#define GCC_PCIE1_SLEEP_ARES 50
+#define GCC_PCIE1_CORE_STICKY_ARES 51
+#define GCC_PCIE1_AXI_MASTER_ARES 52
+#define GCC_PCIE1_AXI_SLAVE_ARES 53
+#define GCC_PCIE1_AHB_ARES 54
+#define GCC_PCIE1_AXI_MASTER_STICKY_ARES 55
+#define GCC_PCIE1_AXI_SLAVE_STICKY_ARES 56
+#define GCC_PCNOC_BCR 57
+#define GCC_PCNOC_BUS_TIMEOUT0_BCR 58
+#define GCC_PCNOC_BUS_TIMEOUT1_BCR 59
+#define GCC_PCNOC_BUS_TIMEOUT2_BCR 60
+#define GCC_PCNOC_BUS_TIMEOUT3_BCR 61
+#define GCC_PCNOC_BUS_TIMEOUT4_BCR 62
+#define GCC_PCNOC_BUS_TIMEOUT5_BCR 63
+#define GCC_PCNOC_BUS_TIMEOUT6_BCR 64
+#define GCC_PCNOC_BUS_TIMEOUT7_BCR 65
+#define GCC_PCNOC_BUS_TIMEOUT8_BCR 66
+#define GCC_PCNOC_BUS_TIMEOUT9_BCR 67
+#define GCC_PCNOC_BUS_TIMEOUT10_BCR 68
+#define GCC_PCNOC_BUS_TIMEOUT11_BCR 69
+#define GCC_PRNG_BCR 70
+#define GCC_Q6SS_DBG_ARES 71
+#define GCC_Q6_AHB_S_ARES 72
+#define GCC_Q6_AHB_ARES 73
+#define GCC_Q6_AXIM2_ARES 74
+#define GCC_Q6_AXIM_ARES 75
+#define GCC_Q6_AXIS_ARES 76
+#define GCC_QDSS_BCR 77
+#define GCC_QPIC_BCR 78
+#define GCC_QUSB2_0_PHY_BCR 79
+#define GCC_SDCC1_BCR 80
+#define GCC_SEC_CTRL_BCR 81
+#define GCC_SPDM_BCR 82
+#define GCC_SYSTEM_NOC_BCR 83
+#define GCC_TCSR_BCR 84
+#define GCC_TLMM_BCR 85
+#define GCC_UBI0_AXI_ARES 86
+#define GCC_UBI0_AHB_ARES 87
+#define GCC_UBI0_NC_AXI_ARES 88
+#define GCC_UBI0_DBG_ARES 89
+#define GCC_UBI0_UTCM_ARES 90
+#define GCC_UBI0_CORE_ARES 91
+#define GCC_UBI32_BCR 92
+#define GCC_UNIPHY_BCR 93
+#define GCC_UNIPHY_AHB_ARES 94
+#define GCC_UNIPHY_SYS_ARES 95
+#define GCC_UNIPHY_RX_ARES 96
+#define GCC_UNIPHY_TX_ARES 97
+#define GCC_USB0_BCR 98
+#define GCC_USB0_PHY_BCR 99
+#define GCC_WCSS_BCR 100
+#define GCC_WCSS_DBG_ARES 101
+#define GCC_WCSS_ECAHB_ARES 102
+#define GCC_WCSS_ACMT_ARES 103
+#define GCC_WCSS_DBG_BDG_ARES 104
+#define GCC_WCSS_AHB_S_ARES 105
+#define GCC_WCSS_AXI_M_ARES 106
+#define GCC_WCSS_AXI_S_ARES 107
+#define GCC_WCSS_Q6_BCR 108
+#define GCC_WCSSAON_RESET 109
+#define GCC_UNIPHY_SOFT_RESET 110
+#define GCC_GEPHY_MISC_ARES 111
+
+#endif
diff --git a/include/dt-bindings/reset/qcom,ipq9574-gcc.h b/include/dt-bindings/reset/qcom,ipq9574-gcc.h
index d01dc6a24cf1..c709d103673d 100644
--- a/include/dt-bindings/reset/qcom,ipq9574-gcc.h
+++ b/include/dt-bindings/reset/qcom,ipq9574-gcc.h
@@ -160,5 +160,6 @@
#define GCC_WCSS_Q6_BCR 151
#define GCC_WCSS_Q6_TBU_BCR 152
#define GCC_TCSR_BCR 153
+#define GCC_CRYPTO_BCR 154
#endif
diff --git a/include/dt-bindings/reset/qcom,sm8350-videocc.h b/include/dt-bindings/reset/qcom,sm8350-videocc.h
new file mode 100644
index 000000000000..cd356b207a4a
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,sm8350-videocc.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_RESET_QCOM_VIDEO_CC_SM8350_H
+#define _DT_BINDINGS_RESET_QCOM_VIDEO_CC_SM8350_H
+
+#define VIDEO_CC_CVP_INTERFACE_BCR 0
+#define VIDEO_CC_CVP_MVS0_BCR 1
+#define VIDEO_CC_MVS0C_CLK_ARES 2
+#define VIDEO_CC_CVP_MVS0C_BCR 3
+#define VIDEO_CC_CVP_MVS1_BCR 4
+#define VIDEO_CC_MVS1C_CLK_ARES 5
+#define VIDEO_CC_CVP_MVS1C_BCR 6
+
+#endif
diff --git a/include/dt-bindings/reset/qcom,sm8450-gpucc.h b/include/dt-bindings/reset/qcom,sm8450-gpucc.h
new file mode 100644
index 000000000000..58ba8f987107
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,sm8450-gpucc.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_RESET_QCOM_GPU_CC_SM8450_H
+#define _DT_BINDINGS_RESET_QCOM_GPU_CC_SM8450_H
+
+#define GPUCC_GPU_CC_ACD_BCR 0
+#define GPUCC_GPU_CC_CX_BCR 1
+#define GPUCC_GPU_CC_FAST_HUB_BCR 2
+#define GPUCC_GPU_CC_FF_BCR 3
+#define GPUCC_GPU_CC_GFX3D_AON_BCR 4
+#define GPUCC_GPU_CC_GMU_BCR 5
+#define GPUCC_GPU_CC_GX_BCR 6
+#define GPUCC_GPU_CC_XO_BCR 7
+#define GPUCC_GPU_CC_GX_ACD_IROOT_BCR 8
+
+#endif
diff --git a/include/dt-bindings/reset/qcom,sm8650-gpucc.h b/include/dt-bindings/reset/qcom,sm8650-gpucc.h
new file mode 100644
index 000000000000..f021a6cccc66
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,sm8650-gpucc.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_RESET_QCOM_GPU_CC_SM8650_H
+#define _DT_BINDINGS_RESET_QCOM_GPU_CC_SM8650_H
+
+#define GPUCC_GPU_CC_ACD_BCR 0
+#define GPUCC_GPU_CC_CX_BCR 1
+#define GPUCC_GPU_CC_FAST_HUB_BCR 2
+#define GPUCC_GPU_CC_FF_BCR 3
+#define GPUCC_GPU_CC_GFX3D_AON_BCR 4
+#define GPUCC_GPU_CC_GMU_BCR 5
+#define GPUCC_GPU_CC_GX_BCR 6
+#define GPUCC_GPU_CC_XO_BCR 7
+#define GPUCC_GPU_CC_GX_ACD_IROOT_BCR 8
+
+#endif
diff --git a/include/dt-bindings/reset/rockchip,rk3588-cru.h b/include/dt-bindings/reset/rockchip,rk3588-cru.h
index 738e56aead93..d4264db2a07f 100644
--- a/include/dt-bindings/reset/rockchip,rk3588-cru.h
+++ b/include/dt-bindings/reset/rockchip,rk3588-cru.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
/*
* Copyright (c) 2021 Rockchip Electronics Co. Ltd.
* Copyright (c) 2022 Collabora Ltd.
diff --git a/include/dt-bindings/reset/st,stm32mp25-rcc.h b/include/dt-bindings/reset/st,stm32mp25-rcc.h
new file mode 100644
index 000000000000..d5615930bcc8
--- /dev/null
+++ b/include/dt-bindings/reset/st,stm32mp25-rcc.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) STMicroelectronics 2023 - All Rights Reserved
+ * Author(s): Gabriel Fernandez <gabriel.fernandez@foss.st.com>
+ */
+
+#ifndef _DT_BINDINGS_STM32MP25_RESET_H_
+#define _DT_BINDINGS_STM32MP25_RESET_H_
+
+#define TIM1_R 0
+#define TIM2_R 1
+#define TIM3_R 2
+#define TIM4_R 3
+#define TIM5_R 4
+#define TIM6_R 5
+#define TIM7_R 6
+#define TIM8_R 7
+#define TIM10_R 8
+#define TIM11_R 9
+#define TIM12_R 10
+#define TIM13_R 11
+#define TIM14_R 12
+#define TIM15_R 13
+#define TIM16_R 14
+#define TIM17_R 15
+#define TIM20_R 16
+#define LPTIM1_R 17
+#define LPTIM2_R 18
+#define LPTIM3_R 19
+#define LPTIM4_R 20
+#define LPTIM5_R 21
+#define SPI1_R 22
+#define SPI2_R 23
+#define SPI3_R 24
+#define SPI4_R 25
+#define SPI5_R 26
+#define SPI6_R 27
+#define SPI7_R 28
+#define SPI8_R 29
+#define SPDIFRX_R 30
+#define USART1_R 31
+#define USART2_R 32
+#define USART3_R 33
+#define UART4_R 34
+#define UART5_R 35
+#define USART6_R 36
+#define UART7_R 37
+#define UART8_R 38
+#define UART9_R 39
+#define LPUART1_R 40
+#define IS2M_R 41
+#define I2C1_R 42
+#define I2C2_R 43
+#define I2C3_R 44
+#define I2C4_R 45
+#define I2C5_R 46
+#define I2C6_R 47
+#define I2C7_R 48
+#define I2C8_R 49
+#define SAI1_R 50
+#define SAI2_R 51
+#define SAI3_R 52
+#define SAI4_R 53
+#define MDF1_R 54
+#define MDF2_R 55
+#define FDCAN_R 56
+#define HDP_R 57
+#define ADC12_R 58
+#define ADC3_R 59
+#define ETH1_R 60
+#define ETH2_R 61
+#define USB2_R 62
+#define USB2PHY1_R 63
+#define USB2PHY2_R 64
+#define USB3DR_R 65
+#define USB3PCIEPHY_R 66
+#define USBTC_R 67
+#define ETHSW_R 68
+#define SDMMC1_R 69
+#define SDMMC1DLL_R 70
+#define SDMMC2_R 71
+#define SDMMC2DLL_R 72
+#define SDMMC3_R 73
+#define SDMMC3DLL_R 74
+#define GPU_R 75
+#define LTDC_R 76
+#define DSI_R 77
+#define LVDS_R 78
+#define CSI_R 79
+#define DCMIPP_R 80
+#define CCI_R 81
+#define VDEC_R 82
+#define VENC_R 83
+#define WWDG1_R 84
+#define WWDG2_R 85
+#define VREF_R 86
+#define DTS_R 87
+#define CRC_R 88
+#define SERC_R 89
+#define OSPIIOM_R 90
+#define I3C1_R 91
+#define I3C2_R 92
+#define I3C3_R 93
+#define I3C4_R 94
+#define IWDG2_KER_R 95
+#define IWDG4_KER_R 96
+#define RNG_R 97
+#define PKA_R 98
+#define SAES_R 99
+#define HASH_R 100
+#define CRYP1_R 101
+#define CRYP2_R 102
+#define PCIE_R 103
+#define OSPI1_R 104
+#define OSPI1DLL_R 105
+#define OSPI2_R 106
+#define OSPI2DLL_R 107
+#define FMC_R 108
+#define DBG_R 109
+#define GPIOA_R 110
+#define GPIOB_R 111
+#define GPIOC_R 112
+#define GPIOD_R 113
+#define GPIOE_R 114
+#define GPIOF_R 115
+#define GPIOG_R 116
+#define GPIOH_R 117
+#define GPIOI_R 118
+#define GPIOJ_R 119
+#define GPIOK_R 120
+#define GPIOZ_R 121
+#define HPDMA1_R 122
+#define HPDMA2_R 123
+#define HPDMA3_R 124
+#define LPDMA_R 125
+#define HSEM_R 126
+#define IPCC1_R 127
+#define IPCC2_R 128
+#define C2_HOLDBOOT_R 129
+#define C1_HOLDBOOT_R 130
+#define C1_R 131
+#define C1P1POR_R 132
+#define C1P1_R 133
+#define C2_R 134
+#define C3_R 135
+#define SYS_R 136
+#define VSW_R 137
+#define C1MS_R 138
+#define DDRCP_R 139
+#define DDRCAPB_R 140
+#define DDRPHYCAPB_R 141
+#define DDRCFG_R 142
+#define DDR_R 143
+
+#define STM32MP25_LAST_RESET 144
+
+#define RST_SCMI_C1_R 0
+#define RST_SCMI_C2_R 1
+#define RST_SCMI_C1_HOLDBOOT_R 2
+#define RST_SCMI_C2_HOLDBOOT_R 3
+#define RST_SCMI_FMC 4
+#define RST_SCMI_OSPI1 5
+#define RST_SCMI_OSPI1DLL 6
+#define RST_SCMI_OSPI2 7
+#define RST_SCMI_OSPI2DLL 8
+
+#endif /* _DT_BINDINGS_STM32MP25_RESET_H_ */
diff --git a/include/dt-bindings/reset/starfive,jh7110-crg.h b/include/dt-bindings/reset/starfive,jh7110-crg.h
index d78e38690ceb..eaf4a0d84f6a 100644
--- a/include/dt-bindings/reset/starfive,jh7110-crg.h
+++ b/include/dt-bindings/reset/starfive,jh7110-crg.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
* Copyright (C) 2022 Emil Renner Berthing <kernel@esmil.dk>
+ * Copyright (C) 2022 StarFive Technology Co., Ltd.
*/
#ifndef __DT_BINDINGS_RESET_STARFIVE_JH7110_CRG_H__
@@ -151,4 +152,63 @@
#define JH7110_AONRST_END 8
+/* STGCRG resets */
+#define JH7110_STGRST_SYSCON 0
+#define JH7110_STGRST_HIFI4_CORE 1
+#define JH7110_STGRST_HIFI4_AXI 2
+#define JH7110_STGRST_SEC_AHB 3
+#define JH7110_STGRST_E24_CORE 4
+#define JH7110_STGRST_DMA1P_AXI 5
+#define JH7110_STGRST_DMA1P_AHB 6
+#define JH7110_STGRST_USB0_AXI 7
+#define JH7110_STGRST_USB0_APB 8
+#define JH7110_STGRST_USB0_UTMI_APB 9
+#define JH7110_STGRST_USB0_PWRUP 10
+#define JH7110_STGRST_PCIE0_AXI_MST0 11
+#define JH7110_STGRST_PCIE0_AXI_SLV0 12
+#define JH7110_STGRST_PCIE0_AXI_SLV 13
+#define JH7110_STGRST_PCIE0_BRG 14
+#define JH7110_STGRST_PCIE0_CORE 15
+#define JH7110_STGRST_PCIE0_APB 16
+#define JH7110_STGRST_PCIE1_AXI_MST0 17
+#define JH7110_STGRST_PCIE1_AXI_SLV0 18
+#define JH7110_STGRST_PCIE1_AXI_SLV 19
+#define JH7110_STGRST_PCIE1_BRG 20
+#define JH7110_STGRST_PCIE1_CORE 21
+#define JH7110_STGRST_PCIE1_APB 22
+
+#define JH7110_STGRST_END 23
+
+/* ISPCRG resets */
+#define JH7110_ISPRST_ISPV2_TOP_WRAPPER_P 0
+#define JH7110_ISPRST_ISPV2_TOP_WRAPPER_C 1
+#define JH7110_ISPRST_M31DPHY_HW 2
+#define JH7110_ISPRST_M31DPHY_B09_AON 3
+#define JH7110_ISPRST_VIN_APB 4
+#define JH7110_ISPRST_VIN_PIXEL_IF0 5
+#define JH7110_ISPRST_VIN_PIXEL_IF1 6
+#define JH7110_ISPRST_VIN_PIXEL_IF2 7
+#define JH7110_ISPRST_VIN_PIXEL_IF3 8
+#define JH7110_ISPRST_VIN_SYS 9
+#define JH7110_ISPRST_VIN_P_AXI_RD 10
+#define JH7110_ISPRST_VIN_P_AXI_WR 11
+
+#define JH7110_ISPRST_END 12
+
+/* VOUTCRG resets */
+#define JH7110_VOUTRST_DC8200_AXI 0
+#define JH7110_VOUTRST_DC8200_AHB 1
+#define JH7110_VOUTRST_DC8200_CORE 2
+#define JH7110_VOUTRST_DSITX_DPI 3
+#define JH7110_VOUTRST_DSITX_APB 4
+#define JH7110_VOUTRST_DSITX_RXESC 5
+#define JH7110_VOUTRST_DSITX_SYS 6
+#define JH7110_VOUTRST_DSITX_TXBYTEHS 7
+#define JH7110_VOUTRST_DSITX_TXESC 8
+#define JH7110_VOUTRST_HDMI_TX_HDMI 9
+#define JH7110_VOUTRST_MIPITX_DPHY_SYS 10
+#define JH7110_VOUTRST_MIPITX_DPHY_TXBYTEHS 11
+
+#define JH7110_VOUTRST_END 12
+
#endif /* __DT_BINDINGS_RESET_STARFIVE_JH7110_CRG_H__ */
diff --git a/include/dt-bindings/reset/stm32mp1-resets.h b/include/dt-bindings/reset/stm32mp1-resets.h
index 4ffa7c3612e6..9071f139649f 100644
--- a/include/dt-bindings/reset/stm32mp1-resets.h
+++ b/include/dt-bindings/reset/stm32mp1-resets.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) STMicroelectronics 2018 - All Rights Reserved
* Author: Gabriel Fernandez <gabriel.fernandez@st.com> for STMicroelectronics.
diff --git a/include/dt-bindings/reset/stm32mp13-resets.h b/include/dt-bindings/reset/stm32mp13-resets.h
index 934864e90da6..ecb37c7ddde1 100644
--- a/include/dt-bindings/reset/stm32mp13-resets.h
+++ b/include/dt-bindings/reset/stm32mp13-resets.h
@@ -1,7 +1,7 @@
-/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause */
/*
* Copyright (C) STMicroelectronics 2018 - All Rights Reserved
- * Author: Gabriel Fernandez <gabriel.fernandez@st.com> for STMicroelectronics.
+ * Author: Gabriel Fernandez <gabriel.fernandez@foss.st.com> for STMicroelectronics.
*/
#ifndef _DT_BINDINGS_STM32MP13_RESET_H_
diff --git a/include/dt-bindings/reset/sun20i-d1-ccu.h b/include/dt-bindings/reset/sun20i-d1-ccu.h
index f8001cf50bf1..79e52aca5912 100644
--- a/include/dt-bindings/reset/sun20i-d1-ccu.h
+++ b/include/dt-bindings/reset/sun20i-d1-ccu.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2020 huangzhenwei@allwinnertech.com
* Copyright (C) 2021 Samuel Holland <samuel@sholland.org>
diff --git a/include/dt-bindings/reset/sun20i-d1-r-ccu.h b/include/dt-bindings/reset/sun20i-d1-r-ccu.h
index d93d6423d283..e20babc990af 100644
--- a/include/dt-bindings/reset/sun20i-d1-r-ccu.h
+++ b/include/dt-bindings/reset/sun20i-d1-r-ccu.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (C) 2021 Samuel Holland <samuel@sholland.org>
*/
diff --git a/include/dt-bindings/reset/sun50i-a100-ccu.h b/include/dt-bindings/reset/sun50i-a100-ccu.h
index 55c0ada99885..d13764bc1860 100644
--- a/include/dt-bindings/reset/sun50i-a100-ccu.h
+++ b/include/dt-bindings/reset/sun50i-a100-ccu.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2020 Yangtao Li <frank@allwinnertech.com>
*/
diff --git a/include/dt-bindings/reset/sun50i-a100-r-ccu.h b/include/dt-bindings/reset/sun50i-a100-r-ccu.h
index 737bf6f66626..1e7c4431f03c 100644
--- a/include/dt-bindings/reset/sun50i-a100-r-ccu.h
+++ b/include/dt-bindings/reset/sun50i-a100-r-ccu.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2020 Yangtao Li <frank@allwinnertech.com>
*/
diff --git a/include/dt-bindings/reset/sun50i-h6-ccu.h b/include/dt-bindings/reset/sun50i-h6-ccu.h
index 81106f455097..d038ddfa4818 100644
--- a/include/dt-bindings/reset/sun50i-h6-ccu.h
+++ b/include/dt-bindings/reset/sun50i-h6-ccu.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0+ or MIT)
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (C) 2017 Icenowy Zheng <icenowy@aosc.io>
*/
diff --git a/include/dt-bindings/reset/sun50i-h6-r-ccu.h b/include/dt-bindings/reset/sun50i-h6-r-ccu.h
index 7950e799c76d..d541ade884fc 100644
--- a/include/dt-bindings/reset/sun50i-h6-r-ccu.h
+++ b/include/dt-bindings/reset/sun50i-h6-r-ccu.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (C) 2016 Icenowy Zheng <icenowy@aosc.xyz>
*/
diff --git a/include/dt-bindings/reset/sun50i-h616-ccu.h b/include/dt-bindings/reset/sun50i-h616-ccu.h
index cb6285a8d128..1bd8bb0a11be 100644
--- a/include/dt-bindings/reset/sun50i-h616-ccu.h
+++ b/include/dt-bindings/reset/sun50i-h616-ccu.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (C) 2020 Arm Ltd.
*/
diff --git a/include/dt-bindings/soc/rockchip,vop2.h b/include/dt-bindings/soc/rockchip,vop2.h
index 6e66a802b96a..668f199df9f0 100644
--- a/include/dt-bindings/soc/rockchip,vop2.h
+++ b/include/dt-bindings/soc/rockchip,vop2.h
@@ -10,5 +10,9 @@
#define ROCKCHIP_VOP2_EP_LVDS0 5
#define ROCKCHIP_VOP2_EP_MIPI1 6
#define ROCKCHIP_VOP2_EP_LVDS1 7
+#define ROCKCHIP_VOP2_EP_HDMI1 8
+#define ROCKCHIP_VOP2_EP_EDP1 9
+#define ROCKCHIP_VOP2_EP_DP0 10
+#define ROCKCHIP_VOP2_EP_DP1 11
#endif /* __DT_BINDINGS_ROCKCHIP_VOP2_H */
diff --git a/include/dt-bindings/sound/qcom,q6dsp-lpass-ports.h b/include/dt-bindings/sound/qcom,q6dsp-lpass-ports.h
index 9f7c5103bc82..39f203256c4f 100644
--- a/include/dt-bindings/sound/qcom,q6dsp-lpass-ports.h
+++ b/include/dt-bindings/sound/qcom,q6dsp-lpass-ports.h
@@ -131,6 +131,14 @@
#define RX_CODEC_DMA_RX_7 126
#define QUINARY_MI2S_RX 127
#define QUINARY_MI2S_TX 128
+#define DISPLAY_PORT_RX_0 DISPLAY_PORT_RX
+#define DISPLAY_PORT_RX_1 129
+#define DISPLAY_PORT_RX_2 130
+#define DISPLAY_PORT_RX_3 131
+#define DISPLAY_PORT_RX_4 132
+#define DISPLAY_PORT_RX_5 133
+#define DISPLAY_PORT_RX_6 134
+#define DISPLAY_PORT_RX_7 135
#define LPASS_CLK_ID_PRI_MI2S_IBIT 1
#define LPASS_CLK_ID_PRI_MI2S_EBIT 2
diff --git a/include/dt-bindings/thermal/mediatek,lvts-thermal.h b/include/dt-bindings/thermal/mediatek,lvts-thermal.h
index 8fa5a46675c4..997e2f55128a 100644
--- a/include/dt-bindings/thermal/mediatek,lvts-thermal.h
+++ b/include/dt-bindings/thermal/mediatek,lvts-thermal.h
@@ -7,6 +7,15 @@
#ifndef __MEDIATEK_LVTS_DT_H
#define __MEDIATEK_LVTS_DT_H
+#define MT7988_CPU_0 0
+#define MT7988_CPU_1 1
+#define MT7988_ETH2P5G_0 2
+#define MT7988_ETH2P5G_1 3
+#define MT7988_TOPS_0 4
+#define MT7988_TOPS_1 5
+#define MT7988_ETHWARP_0 6
+#define MT7988_ETHWARP_1 7
+
#define MT8195_MCU_BIG_CPU0 0
#define MT8195_MCU_BIG_CPU1 1
#define MT8195_MCU_BIG_CPU2 2
@@ -26,4 +35,23 @@
#define MT8195_AP_CAM0 15
#define MT8195_AP_CAM1 16
+#define MT8192_MCU_BIG_CPU0 0
+#define MT8192_MCU_BIG_CPU1 1
+#define MT8192_MCU_BIG_CPU2 2
+#define MT8192_MCU_BIG_CPU3 3
+#define MT8192_MCU_LITTLE_CPU0 4
+#define MT8192_MCU_LITTLE_CPU1 5
+#define MT8192_MCU_LITTLE_CPU2 6
+#define MT8192_MCU_LITTLE_CPU3 7
+
+#define MT8192_AP_VPU0 8
+#define MT8192_AP_VPU1 9
+#define MT8192_AP_GPU0 10
+#define MT8192_AP_GPU1 11
+#define MT8192_AP_INFRA 12
+#define MT8192_AP_CAM 13
+#define MT8192_AP_MD0 14
+#define MT8192_AP_MD1 15
+#define MT8192_AP_MD2 16
+
#endif /* __MEDIATEK_LVTS_DT_H */
diff --git a/include/dt-bindings/thermal/tegra234-bpmp-thermal.h b/include/dt-bindings/thermal/tegra234-bpmp-thermal.h
new file mode 100644
index 000000000000..934787950932
--- /dev/null
+++ b/include/dt-bindings/thermal/tegra234-bpmp-thermal.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides constants for binding nvidia,tegra234-bpmp-thermal.
+ */
+
+#ifndef _DT_BINDINGS_THERMAL_TEGRA234_BPMP_THERMAL_H
+#define _DT_BINDINGS_THERMAL_TEGRA234_BPMP_THERMAL_H
+
+#define TEGRA234_BPMP_THERMAL_ZONE_CPU 0
+#define TEGRA234_BPMP_THERMAL_ZONE_GPU 1
+#define TEGRA234_BPMP_THERMAL_ZONE_CV0 2
+#define TEGRA234_BPMP_THERMAL_ZONE_CV1 3
+#define TEGRA234_BPMP_THERMAL_ZONE_CV2 4
+#define TEGRA234_BPMP_THERMAL_ZONE_SOC0 5
+#define TEGRA234_BPMP_THERMAL_ZONE_SOC1 6
+#define TEGRA234_BPMP_THERMAL_ZONE_SOC2 7
+#define TEGRA234_BPMP_THERMAL_ZONE_TJ_MAX 8
+
+#endif
diff --git a/include/dt-bindings/watchdog/aspeed-wdt.h b/include/dt-bindings/watchdog/aspeed-wdt.h
new file mode 100644
index 000000000000..7ae6d84b2bd9
--- /dev/null
+++ b/include/dt-bindings/watchdog/aspeed-wdt.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef DT_BINDINGS_ASPEED_WDT_H
+#define DT_BINDINGS_ASPEED_WDT_H
+
+#define AST2500_WDT_RESET_CPU (1 << 0)
+#define AST2500_WDT_RESET_COPROC (1 << 1)
+#define AST2500_WDT_RESET_SDRAM (1 << 2)
+#define AST2500_WDT_RESET_AHB (1 << 3)
+#define AST2500_WDT_RESET_I2C (1 << 4)
+#define AST2500_WDT_RESET_MAC0 (1 << 5)
+#define AST2500_WDT_RESET_MAC1 (1 << 6)
+#define AST2500_WDT_RESET_GRAPHICS (1 << 7)
+#define AST2500_WDT_RESET_USB2_HOST_HUB (1 << 8)
+#define AST2500_WDT_RESET_USB_HOST (1 << 9)
+#define AST2500_WDT_RESET_HID_EHCI (1 << 10)
+#define AST2500_WDT_RESET_VIDEO (1 << 11)
+#define AST2500_WDT_RESET_HAC (1 << 12)
+#define AST2500_WDT_RESET_LPC (1 << 13)
+#define AST2500_WDT_RESET_SDIO (1 << 14)
+#define AST2500_WDT_RESET_MIC (1 << 15)
+#define AST2500_WDT_RESET_CRT (1 << 16)
+#define AST2500_WDT_RESET_PWM (1 << 17)
+#define AST2500_WDT_RESET_PECI (1 << 18)
+#define AST2500_WDT_RESET_JTAG (1 << 19)
+#define AST2500_WDT_RESET_ADC (1 << 20)
+#define AST2500_WDT_RESET_GPIO (1 << 21)
+#define AST2500_WDT_RESET_MCTP (1 << 22)
+#define AST2500_WDT_RESET_XDMA (1 << 23)
+#define AST2500_WDT_RESET_SPI (1 << 24)
+#define AST2500_WDT_RESET_SOC_MISC (1 << 25)
+
+#define AST2500_WDT_RESET_DEFAULT 0x023ffff3
+
+#define AST2600_WDT_RESET1_CPU (1 << 0)
+#define AST2600_WDT_RESET1_SDRAM (1 << 1)
+#define AST2600_WDT_RESET1_AHB (1 << 2)
+#define AST2600_WDT_RESET1_SLI (1 << 3)
+#define AST2600_WDT_RESET1_SOC_MISC0 (1 << 4)
+#define AST2600_WDT_RESET1_COPROC (1 << 5)
+#define AST2600_WDT_RESET1_USB_A (1 << 6)
+#define AST2600_WDT_RESET1_USB_B (1 << 7)
+#define AST2600_WDT_RESET1_UHCI (1 << 8)
+#define AST2600_WDT_RESET1_GRAPHICS (1 << 9)
+#define AST2600_WDT_RESET1_CRT (1 << 10)
+#define AST2600_WDT_RESET1_VIDEO (1 << 11)
+#define AST2600_WDT_RESET1_HAC (1 << 12)
+#define AST2600_WDT_RESET1_DP (1 << 13)
+#define AST2600_WDT_RESET1_DP_MCU (1 << 14)
+#define AST2600_WDT_RESET1_GP_MCU (1 << 15)
+#define AST2600_WDT_RESET1_MAC0 (1 << 16)
+#define AST2600_WDT_RESET1_MAC1 (1 << 17)
+#define AST2600_WDT_RESET1_SDIO0 (1 << 18)
+#define AST2600_WDT_RESET1_JTAG0 (1 << 19)
+#define AST2600_WDT_RESET1_MCTP0 (1 << 20)
+#define AST2600_WDT_RESET1_MCTP1 (1 << 21)
+#define AST2600_WDT_RESET1_XDMA0 (1 << 22)
+#define AST2600_WDT_RESET1_XDMA1 (1 << 23)
+#define AST2600_WDT_RESET1_GPIO0 (1 << 24)
+#define AST2600_WDT_RESET1_RVAS (1 << 25)
+
+#define AST2600_WDT_RESET1_DEFAULT 0x030f1ff1
+
+#define AST2600_WDT_RESET2_CPU (1 << 0)
+#define AST2600_WDT_RESET2_SPI (1 << 1)
+#define AST2600_WDT_RESET2_AHB2 (1 << 2)
+#define AST2600_WDT_RESET2_SLI2 (1 << 3)
+#define AST2600_WDT_RESET2_SOC_MISC1 (1 << 4)
+#define AST2600_WDT_RESET2_MAC2 (1 << 5)
+#define AST2600_WDT_RESET2_MAC3 (1 << 6)
+#define AST2600_WDT_RESET2_SDIO1 (1 << 7)
+#define AST2600_WDT_RESET2_JTAG1 (1 << 8)
+#define AST2600_WDT_RESET2_GPIO1 (1 << 9)
+#define AST2600_WDT_RESET2_MDIO (1 << 10)
+#define AST2600_WDT_RESET2_LPC (1 << 11)
+#define AST2600_WDT_RESET2_PECI (1 << 12)
+#define AST2600_WDT_RESET2_PWM (1 << 13)
+#define AST2600_WDT_RESET2_ADC (1 << 14)
+#define AST2600_WDT_RESET2_FSI (1 << 15)
+#define AST2600_WDT_RESET2_I2C (1 << 16)
+#define AST2600_WDT_RESET2_I3C_GLOBAL (1 << 17)
+#define AST2600_WDT_RESET2_I3C0 (1 << 18)
+#define AST2600_WDT_RESET2_I3C1 (1 << 19)
+#define AST2600_WDT_RESET2_I3C2 (1 << 20)
+#define AST2600_WDT_RESET2_I3C3 (1 << 21)
+#define AST2600_WDT_RESET2_I3C4 (1 << 22)
+#define AST2600_WDT_RESET2_I3C5 (1 << 23)
+#define AST2600_WDT_RESET2_ESPI (1 << 26)
+
+#define AST2600_WDT_RESET2_DEFAULT 0x03fffff1
+
+#endif
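Each constant above is one bit of the Aspeed watchdog's reset-mask register, and the *_DEFAULT values are simply the OR of the bits the hardware asserts by default. A board that needs a block to survive a BMC watchdog reset clears that block's bit. A sketch of the arithmetic; the resulting value would normally be placed in the watchdog node's reset-mask cells in the device tree rather than used from C:

#include <dt-bindings/watchdog/aspeed-wdt.h>

/* AST2600 second reset word: the default set minus LPC, so the
 * host interface keeps running across a watchdog-triggered reset. */
#define BOARD_WDT_RESET2_MASK \
	(AST2600_WDT_RESET2_DEFAULT & ~AST2600_WDT_RESET2_LPC)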
diff --git a/include/keys/asymmetric-parser.h b/include/keys/asymmetric-parser.h
index c47dc5405f79..516a3f51179e 100644
--- a/include/keys/asymmetric-parser.h
+++ b/include/keys/asymmetric-parser.h
@@ -10,6 +10,8 @@
#ifndef _KEYS_ASYMMETRIC_PARSER_H
#define _KEYS_ASYMMETRIC_PARSER_H
+struct key_preparsed_payload;
+
/*
* Key data parser. Called during key instantiation.
*/
diff --git a/include/keys/system_keyring.h b/include/keys/system_keyring.h
index 91e080efb918..8365adf842ef 100644
--- a/include/keys/system_keyring.h
+++ b/include/keys/system_keyring.h
@@ -23,10 +23,15 @@ extern int restrict_link_by_builtin_trusted(struct key *keyring,
const struct key_type *type,
const union key_payload *payload,
struct key *restriction_key);
+int restrict_link_by_digsig_builtin(struct key *dest_keyring,
+ const struct key_type *type,
+ const union key_payload *payload,
+ struct key *restriction_key);
extern __init int load_module_cert(struct key *keyring);
#else
#define restrict_link_by_builtin_trusted restrict_link_reject
+#define restrict_link_by_digsig_builtin restrict_link_reject
static inline __init int load_module_cert(struct key *keyring)
{
@@ -41,8 +46,17 @@ extern int restrict_link_by_builtin_and_secondary_trusted(
const struct key_type *type,
const union key_payload *payload,
struct key *restriction_key);
+int restrict_link_by_digsig_builtin_and_secondary(struct key *keyring,
+ const struct key_type *type,
+ const union key_payload *payload,
+ struct key *restriction_key);
+void __init add_to_secondary_keyring(const char *source, const void *data, size_t len);
#else
#define restrict_link_by_builtin_and_secondary_trusted restrict_link_by_builtin_trusted
+#define restrict_link_by_digsig_builtin_and_secondary restrict_link_by_digsig_builtin
+static inline void __init add_to_secondary_keyring(const char *source, const void *data, size_t len)
+{
+}
#endif
#ifdef CONFIG_INTEGRITY_MACHINE_KEYRING
diff --git a/include/kunit/attributes.h b/include/kunit/attributes.h
new file mode 100644
index 000000000000..bc76a0b786d2
--- /dev/null
+++ b/include/kunit/attributes.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KUnit API to save and access test attributes
+ *
+ * Copyright (C) 2023, Google LLC.
+ * Author: Rae Moar <rmoar@google.com>
+ */
+
+#ifndef _KUNIT_ATTRIBUTES_H
+#define _KUNIT_ATTRIBUTES_H
+
+/*
+ * struct kunit_attr_filter - representation of attributes filter with the
+ * attribute object and string input
+ */
+struct kunit_attr_filter {
+ struct kunit_attr *attr;
+ char *input;
+};
+
+/*
+ * Returns the name of the filter's attribute.
+ */
+const char *kunit_attr_filter_name(struct kunit_attr_filter filter);
+
+/*
+ * Print all test attributes for a test case or suite.
+ * Output format for test cases: "# <test_name>.<attribute>: <value>"
+ * Output format for test suites: "# <attribute>: <value>"
+ */
+void kunit_print_attr(void *test_or_suite, bool is_test, unsigned int test_level);
+
+/*
+ * Returns the number of filters in input.
+ */
+int kunit_get_filter_count(char *input);
+
+/*
+ * Parse attributes filter input and return an object containing the
+ * attribute object and the string input of the next filter.
+ */
+struct kunit_attr_filter kunit_next_attr_filter(char **filters, int *err);
+
+/*
+ * Returns a copy of the suite containing only tests that pass the filter.
+ */
+struct kunit_suite *kunit_filter_attr_tests(const struct kunit_suite *const suite,
+ struct kunit_attr_filter filter, char *action, int *err);
+
+#endif /* _KUNIT_ATTRIBUTES_H */
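The four functions above are meant to be driven in a loop: count the filters in the input, pull them off one at a time, and narrow the suite after each step. A hedged sketch of how a caller might use them; the real wiring lives in the KUnit executor and may differ in detail:

#include <linux/err.h>
#include <kunit/attributes.h>

/* Sketch: apply every attribute filter in 'input' to 'suite'.
 * Assumes errors are reported as negative errno values via *err. */
static struct kunit_suite *apply_attr_filters(struct kunit_suite *suite,
					      char *input, char *action)
{
	int i, err = 0;
	int count = kunit_get_filter_count(input);

	for (i = 0; i < count && suite; i++) {
		struct kunit_attr_filter filter =
			kunit_next_attr_filter(&input, &err);

		if (err)
			return ERR_PTR(err);

		suite = kunit_filter_attr_tests(suite, filter, action, &err);
		if (err)
			return ERR_PTR(err);
	}

	return suite;
}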
diff --git a/include/kunit/device.h b/include/kunit/device.h
new file mode 100644
index 000000000000..2450110ad64e
--- /dev/null
+++ b/include/kunit/device.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KUnit basic device implementation
+ *
+ * Helpers for creating and managing fake devices for KUnit tests.
+ *
+ * Copyright (C) 2023, Google LLC.
+ * Author: David Gow <davidgow@google.com>
+ */
+
+#ifndef _KUNIT_DEVICE_H
+#define _KUNIT_DEVICE_H
+
+#if IS_ENABLED(CONFIG_KUNIT)
+
+#include <kunit/test.h>
+
+struct device;
+struct device_driver;
+
+/**
+ * kunit_driver_create() - Create a struct device_driver attached to the kunit_bus
+ * @test: The test context object.
+ * @name: The name to give the created driver.
+ *
+ * Creates a struct device_driver attached to the kunit_bus, with the name @name.
+ * This driver will automatically be cleaned up on test exit.
+ *
+ * Return: a stub struct device_driver, managed by KUnit, with the name @name.
+ */
+struct device_driver *kunit_driver_create(struct kunit *test, const char *name);
+
+/**
+ * kunit_device_register() - Create a struct device for use in KUnit tests
+ * @test: The test context object.
+ * @name: The name to give the created device.
+ *
+ * Creates a struct kunit_device (which is a struct device) with the given name,
+ * and a corresponding driver. The device and driver will be cleaned up on test
+ * exit, or when kunit_device_unregister() is called. See also
+ * kunit_device_register_with_driver(), if you wish to provide your own
+ * struct device_driver.
+ *
+ * Return: a pointer to a struct device which will be cleaned up when the test
+ * exits, or an error pointer if the device could not be allocated or registered.
+ */
+struct device *kunit_device_register(struct kunit *test, const char *name);
+
+/**
+ * kunit_device_register_with_driver() - Create a struct device for use in KUnit tests
+ * @test: The test context object.
+ * @name: The name to give the created device.
+ * @drv: The struct device_driver to associate with the device.
+ *
+ * Creates a struct kunit_device (which is a struct device) with the given
+ * name and driver. The device will be cleaned up on test exit, or when
+ * kunit_device_unregister() is called. See also kunit_device_register(), if
+ * you wish KUnit to create and manage a driver for you.
+ *
+ * Return: a pointer to a struct device which will be cleaned up when the test
+ * exits, or an error pointer if the device could not be allocated or registered.
+ */
+struct device *kunit_device_register_with_driver(struct kunit *test,
+ const char *name,
+ const struct device_driver *drv);
+
+/**
+ * kunit_device_unregister() - Unregister a KUnit-managed device
+ * @test: The test context object which created the device
+ * @dev: The device.
+ *
+ * Unregisters and destroys a struct device which was created with
+ * kunit_device_register() or kunit_device_register_with_driver(). If KUnit
+ * created a driver, it is cleaned up as well.
+ */
+void kunit_device_unregister(struct kunit *test, struct device *dev);
+
+#endif /* IS_ENABLED(CONFIG_KUNIT) */
+
+#endif /* _KUNIT_DEVICE_H */
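
[Editor's note] A short usage sketch (the test body and device name are illustrative): a test can create a managed device and rely entirely on automatic cleanup.

	static void my_device_test(struct kunit *test)
	{
		struct device *dev = kunit_device_register(test, "kunit-example-dev");

		/* Failure is signalled by an error pointer, not NULL. */
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);

		/* ... exercise code that needs a struct device here ... */

		/* No explicit cleanup: the device is removed on test exit. */
	}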
diff --git a/include/kunit/resource.h b/include/kunit/resource.h
index c0d88b318e90..4ad69a2642a5 100644
--- a/include/kunit/resource.h
+++ b/include/kunit/resource.h
@@ -387,4 +387,117 @@ static inline int kunit_destroy_named_resource(struct kunit *test,
*/
void kunit_remove_resource(struct kunit *test, struct kunit_resource *res);
+/* A 'deferred action' function to be used with kunit_add_action. */
+typedef void (kunit_action_t)(void *);
+
+/**
+ * KUNIT_DEFINE_ACTION_WRAPPER() - Wrap a function for use as a deferred action.
+ *
+ * @wrapper: The name of the new wrapper function to define.
+ * @orig: The original function to wrap.
+ * @arg_type: The type of the argument accepted by @orig.
+ *
+ * Defines a wrapper for a function which accepts a single, pointer-sized
+ * argument. This wrapper can then be passed to kunit_add_action() and
+ * similar. This should be used in preference to casting a function
+ * directly to kunit_action_t, as casting function pointers will break
+ * control flow integrity (CFI), leading to crashes.
+ */
+#define KUNIT_DEFINE_ACTION_WRAPPER(wrapper, orig, arg_type) \
+ static void wrapper(void *in) \
+ { \
+ arg_type arg = (arg_type)in; \
+ orig(arg); \
+ }
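
[Editor's note] For example, a CFI-safe wrapper around kfree() might look like the following sketch (the wrapper name is illustrative):

	KUNIT_DEFINE_ACTION_WRAPPER(kfree_wrapper, kfree, const void *);

	/* Later, inside a test: */
	kunit_add_action(test, kfree_wrapper, ptr);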
+
+/**
+ * kunit_add_action() - Call a function when the test ends.
+ * @test: Test case to associate the action with.
+ * @action: The function to run on test exit.
+ * @ctx: Data passed into @action.
+ *
+ * Defer the execution of a function until the test exits, either normally or
+ * due to a failure. @ctx is passed as additional context. All functions
+ * registered with kunit_add_action() will execute in the reverse of the order
+ * in which they were registered.
+ *
+ * This is useful for cleaning up allocated memory and resources, as these
+ * functions are called even if the test aborts early due to, e.g., a failed
+ * assertion.
+ *
+ * See also: devm_add_action() for the devres equivalent.
+ *
+ * Returns:
+ * 0 on success, an error if the action could not be deferred.
+ */
+int kunit_add_action(struct kunit *test, kunit_action_t *action, void *ctx);
+
+/**
+ * kunit_add_action_or_reset() - Call a function when the test ends.
+ * @test: Test case to associate the action with.
+ * @action: The function to run on test exit.
+ * @ctx: Data passed into @action.
+ *
+ * Defer the execution of a function until the test exits, either normally or
+ * due to a failure. @ctx is passed as additional context. All functions
+ * registered with kunit_add_action() will execute in the reverse of the order
+ * in which they were registered.
+ *
+ * This is useful for cleaning up allocated memory and resources, as these
+ * functions are called even if the test aborts early due to, e.g., a failed
+ * assertion.
+ *
+ * If the action cannot be created (e.g., due to the system being out of memory),
+ * then action(ctx) will be called immediately, and an error will be returned.
+ *
+ * See also: devm_add_action_or_reset() for the devres equivalent.
+ *
+ * Returns:
+ * 0 on success, an error if the action could not be deferred.
+ */
+int kunit_add_action_or_reset(struct kunit *test, kunit_action_t *action,
+ void *ctx);
+
+/**
+ * kunit_remove_action() - Cancel a matching deferred action.
+ * @test: Test case the action is associated with.
+ * @action: The deferred function to cancel.
+ * @ctx: The context the deferred function was registered with.
+ *
+ * Prevent an action deferred via kunit_add_action() from executing when the
+ * test terminates.
+ *
+ * If the function/context pair was deferred multiple times, only the most
+ * recent one will be cancelled.
+ *
+ * See also: devm_remove_action() for the devres equivalent.
+ */
+void kunit_remove_action(struct kunit *test,
+ kunit_action_t *action,
+ void *ctx);
+
+/**
+ * kunit_release_action() - Run a matching action call immediately.
+ * @test: Test case the action is associated with.
+ * @action: The deferred function to trigger.
+ * @ctx: The context passed to the deferred function to trigger.
+ *
+ * Execute a function deferred via kunit_add_action() immediately, rather than
+ * when the test ends.
+ *
+ * If the function/context pair was deferred multiple times, it will only be
+ * executed once here. The most recent deferral will no longer execute when
+ * the test ends.
+ *
+ * kunit_release_action(test, func, ctx);
+ * is equivalent to
+ * func(ctx);
+ * kunit_remove_action(test, func, ctx);
+ *
+ * See also: devm_release_action() for the devres equivalent.
+ */
+void kunit_release_action(struct kunit *test,
+ kunit_action_t *action,
+ void *ctx);
#endif /* _KUNIT_RESOURCE_H */
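
[Editor's note] Putting the deferred-action API together, a hedged sketch of a common pattern, reusing the kfree_wrapper sketch from above (all names illustrative):

	static void my_action_test(struct kunit *test)
	{
		void *buf = kmalloc(16, GFP_KERNEL);

		KUNIT_ASSERT_NOT_NULL(test, buf);
		KUNIT_ASSERT_EQ(test, 0, kunit_add_action(test, kfree_wrapper, buf));

		/* Free early and cancel the deferred call in one step: */
		kunit_release_action(test, kfree_wrapper, buf);
	}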
diff --git a/include/kunit/skbuff.h b/include/kunit/skbuff.h
new file mode 100644
index 000000000000..44d12370939a
--- /dev/null
+++ b/include/kunit/skbuff.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KUnit resource management helpers for SKBs (skbuff).
+ *
+ * Copyright (C) 2023 Intel Corporation
+ */
+
+#ifndef _KUNIT_SKBUFF_H
+#define _KUNIT_SKBUFF_H
+
+#include <kunit/resource.h>
+#include <linux/skbuff.h>
+
+static void kunit_action_kfree_skb(void *p)
+{
+ kfree_skb((struct sk_buff *)p);
+}
+
+/**
+ * kunit_zalloc_skb() - Allocate and initialize a resource managed skb.
+ * @test: The test case to which the skb belongs
+ * @len: size to allocate
+ * @gfp: allocation flags
+ *
+ * Allocate a new struct sk_buff with the given allocation flags, zero fill
+ * the given length and add it as a resource to the kunit test for automatic
+ * cleanup.
+ *
+ * Returns: newly allocated SKB, or %NULL on error
+ */
+static inline struct sk_buff *kunit_zalloc_skb(struct kunit *test, int len,
+ gfp_t gfp)
+{
+	struct sk_buff *res = alloc_skb(len, gfp);
+
+ if (!res || skb_pad(res, len))
+ return NULL;
+
+ if (kunit_add_action_or_reset(test, kunit_action_kfree_skb, res))
+ return NULL;
+
+ return res;
+}
+
+/**
+ * kunit_kfree_skb() - Like kfree_skb(), except for allocations managed by KUnit.
+ * @test: The test case to which the resource belongs.
+ * @skb: The SKB to free.
+ */
+static inline void kunit_kfree_skb(struct kunit *test, struct sk_buff *skb)
+{
+ if (!skb)
+ return;
+
+ kunit_release_action(test, kunit_action_kfree_skb, (void *)skb);
+}
+
+#endif /* _KUNIT_SKBUFF_H */
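
[Editor's note] A minimal usage sketch (the test name and size are illustrative):

	static void my_skb_test(struct kunit *test)
	{
		struct sk_buff *skb = kunit_zalloc_skb(test, 64, GFP_KERNEL);

		KUNIT_ASSERT_NOT_NULL(test, skb);

		/* ... use the zero-filled skb ... */

		/* Optional: the skb is freed on test exit anyway. */
		kunit_kfree_skb(test, skb);
	}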
diff --git a/include/kunit/static_stub.h b/include/kunit/static_stub.h
index 9b80150a5d62..bf940322dfc0 100644
--- a/include/kunit/static_stub.h
+++ b/include/kunit/static_stub.h
@@ -11,7 +11,7 @@
#if !IS_ENABLED(CONFIG_KUNIT)
/* If CONFIG_KUNIT is not enabled, these stubs quietly disappear. */
-#define KUNIT_TRIGGER_STATIC_STUB(real_fn_name, args...) do {} while (0)
+#define KUNIT_STATIC_STUB_REDIRECT(real_fn_name, args...) do {} while (0)
#else
@@ -30,7 +30,7 @@
* This is a function prologue which is used to allow calls to the current
* function to be redirected by a KUnit test. KUnit tests can call
* kunit_activate_static_stub() to pass a replacement function in. The
- * replacement function will be called by KUNIT_TRIGGER_STATIC_STUB(), which
+ * replacement function will be called by KUNIT_STATIC_STUB_REDIRECT(), which
* will then return from the function. If the caller is not in a KUnit context,
* the function will continue execution as normal.
*
@@ -87,13 +87,13 @@ void __kunit_activate_static_stub(struct kunit *test,
* When activated, calls to real_fn_addr from within this test (even if called
* indirectly) will instead call replacement_addr. The function pointed to by
* real_fn_addr must begin with the static stub prologue in
- * KUNIT_TRIGGER_STATIC_STUB() for this to work. real_fn_addr and
+ * KUNIT_STATIC_STUB_REDIRECT() for this to work. real_fn_addr and
* replacement_addr must have the same type.
*
* The redirection can be disabled again with kunit_deactivate_static_stub().
*/
#define kunit_activate_static_stub(test, real_fn_addr, replacement_addr) do { \
- typecheck_fn(typeof(&real_fn_addr), replacement_addr); \
+ typecheck_fn(typeof(&replacement_addr), real_fn_addr); \
__kunit_activate_static_stub(test, real_fn_addr, replacement_addr); \
} while (0)
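
[Editor's note] An end-to-end sketch of the stub mechanism (function names illustrative): the real function opens with the redirect prologue, and a test swaps in a replacement.

	int real_add_one(int n)
	{
		KUNIT_STATIC_STUB_REDIRECT(real_add_one, n);
		return n + 1;
	}

	static int fake_add_one(int n)
	{
		return 42;
	}

	static void add_one_test(struct kunit *test)
	{
		kunit_activate_static_stub(test, real_add_one, fake_add_one);
		KUNIT_EXPECT_EQ(test, real_add_one(1), 42);

		kunit_deactivate_static_stub(test, real_add_one);
		KUNIT_EXPECT_EQ(test, real_add_one(1), 2);
	}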
diff --git a/include/kunit/test-bug.h b/include/kunit/test-bug.h
index 30ca541b6ff2..47aa8f21ccce 100644
--- a/include/kunit/test-bug.h
+++ b/include/kunit/test-bug.h
@@ -9,6 +9,8 @@
#ifndef _KUNIT_TEST_BUG_H
#define _KUNIT_TEST_BUG_H
+#include <linux/stddef.h> /* for NULL */
+
#if IS_ENABLED(CONFIG_KUNIT)
#include <linux/jump_label.h> /* For static branch */
diff --git a/include/kunit/test.h b/include/kunit/test.h
index 57b309c6ca27..fcb4a4940ace 100644
--- a/include/kunit/test.h
+++ b/include/kunit/test.h
@@ -12,6 +12,7 @@
#include <kunit/assert.h>
#include <kunit/try-catch.h>
+#include <linux/args.h>
#include <linux/compiler.h>
#include <linux/container_of.h>
#include <linux/err.h>
@@ -32,9 +33,7 @@
DECLARE_STATIC_KEY_FALSE(kunit_running);
struct kunit;
-
-/* Size of log associated with test. */
-#define KUNIT_LOG_SIZE 2048
+struct string_stream;
/* Maximum size of parameter description string. */
#define KUNIT_PARAM_DESC_SIZE 128
@@ -47,6 +46,7 @@ struct kunit;
* sub-subtest. See the "Subtests" section in
* https://node-tap.org/tap-protocol/
*/
+#define KUNIT_INDENT_LEN 4
#define KUNIT_SUBTEST_INDENT " "
#define KUNIT_SUBSUBTEST_INDENT " "
@@ -62,12 +62,35 @@ enum kunit_status {
KUNIT_SKIPPED,
};
+/* Attribute struct/enum definitions */
+
+/*
+ * Speed Attribute is stored as an enum and separated into categories of
+ * speed: very_slow, slow, and normal. These speeds are relative to
+ * other KUnit tests.
+ *
+ * Note: an unset speed attribute defaults to KUNIT_SPEED_NORMAL.
+ */
+enum kunit_speed {
+ KUNIT_SPEED_UNSET,
+ KUNIT_SPEED_VERY_SLOW,
+ KUNIT_SPEED_SLOW,
+ KUNIT_SPEED_NORMAL,
+ KUNIT_SPEED_MAX = KUNIT_SPEED_NORMAL,
+};
+
+/* Holds attributes for each test case and suite */
+struct kunit_attributes {
+ enum kunit_speed speed;
+};
+
/**
* struct kunit_case - represents an individual test case.
*
* @run_case: the function representing the actual test case.
* @name: the name of the test case.
* @generate_params: the generator function for parameterized tests.
+ * @attr: the attributes associated with the test
*
* A test case is a function with the signature,
* ``void (*)(struct kunit *)``
@@ -103,10 +126,12 @@ struct kunit_case {
void (*run_case)(struct kunit *test);
const char *name;
const void* (*generate_params)(const void *prev, char *desc);
+ struct kunit_attributes attr;
/* private: internal use only. */
enum kunit_status status;
- char *log;
+ char *module_name;
+ struct string_stream *log;
};
static inline char *kunit_status_to_ok_not_ok(enum kunit_status status)
@@ -130,7 +155,32 @@ static inline char *kunit_status_to_ok_not_ok(enum kunit_status status)
* &struct kunit_case object from it. See the documentation for
* &struct kunit_case for an example on how to use it.
*/
-#define KUNIT_CASE(test_name) { .run_case = test_name, .name = #test_name }
+#define KUNIT_CASE(test_name) \
+ { .run_case = test_name, .name = #test_name, \
+ .module_name = KBUILD_MODNAME}
+
+/**
+ * KUNIT_CASE_ATTR - A helper for creating a &struct kunit_case
+ * with attributes
+ *
+ * @test_name: a reference to a test case function.
+ * @attributes: a reference to a struct kunit_attributes object containing
+ * test attributes
+ */
+#define KUNIT_CASE_ATTR(test_name, attributes) \
+ { .run_case = test_name, .name = #test_name, \
+ .attr = attributes, .module_name = KBUILD_MODNAME}
+
+/**
+ * KUNIT_CASE_SLOW - A helper for creating a &struct kunit_case
+ * with the slow attribute
+ *
+ * @test_name: a reference to a test case function.
+ */
+#define KUNIT_CASE_SLOW(test_name) \
+ { .run_case = test_name, .name = #test_name, \
+ .attr.speed = KUNIT_SPEED_SLOW, .module_name = KBUILD_MODNAME}
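
[Editor's note] A short sketch of how these helpers combine in a suite's test list (test names illustrative):

	static struct kunit_case example_test_cases[] = {
		KUNIT_CASE(example_fast_test),
		KUNIT_CASE_SLOW(example_slow_test),
		{}
	};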
/**
 * KUNIT_CASE_PARAM - A helper for creating a parameterized &struct kunit_case
@@ -151,7 +201,21 @@ static inline char *kunit_status_to_ok_not_ok(enum kunit_status status)
*/
#define KUNIT_CASE_PARAM(test_name, gen_params) \
{ .run_case = test_name, .name = #test_name, \
- .generate_params = gen_params }
+ .generate_params = gen_params, .module_name = KBUILD_MODNAME}
+
+/**
+ * KUNIT_CASE_PARAM_ATTR - A helper for creating a parameterized &struct
+ * kunit_case with attributes
+ *
+ * @test_name: a reference to a test case function.
+ * @gen_params: a reference to a parameter generator function.
+ * @attributes: a reference to a struct kunit_attributes object containing
+ * test attributes
+ */
+#define KUNIT_CASE_PARAM_ATTR(test_name, gen_params, attributes) \
+ { .run_case = test_name, .name = #test_name, \
+ .generate_params = gen_params, \
+ .attr = attributes, .module_name = KBUILD_MODNAME}
/**
* struct kunit_suite - describes a related collection of &struct kunit_case
@@ -162,12 +226,16 @@ static inline char *kunit_status_to_ok_not_ok(enum kunit_status status)
* @init: called before every test case.
* @exit: called after every test case.
* @test_cases: a null terminated array of test cases.
+ * @attr: the attributes associated with the test suite
*
* A kunit_suite is a collection of related &struct kunit_case s, such that
* @init is called before every test case and @exit is called after every
* test case, similar to the notion of a *test fixture* or a *test class*
* in other unit testing frameworks like JUnit or Googletest.
*
+ * Note that @exit and @suite_exit will run even if @init or @suite_init
+ * fail: make sure they can handle any inconsistent state which may result.
+ *
* Every &struct kunit_case must be associated with a kunit_suite for KUnit
* to run it.
*/
@@ -178,12 +246,20 @@ struct kunit_suite {
int (*init)(struct kunit *test);
void (*exit)(struct kunit *test);
struct kunit_case *test_cases;
+ struct kunit_attributes attr;
/* private: internal use only */
char status_comment[KUNIT_STATUS_COMMENT_SIZE];
struct dentry *debugfs;
- char *log;
+ struct string_stream *log;
int suite_init_err;
+ bool is_init;
+};
+
+/* Stores an array of suites; @end points one past the last suite */
+struct kunit_suite_set {
+ struct kunit_suite * const *start;
+ struct kunit_suite * const *end;
};
/**
@@ -202,7 +278,7 @@ struct kunit {
/* private: internal use only. */
const char *name; /* Read only after initialization! */
- char *log; /* Points at case log after initialization */
+ struct string_stream *log; /* Points at case log after initialization */
struct kunit_try_catch try_catch;
/* param_value is the current parameter value for a test case. */
const void *param_value;
@@ -233,8 +309,12 @@ static inline void kunit_set_failure(struct kunit *test)
}
bool kunit_enabled(void);
+const char *kunit_action(void);
+const char *kunit_filter_glob(void);
+char *kunit_filter(void);
+char *kunit_filter_action(void);
-void kunit_init_test(struct kunit *test, const char *name, char *log);
+void kunit_init_test(struct kunit *test, const char *name, struct string_stream *log);
int kunit_run_tests(struct kunit_suite *suite);
@@ -243,10 +323,24 @@ size_t kunit_suite_num_test_cases(struct kunit_suite *suite);
unsigned int kunit_test_case_num(struct kunit_suite *suite,
struct kunit_case *test_case);
+struct kunit_suite_set
+kunit_filter_suites(const struct kunit_suite_set *suite_set,
+ const char *filter_glob,
+ char *filters,
+ char *filter_action,
+ int *err);
+void kunit_free_suite_set(struct kunit_suite_set suite_set);
+
int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_suites);
void __kunit_test_suites_exit(struct kunit_suite **suites, int num_suites);
+void kunit_exec_run_tests(struct kunit_suite_set *suite_set, bool builtin);
+void kunit_exec_list_tests(struct kunit_suite_set *suite_set, bool include_attr);
+
+struct kunit_suite_set kunit_merge_suite_sets(struct kunit_suite_set init_suite_set,
+ struct kunit_suite_set suite_set);
+
#if IS_BUILTIN(CONFIG_KUNIT)
int kunit_run_all_tests(void);
#else
@@ -281,6 +375,11 @@ static inline int kunit_run_all_tests(void)
#define kunit_test_suite(suite) kunit_test_suites(&suite)
+#define __kunit_init_test_suites(unique_array, ...) \
+ static struct kunit_suite *unique_array[] \
+ __aligned(sizeof(struct kunit_suite *)) \
+ __used __section(".kunit_init_test_suites") = { __VA_ARGS__ }
+
/**
* kunit_test_init_section_suites() - used to register one or more &struct
* kunit_suite containing init functions or
@@ -288,21 +387,21 @@ static inline int kunit_run_all_tests(void)
*
* @__suites: a statically allocated list of &struct kunit_suite.
*
- * This functions identically as kunit_test_suites() except that it suppresses
- * modpost warnings for referencing functions marked __init or data marked
- * __initdata; this is OK because currently KUnit only runs tests upon boot
- * during the init phase or upon loading a module during the init phase.
+ * This functions similarly to kunit_test_suites(), except that the suites are
+ * placed in a dedicated init section and run during the init phase.
*
- * NOTE TO KUNIT DEVS: If we ever allow KUnit tests to be run after boot, these
- * tests must be excluded.
+ * This macro also suffixes the array and suite declarations it makes with
+ * _probe, so that modpost suppresses warnings about referencing init data
+ * for symbols named in this manner.
*
- * The only thing this macro does that's different from kunit_test_suites is
- * that it suffixes the array and suite declarations it makes with _probe;
- * modpost suppresses warnings about referencing init data for symbols named in
- * this manner.
+ * Note: these init tests cannot be run after boot, so no "run" debugfs file
+ * is generated for them.
+ *
+ * Also, do not mark the suite or test case structs with __initdata because
+ * they will be used after the init phase with debugfs.
*/
#define kunit_test_init_section_suites(__suites...) \
- __kunit_test_suites(CONCATENATE(__UNIQUE_ID(array), _probe), \
+ __kunit_init_test_suites(CONCATENATE(__UNIQUE_ID(array), _probe), \
##__suites)
#define kunit_test_init_section_suite(suite) \
@@ -321,8 +420,11 @@ enum kunit_status kunit_suite_has_succeeded(struct kunit_suite *suite);
* @gfp: flags passed to underlying kmalloc().
*
* Just like `kmalloc_array(...)`, except the allocation is managed by the test case
- * and is automatically cleaned up after the test case concludes. See &struct
- * kunit_resource for more information.
+ * and is automatically cleaned up after the test case concludes. See kunit_add_action()
+ * for more information.
+ *
+ * Note that some internal context data is also allocated with GFP_KERNEL,
+ * regardless of the gfp passed in.
*/
void *kunit_kmalloc_array(struct kunit *test, size_t n, size_t size, gfp_t gfp);
@@ -333,6 +435,9 @@ void *kunit_kmalloc_array(struct kunit *test, size_t n, size_t size, gfp_t gfp);
* @gfp: flags passed to underlying kmalloc().
*
* See kmalloc() and kunit_kmalloc_array() for more information.
+ *
+ * Note that some internal context data is also allocated with GFP_KERNEL,
+ * regardless of the gfp passed in.
*/
static inline void *kunit_kmalloc(struct kunit *test, size_t size, gfp_t gfp)
{
@@ -375,7 +480,7 @@ static inline void *kunit_kcalloc(struct kunit *test, size_t n, size_t size, gfp
void kunit_cleanup(struct kunit *test);
-void __printf(2, 3) kunit_log_append(char *log, const char *fmt, ...);
+void __printf(2, 3) kunit_log_append(struct string_stream *log, const char *fmt, ...);
/**
* kunit_mark_skipped() - Marks @test_or_suite as skipped
@@ -472,7 +577,9 @@ void __printf(2, 3) kunit_log_append(char *log, const char *fmt, ...);
*/
#define KUNIT_SUCCEED(test) do {} while (0)
-void kunit_do_failed_assertion(struct kunit *test,
+void __noreturn __kunit_abort(struct kunit *test);
+
+void __kunit_do_failed_assertion(struct kunit *test,
const struct kunit_loc *loc,
enum kunit_assert_type type,
const struct kunit_assert *assert,
@@ -482,13 +589,15 @@ void kunit_do_failed_assertion(struct kunit *test,
#define _KUNIT_FAILED(test, assert_type, assert_class, assert_format, INITIALIZER, fmt, ...) do { \
static const struct kunit_loc __loc = KUNIT_CURRENT_LOC; \
const struct assert_class __assertion = INITIALIZER; \
- kunit_do_failed_assertion(test, \
- &__loc, \
- assert_type, \
- &__assertion.assert, \
- assert_format, \
- fmt, \
- ##__VA_ARGS__); \
+ __kunit_do_failed_assertion(test, \
+ &__loc, \
+ assert_type, \
+ &__assertion.assert, \
+ assert_format, \
+ fmt, \
+ ##__VA_ARGS__); \
+ if (assert_type == KUNIT_ASSERTION) \
+ __kunit_abort(test); \
} while (0)
@@ -649,7 +758,7 @@ do { \
.right_text = #right, \
}; \
\
- if (likely(strcmp(__left, __right) op 0)) \
+ if (likely((__left) && (__right) && (strcmp(__left, __right) op 0))) \
break; \
\
\
@@ -1414,6 +1523,25 @@ do { \
return NULL; \
}
+/**
+ * KUNIT_ARRAY_PARAM_DESC() - Define a test parameter generator from an array.
+ * @name: prefix for the test parameter generator function.
+ * @array: array of test parameters.
+ * @desc_member: structure member from array element to use as description
+ *
+ * Define function @name_gen_params which uses @array to generate parameters.
+ */
+#define KUNIT_ARRAY_PARAM_DESC(name, array, desc_member) \
+ static const void *name##_gen_params(const void *prev, char *desc) \
+ { \
+ typeof((array)[0]) *__next = prev ? ((typeof(__next)) prev) + 1 : (array); \
+ if (__next - (array) < ARRAY_SIZE((array))) { \
+ strscpy(desc, __next->desc_member, KUNIT_PARAM_DESC_SIZE); \
+ return __next; \
+ } \
+ return NULL; \
+ }
+
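
[Editor's note] A minimal sketch of using this generator (struct, field, and test names are illustrative):

	struct squares_case {
		int in;
		int expected;
		const char *desc;
	};

	static const struct squares_case squares_cases[] = {
		{ .in = 2, .expected = 4, .desc = "two squared" },
		{ .in = 3, .expected = 9, .desc = "three squared" },
	};

	KUNIT_ARRAY_PARAM_DESC(squares, squares_cases, desc);

	/* Registered with: KUNIT_CASE_PARAM(squares_test, squares_gen_params) */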
// TODO(dlatypov@google.com): consider eventually migrating users to explicitly
// include resource.h themselves if they need it.
#include <kunit/resource.h>
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index bb3cb005873e..c819c5d16613 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -82,6 +82,8 @@ struct timer_map {
struct arch_timer_context *emul_ptimer;
};
+void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map);
+
struct arch_timer_cpu {
struct arch_timer_context timers[NR_KVM_TIMERS];
@@ -94,7 +96,7 @@ struct arch_timer_cpu {
int __init kvm_timer_hyp_init(bool has_gic);
int kvm_timer_enable(struct kvm_vcpu *vcpu);
-int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu);
+void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_timer_sync_user(struct kvm_vcpu *vcpu);
bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu);
@@ -145,4 +147,9 @@ u64 timer_get_cval(struct arch_timer_context *ctxt);
void kvm_timer_cpu_up(void);
void kvm_timer_cpu_down(void);
+static inline bool has_cntpoff(void)
+{
+ return (has_vhe() && cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF));
+}
+
#endif
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 1a6a695ca67a..4b9d8fb393a8 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -12,8 +12,7 @@
#define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1)
-#ifdef CONFIG_HW_PERF_EVENTS
-
+#if IS_ENABLED(CONFIG_HW_PERF_EVENTS) && IS_ENABLED(CONFIG_KVM)
struct kvm_pmc {
u8 idx; /* index into the pmu->pmc array */
struct perf_event *perf_event;
@@ -63,6 +62,7 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
u64 select_idx);
+void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu);
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
@@ -74,9 +74,10 @@ int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);
struct kvm_pmu_events *kvm_get_pmu_events(void);
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
+void kvm_vcpu_pmu_resync_el0(void);
#define kvm_vcpu_has_pmu(vcpu) \
- (test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))
+ (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PMU_V3))
/*
* Updates the vcpu's view of the pmu events for this cpu.
@@ -92,11 +93,19 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
/*
* Evaluates as true when emulating PMUv3p5, and false otherwise.
*/
-#define kvm_pmu_is_3p5(vcpu) \
- (vcpu->kvm->arch.dfr0_pmuver.imp >= ID_AA64DFR0_EL1_PMUVer_V3P5)
+#define kvm_pmu_is_3p5(vcpu) ({ \
+ u64 val = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1); \
+ u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val); \
+ \
+ pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5; \
+})
u8 kvm_arm_pmu_get_pmuver_limit(void);
+u64 kvm_pmu_evtyper_mask(struct kvm *kvm);
+int kvm_arm_set_default_pmu(struct kvm *kvm);
+u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm);
+u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu);
#else
struct kvm_pmu {
};
@@ -163,10 +172,31 @@ static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
+static inline void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu) {}
static inline u8 kvm_arm_pmu_get_pmuver_limit(void)
{
return 0;
}
+static inline u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
+{
+ return 0;
+}
+static inline void kvm_vcpu_pmu_resync_el0(void) {}
+
+static inline int kvm_arm_set_default_pmu(struct kvm *kvm)
+{
+ return -ENODEV;
+}
+
+static inline u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm)
+{
+ return 0;
+}
+
+static inline u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
#endif
diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h
index 6e55b9283789..e8fb624013d1 100644
--- a/include/kvm/arm_psci.h
+++ b/include/kvm/arm_psci.h
@@ -26,7 +26,7 @@ static inline int kvm_psci_version(struct kvm_vcpu *vcpu)
* revisions. It is thus safe to return the latest, unless
* userspace has instructed us otherwise.
*/
- if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) {
+ if (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PSCI_0_2)) {
if (vcpu->kvm->arch.psci_version)
return vcpu->kvm->arch.psci_version;
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 402b545959af..8cc38e836f54 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -375,8 +375,8 @@ int kvm_vgic_map_resources(struct kvm *kvm);
int kvm_vgic_hyp_init(void);
void kvm_vgic_init_cpu_hardware(void);
-int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
- bool level, void *owner);
+int kvm_vgic_inject_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
+ unsigned int intid, bool level, void *owner);
int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
u32 vintid, struct irq_ops *ops);
int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid);
@@ -431,7 +431,7 @@ int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int irq,
int vgic_v4_load(struct kvm_vcpu *vcpu);
void vgic_v4_commit(struct kvm_vcpu *vcpu);
-int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db);
+int vgic_v4_put(struct kvm_vcpu *vcpu);
/* CPU HP callbacks */
void kvm_vgic_cpu_up(void);
diff --git a/include/kvm/iodev.h b/include/kvm/iodev.h
index d75fc4365746..56619e33251e 100644
--- a/include/kvm/iodev.h
+++ b/include/kvm/iodev.h
@@ -55,10 +55,4 @@ static inline int kvm_iodevice_write(struct kvm_vcpu *vcpu,
: -EOPNOTSUPP;
}
-static inline void kvm_iodevice_destructor(struct kvm_io_device *dev)
-{
- if (dev->ops->destructor)
- dev->ops->destructor(dev);
-}
-
#endif /* __KVM_IODEV_H__ */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 7b71dd74baeb..b7165e52b3c6 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -15,6 +15,7 @@
#include <linux/mod_devicetable.h>
#include <linux/property.h>
#include <linux/uuid.h>
+#include <linux/node.h>
struct irq_domain;
struct irq_domain_ops;
@@ -30,6 +31,7 @@ struct irq_domain_ops;
#include <linux/dynamic_debug.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/fw_table.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
@@ -37,6 +39,16 @@ struct irq_domain_ops;
#include <acpi/acpi_io.h>
#include <asm/acpi.h>
+#ifdef CONFIG_ACPI_TABLE_LIB
+#define EXPORT_SYMBOL_ACPI_LIB(x) EXPORT_SYMBOL_NS_GPL(x, ACPI)
+#define __init_or_acpilib
+#define __initdata_or_acpilib
+#else
+#define EXPORT_SYMBOL_ACPI_LIB(x)
+#define __init_or_acpilib __init
+#define __initdata_or_acpilib __initdata
+#endif
+
static inline acpi_handle acpi_device_handle(struct acpi_device *adev)
{
return adev ? adev->handle : NULL;
@@ -70,19 +82,6 @@ static inline void acpi_free_fwnode_static(struct fwnode_handle *fwnode)
kfree(fwnode);
}
-/**
- * ACPI_DEVICE_CLASS - macro used to describe an ACPI device with
- * the PCI-defined class-code information
- *
- * @_cls : the class, subclass, prog-if triple for this device
- * @_msk : the class mask for this device
- *
- * This macro is used to create a struct acpi_device_id that matches a
- * specific PCI class. The .id and .driver_data fields will be left
- * initialized with the default value.
- */
-#define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (_cls), .cls_msk = (_msk),
-
static inline bool has_acpi_companion(struct device *dev)
{
return is_acpi_device_node(dev->fwnode);
@@ -132,21 +131,8 @@ enum acpi_address_range_id {
/* Table Handlers */
-union acpi_subtable_headers {
- struct acpi_subtable_header common;
- struct acpi_hmat_structure hmat;
- struct acpi_prmt_module_header prmt;
- struct acpi_cedt_header cedt;
-};
-
typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *table);
-typedef int (*acpi_tbl_entry_handler)(union acpi_subtable_headers *header,
- const unsigned long end);
-
-typedef int (*acpi_tbl_entry_handler_arg)(union acpi_subtable_headers *header,
- void *arg, const unsigned long end);
-
/* Debugger support */
struct acpi_debugger_ops {
@@ -220,14 +206,6 @@ static inline int acpi_debugger_notify_command_complete(void)
(!entry) || (unsigned long)entry + sizeof(*entry) > end || \
((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
-struct acpi_subtable_proc {
- int id;
- acpi_tbl_entry_handler handler;
- acpi_tbl_entry_handler_arg handler_arg;
- void *arg;
- int count;
-};
-
void __iomem *__acpi_map_table(unsigned long phys, unsigned long size);
void __acpi_unmap_table(void __iomem *map, unsigned long size);
int early_acpi_boot_init(void);
@@ -242,16 +220,6 @@ void acpi_reserve_initial_tables (void);
void acpi_table_init_complete (void);
int acpi_table_init (void);
-#ifdef CONFIG_ACPI_TABLE_LIB
-#define EXPORT_SYMBOL_ACPI_LIB(x) EXPORT_SYMBOL_NS_GPL(x, ACPI)
-#define __init_or_acpilib
-#define __initdata_or_acpilib
-#else
-#define EXPORT_SYMBOL_ACPI_LIB(x)
-#define __init_or_acpilib __init
-#define __initdata_or_acpilib __initdata
-#endif
-
int acpi_table_parse(char *id, acpi_tbl_table_handler handler);
int __init_or_acpilib acpi_table_parse_entries(char *id,
unsigned long table_size, int entry_id,
@@ -269,10 +237,15 @@ acpi_table_parse_cedt(enum acpi_cedt_type id,
int acpi_parse_mcfg (struct acpi_table_header *header);
void acpi_table_print_madt_entry (struct acpi_subtable_header *madt);
+static inline bool acpi_gicc_is_usable(struct acpi_madt_generic_interrupt *gicc)
+{
+ return gicc->flags & ACPI_MADT_ENABLED;
+}
+
/* the following numa functions are architecture-dependent */
void acpi_numa_slit_init (struct acpi_table_slit *slit);
-#if defined(CONFIG_X86) || defined(CONFIG_IA64) || defined(CONFIG_LOONGARCH)
+#if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH)
void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa);
#else
static inline void
@@ -414,6 +387,8 @@ extern bool acpi_is_pnp_device(struct acpi_device *);
typedef void (*wmi_notify_handler) (u32 value, void *context);
+int wmi_instance_count(const char *guid);
+
extern acpi_status wmi_evaluate_method(const char *guid, u8 instance,
u32 method_id,
const struct acpi_buffer *in,
@@ -450,6 +425,23 @@ extern int acpi_blacklisted(void);
extern void acpi_osi_setup(char *str);
extern bool acpi_osi_is_win8(void);
+#ifdef CONFIG_ACPI_THERMAL_LIB
+int thermal_acpi_active_trip_temp(struct acpi_device *adev, int id, int *ret_temp);
+int thermal_acpi_passive_trip_temp(struct acpi_device *adev, int *ret_temp);
+int thermal_acpi_hot_trip_temp(struct acpi_device *adev, int *ret_temp);
+int thermal_acpi_critical_trip_temp(struct acpi_device *adev, int *ret_temp);
+#endif
+
+#ifdef CONFIG_ACPI_HMAT
+int acpi_get_genport_coordinates(u32 uid, struct access_coordinate *coord);
+#else
+static inline int acpi_get_genport_coordinates(u32 uid,
+ struct access_coordinate *coord)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
#ifdef CONFIG_ACPI_NUMA
int acpi_map_pxm_to_node(int pxm);
int acpi_get_node(acpi_handle handle);
@@ -488,8 +480,6 @@ static inline int acpi_get_node(acpi_handle handle)
return 0;
}
#endif
-extern int acpi_paddr_to_node(u64 start_addr, u64 size);
-
extern int pnpacpi_disabled;
#define PXM_INVAL (-1)
@@ -712,13 +702,15 @@ int acpi_match_platform_list(const struct acpi_platform_list *plat);
extern void acpi_early_init(void);
extern void acpi_subsystem_init(void);
-extern void arch_post_acpi_subsys_init(void);
extern int acpi_nvs_register(__u64 start, __u64 size);
extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *),
void *data);
+const struct acpi_device_id *acpi_match_acpi_device(const struct acpi_device_id *ids,
+ const struct acpi_device *adev);
+
const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
const struct device *dev);
@@ -781,7 +773,10 @@ const char *acpi_get_subsystem_id(acpi_handle handle);
#define ACPI_COMPANION_SET(dev, adev) do { } while (0)
#define ACPI_HANDLE(dev) (NULL)
#define ACPI_HANDLE_FWNODE(fwnode) (NULL)
-#define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (0), .cls_msk = (0),
+
+/* Get rid of the -Wunused-variable warning for adev */
+#define acpi_dev_uid_match(adev, uid2) (adev && false)
+#define acpi_dev_hid_uid_match(adev, hid2, uid2) (adev && false)
#include <acpi/acpi_numa.h>
@@ -799,12 +794,6 @@ static inline bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
struct acpi_device;
-static inline bool
-acpi_dev_hid_uid_match(struct acpi_device *adev, const char *hid2, const char *uid2)
-{
- return false;
-}
-
static inline int acpi_dev_uid_to_integer(struct acpi_device *adev, u64 *integer)
{
return -ENODEV;
@@ -935,6 +924,12 @@ static inline int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *),
struct acpi_device_id;
+static inline const struct acpi_device_id *acpi_match_acpi_device(
+ const struct acpi_device_id *ids, const struct acpi_device *adev)
+{
+ return NULL;
+}
+
static inline const struct acpi_device_id *acpi_match_device(
const struct acpi_device_id *ids, const struct device *dev)
{
@@ -1084,6 +1079,8 @@ static inline bool acpi_sleep_state_supported(u8 sleep_state)
#endif /* !CONFIG_ACPI */
+extern void arch_post_acpi_subsys_init(void);
+
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
int acpi_ioapic_add(acpi_handle root);
#else
@@ -1102,7 +1099,7 @@ void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state,
u32 val_a, u32 val_b);
-#ifdef CONFIG_X86
+#if defined(CONFIG_SUSPEND) && defined(CONFIG_X86)
struct acpi_s2idle_dev_ops {
struct list_head list_node;
void (*prepare)(void);
@@ -1111,15 +1108,14 @@ struct acpi_s2idle_dev_ops {
};
int acpi_register_lps0_dev(struct acpi_s2idle_dev_ops *arg);
void acpi_unregister_lps0_dev(struct acpi_s2idle_dev_ops *arg);
-#endif /* CONFIG_X86 */
-#ifndef CONFIG_IA64
-void arch_reserve_mem_area(acpi_physical_address addr, size_t size);
-#else
-static inline void arch_reserve_mem_area(acpi_physical_address addr,
- size_t size)
+int acpi_get_lps0_constraint(struct acpi_device *adev);
+#else /* CONFIG_SUSPEND && CONFIG_X86 */
+static inline int acpi_get_lps0_constraint(struct acpi_device *adev)
{
+ return ACPI_STATE_UNKNOWN;
}
-#endif /* CONFIG_X86 */
+#endif /* CONFIG_SUSPEND && CONFIG_X86 */
+void arch_reserve_mem_area(acpi_physical_address addr, size_t size);
#else
#define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0)
#endif
@@ -1478,6 +1474,15 @@ static inline int lpit_read_residency_count_address(u64 *address)
}
#endif
+#ifdef CONFIG_ACPI_PROCESSOR_IDLE
+#ifndef arch_get_idle_state_flags
+static inline unsigned int arch_get_idle_state_flags(u32 arch_flags)
+{
+ return 0;
+}
+#endif
+#endif /* CONFIG_ACPI_PROCESSOR_IDLE */
+
#ifdef CONFIG_ACPI_PPTT
int acpi_pptt_cpu_is_thread(unsigned int cpu);
int find_acpi_cpu_topology(unsigned int cpu, int level);
@@ -1507,6 +1512,12 @@ static inline int find_acpi_cpu_topology_hetero_id(unsigned int cpu)
}
#endif
+#ifdef CONFIG_ARM64
+void acpi_arm_init(void);
+#else
+static inline void acpi_arm_init(void) { }
+#endif
+
#ifdef CONFIG_ACPI_PCC
void acpi_init_pcc(void);
#else
@@ -1531,4 +1542,9 @@ static inline void acpi_device_notify(struct device *dev) { }
static inline void acpi_device_notify_remove(struct device *dev) { }
#endif
+static inline void acpi_use_parent_companion(struct device *dev)
+{
+ ACPI_COMPANION_SET(dev, ACPI_COMPANION(dev->parent));
+}
+
#endif /*_LINUX_ACPI_H*/
diff --git a/include/linux/acpi_agdi.h b/include/linux/acpi_agdi.h
deleted file mode 100644
index f477f0b452fa..000000000000
--- a/include/linux/acpi_agdi.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-
-#ifndef __ACPI_AGDI_H__
-#define __ACPI_AGDI_H__
-
-#include <linux/acpi.h>
-
-#ifdef CONFIG_ACPI_AGDI
-void __init acpi_agdi_init(void);
-#else
-static inline void acpi_agdi_init(void) {}
-#endif
-#endif /* __ACPI_AGDI_H__ */
diff --git a/include/linux/acpi_amd_wbrf.h b/include/linux/acpi_amd_wbrf.h
new file mode 100644
index 000000000000..898f31d536d4
--- /dev/null
+++ b/include/linux/acpi_amd_wbrf.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Wifi Band Exclusion Interface (AMD ACPI Implementation)
+ * Copyright (C) 2023 Advanced Micro Devices
+ */
+
+#ifndef _ACPI_AMD_WBRF_H
+#define _ACPI_AMD_WBRF_H
+
+#include <linux/device.h>
+#include <linux/notifier.h>
+
+/* The maximum number of frequency band ranges */
+#define MAX_NUM_OF_WBRF_RANGES 11
+
+/* Record actions */
+#define WBRF_RECORD_ADD 0x0
+#define WBRF_RECORD_REMOVE 0x1
+
+/**
+ * struct freq_band_range - Wifi frequency band range definition
+ * @start: start frequency point (in Hz)
+ * @end: end frequency point (in Hz)
+ */
+struct freq_band_range {
+ u64 start;
+ u64 end;
+};
+
+/**
+ * struct wbrf_ranges_in_out - wbrf ranges info
+ * @num_of_ranges: total number of band ranges in this struct
+ * @band_list: array of Wifi band ranges
+ */
+struct wbrf_ranges_in_out {
+ u64 num_of_ranges;
+ struct freq_band_range band_list[MAX_NUM_OF_WBRF_RANGES];
+};
+
+/**
+ * enum wbrf_notifier_actions - wbrf notifier actions index
+ * @WBRF_CHANGED: one or more frequency bands were updated. Consumers
+ * should retrieve the latest active frequency bands.
+ */
+enum wbrf_notifier_actions {
+ WBRF_CHANGED,
+};
+
+#if IS_ENABLED(CONFIG_AMD_WBRF)
+bool acpi_amd_wbrf_supported_producer(struct device *dev);
+int acpi_amd_wbrf_add_remove(struct device *dev, uint8_t action, struct wbrf_ranges_in_out *in);
+bool acpi_amd_wbrf_supported_consumer(struct device *dev);
+int amd_wbrf_retrieve_freq_band(struct device *dev, struct wbrf_ranges_in_out *out);
+int amd_wbrf_register_notifier(struct notifier_block *nb);
+int amd_wbrf_unregister_notifier(struct notifier_block *nb);
+#else
+static inline
+bool acpi_amd_wbrf_supported_consumer(struct device *dev)
+{
+ return false;
+}
+
+static inline
+int acpi_amd_wbrf_add_remove(struct device *dev, uint8_t action, struct wbrf_ranges_in_out *in)
+{
+ return -ENODEV;
+}
+
+static inline
+bool acpi_amd_wbrf_supported_producer(struct device *dev)
+{
+ return false;
+}
+static inline
+int amd_wbrf_retrieve_freq_band(struct device *dev, struct wbrf_ranges_in_out *out)
+{
+ return -ENODEV;
+}
+static inline
+int amd_wbrf_register_notifier(struct notifier_block *nb)
+{
+ return -ENODEV;
+}
+static inline
+int amd_wbrf_unregister_notifier(struct notifier_block *nb)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_AMD_WBRF */
+
+#endif /* _ACPI_AMD_WBRF_H */
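
[Editor's note] A hedged sketch of the intended producer/consumer split (the device pointer, function bodies, and the 2.4 GHz band values are illustrative only):

	/* Producer side, e.g. a wireless driver reporting an in-use band: */
	static int example_report_band(struct device *dev)
	{
		struct wbrf_ranges_in_out in = {
			.num_of_ranges = 1,
			.band_list = {
				{ .start = 2400000000ULL, .end = 2500000000ULL },
			},
		};

		if (!acpi_amd_wbrf_supported_producer(dev))
			return -ENODEV;

		return acpi_amd_wbrf_add_remove(dev, WBRF_RECORD_ADD, &in);
	}

	/* Consumer side, typically run after a WBRF_CHANGED notification: */
	static int example_fetch_bands(struct device *dev)
	{
		struct wbrf_ranges_in_out out;

		if (!acpi_amd_wbrf_supported_consumer(dev))
			return -ENODEV;

		return amd_wbrf_retrieve_freq_band(dev, &out);
	}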
diff --git a/include/linux/acpi_apmt.h b/include/linux/acpi_apmt.h
deleted file mode 100644
index 40bd634d082f..000000000000
--- a/include/linux/acpi_apmt.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * ARM CoreSight PMU driver.
- * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES.
- *
- */
-
-#ifndef __ACPI_APMT_H__
-#define __ACPI_APMT_H__
-
-#include <linux/acpi.h>
-
-#ifdef CONFIG_ACPI_APMT
-void acpi_apmt_init(void);
-#else
-static inline void acpi_apmt_init(void) { }
-#endif /* CONFIG_ACPI_APMT */
-
-#endif /* __ACPI_APMT_H__ */
diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h
index b43be0987b19..1cb65592c95d 100644
--- a/include/linux/acpi_iort.h
+++ b/include/linux/acpi_iort.h
@@ -21,18 +21,19 @@
*/
#define IORT_SMMU_V3_PMCG_GENERIC 0x00000000 /* Generic SMMUv3 PMCG */
#define IORT_SMMU_V3_PMCG_HISI_HIP08 0x00000001 /* HiSilicon HIP08 PMCG */
+#define IORT_SMMU_V3_PMCG_HISI_HIP09 0x00000002 /* HiSilicon HIP09 PMCG */
int iort_register_domain_token(int trans_id, phys_addr_t base,
struct fwnode_handle *fw_node);
void iort_deregister_domain_token(int trans_id);
struct fwnode_handle *iort_find_domain_token(int trans_id);
+int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id);
+
#ifdef CONFIG_ACPI_IORT
-void acpi_iort_init(void);
u32 iort_msi_map_id(struct device *dev, u32 id);
struct irq_domain *iort_get_device_domain(struct device *dev, u32 id,
enum irq_domain_bus_token bus_token);
void acpi_configure_pmsi_domain(struct device *dev);
-int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id);
void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode,
struct list_head *head);
void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode,
@@ -43,7 +44,6 @@ int iort_iommu_configure_id(struct device *dev, const u32 *id_in);
void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head);
phys_addr_t acpi_iort_dma_get_max_cpu_address(void);
#else
-static inline void acpi_iort_init(void) { }
static inline u32 iort_msi_map_id(struct device *dev, u32 id)
{ return id; }
static inline struct irq_domain *iort_get_device_domain(
diff --git a/include/linux/aer.h b/include/linux/aer.h
index 97f64ba1b34a..ae0fae70d4bd 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -19,10 +19,10 @@
struct pci_dev;
struct aer_header_log_regs {
- unsigned int dw0;
- unsigned int dw1;
- unsigned int dw2;
- unsigned int dw3;
+ u32 dw0;
+ u32 dw1;
+ u32 dw2;
+ u32 dw3;
};
struct aer_capability_regs {
@@ -41,30 +41,17 @@ struct aer_capability_regs {
};
#if defined(CONFIG_PCIEAER)
-/* PCIe port driver needs this function to enable AER */
-int pci_enable_pcie_error_reporting(struct pci_dev *dev);
-int pci_disable_pcie_error_reporting(struct pci_dev *dev);
int pci_aer_clear_nonfatal_status(struct pci_dev *dev);
-void pci_save_aer_state(struct pci_dev *dev);
-void pci_restore_aer_state(struct pci_dev *dev);
+int pcie_aer_is_native(struct pci_dev *dev);
#else
-static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev)
-{
- return -EINVAL;
-}
-static inline int pci_disable_pcie_error_reporting(struct pci_dev *dev)
-{
- return -EINVAL;
-}
static inline int pci_aer_clear_nonfatal_status(struct pci_dev *dev)
{
return -EINVAL;
}
-static inline void pci_save_aer_state(struct pci_dev *dev) {}
-static inline void pci_restore_aer_state(struct pci_dev *dev) {}
+static inline int pcie_aer_is_native(struct pci_dev *dev) { return 0; }
#endif
-void cper_print_aer(struct pci_dev *dev, int aer_severity,
+void pci_print_aer(struct pci_dev *dev, int aer_severity,
struct aer_capability_regs *aer);
int cper_severity_to_aer(int cper_severity);
void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index 5001e14c5c06..c60a6a14638c 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -107,7 +107,7 @@ enum amba_vendor {
extern struct bus_type amba_bustype;
-#define to_amba_device(d) container_of(d, struct amba_device, dev)
+#define to_amba_device(d) container_of_const(d, struct amba_device, dev)
#define amba_get_drvdata(d) dev_get_drvdata(&d->dev)
#define amba_set_drvdata(d,p) dev_set_drvdata(&d->dev, p)
diff --git a/include/linux/amba/clcd-regs.h b/include/linux/amba/clcd-regs.h
deleted file mode 100644
index 421b0fa90d6a..000000000000
--- a/include/linux/amba/clcd-regs.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * David A Rusling
- *
- * Copyright (C) 2001 ARM Limited
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- */
-
-#ifndef AMBA_CLCD_REGS_H
-#define AMBA_CLCD_REGS_H
-
-/*
- * CLCD Controller Internal Register addresses
- */
-#define CLCD_TIM0 0x00000000
-#define CLCD_TIM1 0x00000004
-#define CLCD_TIM2 0x00000008
-#define CLCD_TIM3 0x0000000c
-#define CLCD_UBAS 0x00000010
-#define CLCD_LBAS 0x00000014
-
-#define CLCD_PL110_IENB 0x00000018
-#define CLCD_PL110_CNTL 0x0000001c
-#define CLCD_PL110_STAT 0x00000020
-#define CLCD_PL110_INTR 0x00000024
-#define CLCD_PL110_UCUR 0x00000028
-#define CLCD_PL110_LCUR 0x0000002C
-
-#define CLCD_PL111_CNTL 0x00000018
-#define CLCD_PL111_IENB 0x0000001c
-#define CLCD_PL111_RIS 0x00000020
-#define CLCD_PL111_MIS 0x00000024
-#define CLCD_PL111_ICR 0x00000028
-#define CLCD_PL111_UCUR 0x0000002c
-#define CLCD_PL111_LCUR 0x00000030
-
-#define CLCD_PALL 0x00000200
-#define CLCD_PALETTE 0x00000200
-
-#define TIM2_PCD_LO_MASK GENMASK(4, 0)
-#define TIM2_PCD_LO_BITS 5
-#define TIM2_CLKSEL (1 << 5)
-#define TIM2_ACB_MASK GENMASK(10, 6)
-#define TIM2_IVS (1 << 11)
-#define TIM2_IHS (1 << 12)
-#define TIM2_IPC (1 << 13)
-#define TIM2_IOE (1 << 14)
-#define TIM2_BCD (1 << 26)
-#define TIM2_PCD_HI_MASK GENMASK(31, 27)
-#define TIM2_PCD_HI_BITS 5
-#define TIM2_PCD_HI_SHIFT 27
-
-#define CNTL_LCDEN (1 << 0)
-#define CNTL_LCDBPP1 (0 << 1)
-#define CNTL_LCDBPP2 (1 << 1)
-#define CNTL_LCDBPP4 (2 << 1)
-#define CNTL_LCDBPP8 (3 << 1)
-#define CNTL_LCDBPP16 (4 << 1)
-#define CNTL_LCDBPP16_565 (6 << 1)
-#define CNTL_LCDBPP16_444 (7 << 1)
-#define CNTL_LCDBPP24 (5 << 1)
-#define CNTL_LCDBW (1 << 4)
-#define CNTL_LCDTFT (1 << 5)
-#define CNTL_LCDMONO8 (1 << 6)
-#define CNTL_LCDDUAL (1 << 7)
-#define CNTL_BGR (1 << 8)
-#define CNTL_BEBO (1 << 9)
-#define CNTL_BEPO (1 << 10)
-#define CNTL_LCDPWR (1 << 11)
-#define CNTL_LCDVCOMP(x) ((x) << 12)
-#define CNTL_LDMAFIFOTIME (1 << 15)
-#define CNTL_WATERMARK (1 << 16)
-
-/* ST Microelectronics variant bits */
-#define CNTL_ST_1XBPP_444 0x0
-#define CNTL_ST_1XBPP_5551 (1 << 17)
-#define CNTL_ST_1XBPP_565 (1 << 18)
-#define CNTL_ST_CDWID_12 0x0
-#define CNTL_ST_CDWID_16 (1 << 19)
-#define CNTL_ST_CDWID_18 (1 << 20)
-#define CNTL_ST_CDWID_24 ((1 << 19)|(1 << 20))
-#define CNTL_ST_CEAEN (1 << 21)
-#define CNTL_ST_LCDBPP24_PACKED (6 << 1)
-
-#endif /* AMBA_CLCD_REGS_H */
diff --git a/include/linux/amba/clcd.h b/include/linux/amba/clcd.h
deleted file mode 100644
index b6e0cbeaf533..000000000000
--- a/include/linux/amba/clcd.h
+++ /dev/null
@@ -1,290 +0,0 @@
-/*
- * linux/include/asm-arm/hardware/amba_clcd.h -- Integrator LCD panel.
- *
- * David A Rusling
- *
- * Copyright (C) 2001 ARM Limited
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- */
-#include <linux/fb.h>
-#include <linux/amba/clcd-regs.h>
-
-enum {
- /* individual formats */
- CLCD_CAP_RGB444 = (1 << 0),
- CLCD_CAP_RGB5551 = (1 << 1),
- CLCD_CAP_RGB565 = (1 << 2),
- CLCD_CAP_RGB888 = (1 << 3),
- CLCD_CAP_BGR444 = (1 << 4),
- CLCD_CAP_BGR5551 = (1 << 5),
- CLCD_CAP_BGR565 = (1 << 6),
- CLCD_CAP_BGR888 = (1 << 7),
-
- /* connection layouts */
- CLCD_CAP_444 = CLCD_CAP_RGB444 | CLCD_CAP_BGR444,
- CLCD_CAP_5551 = CLCD_CAP_RGB5551 | CLCD_CAP_BGR5551,
- CLCD_CAP_565 = CLCD_CAP_RGB565 | CLCD_CAP_BGR565,
- CLCD_CAP_888 = CLCD_CAP_RGB888 | CLCD_CAP_BGR888,
-
- /* red/blue ordering */
- CLCD_CAP_RGB = CLCD_CAP_RGB444 | CLCD_CAP_RGB5551 |
- CLCD_CAP_RGB565 | CLCD_CAP_RGB888,
- CLCD_CAP_BGR = CLCD_CAP_BGR444 | CLCD_CAP_BGR5551 |
- CLCD_CAP_BGR565 | CLCD_CAP_BGR888,
-
- CLCD_CAP_ALL = CLCD_CAP_BGR | CLCD_CAP_RGB,
-};
-
-struct backlight_device;
-
-struct clcd_panel {
- struct fb_videomode mode;
- signed short width; /* width in mm */
- signed short height; /* height in mm */
- u32 tim2;
- u32 tim3;
- u32 cntl;
- u32 caps;
- unsigned int bpp:8,
- fixedtimings:1,
- grayscale:1;
- unsigned int connector;
- struct backlight_device *backlight;
- /*
- * If the B/R lines are switched between the CLCD
- * and the panel we need to know this and not try to
- * compensate with the BGR bit in the control register.
- */
- bool bgr_connection;
-};
-
-struct clcd_regs {
- u32 tim0;
- u32 tim1;
- u32 tim2;
- u32 tim3;
- u32 cntl;
- unsigned long pixclock;
-};
-
-struct clcd_fb;
-
-/*
- * the board-type specific routines
- */
-struct clcd_board {
- const char *name;
-
- /*
- * Optional. Hardware capability flags.
- */
- u32 caps;
-
- /*
- * Optional. Check whether the var structure is acceptable
- * for this display.
- */
- int (*check)(struct clcd_fb *fb, struct fb_var_screeninfo *var);
-
- /*
- * Compulsory. Decode fb->fb.var into regs->*. In the case of
- * fixed timing, set regs->* to the register values required.
- */
- void (*decode)(struct clcd_fb *fb, struct clcd_regs *regs);
-
- /*
- * Optional. Disable any extra display hardware.
- */
- void (*disable)(struct clcd_fb *);
-
- /*
- * Optional. Enable any extra display hardware.
- */
- void (*enable)(struct clcd_fb *);
-
- /*
- * Setup platform specific parts of CLCD driver
- */
- int (*setup)(struct clcd_fb *);
-
- /*
- * mmap the framebuffer memory
- */
- int (*mmap)(struct clcd_fb *, struct vm_area_struct *);
-
- /*
- * Remove platform specific parts of CLCD driver
- */
- void (*remove)(struct clcd_fb *);
-};
-
-struct amba_device;
-struct clk;
-
-/* this data structure describes each frame buffer device we find */
-struct clcd_fb {
- struct fb_info fb;
- struct amba_device *dev;
- struct clk *clk;
- struct clcd_panel *panel;
- struct clcd_board *board;
- void *board_data;
- void __iomem *regs;
- u16 off_ienb;
- u16 off_cntl;
- u32 clcd_cntl;
- u32 cmap[16];
- bool clk_enabled;
-};
-
-static inline void clcdfb_decode(struct clcd_fb *fb, struct clcd_regs *regs)
-{
- struct fb_var_screeninfo *var = &fb->fb.var;
- u32 val, cpl;
-
- /*
- * Program the CLCD controller registers and start the CLCD
- */
- val = ((var->xres / 16) - 1) << 2;
- val |= (var->hsync_len - 1) << 8;
- val |= (var->right_margin - 1) << 16;
- val |= (var->left_margin - 1) << 24;
- regs->tim0 = val;
-
- val = var->yres;
- if (fb->panel->cntl & CNTL_LCDDUAL)
- val /= 2;
- val -= 1;
- val |= (var->vsync_len - 1) << 10;
- val |= var->lower_margin << 16;
- val |= var->upper_margin << 24;
- regs->tim1 = val;
-
- val = fb->panel->tim2;
- val |= var->sync & FB_SYNC_HOR_HIGH_ACT ? 0 : TIM2_IHS;
- val |= var->sync & FB_SYNC_VERT_HIGH_ACT ? 0 : TIM2_IVS;
-
- cpl = var->xres_virtual;
- if (fb->panel->cntl & CNTL_LCDTFT) /* TFT */
- /* / 1 */;
- else if (!var->grayscale) /* STN color */
- cpl = cpl * 8 / 3;
- else if (fb->panel->cntl & CNTL_LCDMONO8) /* STN monochrome, 8bit */
- cpl /= 8;
- else /* STN monochrome, 4bit */
- cpl /= 4;
-
- regs->tim2 = val | ((cpl - 1) << 16);
-
- regs->tim3 = fb->panel->tim3;
-
- val = fb->panel->cntl;
- if (var->grayscale)
- val |= CNTL_LCDBW;
-
- if (fb->panel->caps && fb->board->caps && var->bits_per_pixel >= 16) {
- /*
- * if board and panel supply capabilities, we can support
- * changing BGR/RGB depending on supplied parameters. Here
- * we switch to what the framebuffer is providing if need
- * be, so if the framebuffer is BGR but the display connection
- * is RGB (first case) we switch it around. Vice versa mutatis
- * mutandis if the framebuffer is RGB but the display connection
- * is BGR, we flip it around.
- */
- if (var->red.offset == 0)
- val &= ~CNTL_BGR;
- else
- val |= CNTL_BGR;
- if (fb->panel->bgr_connection)
- val ^= CNTL_BGR;
- }
-
- switch (var->bits_per_pixel) {
- case 1:
- val |= CNTL_LCDBPP1;
- break;
- case 2:
- val |= CNTL_LCDBPP2;
- break;
- case 4:
- val |= CNTL_LCDBPP4;
- break;
- case 8:
- val |= CNTL_LCDBPP8;
- break;
- case 16:
- /*
- * PL110 cannot choose between 5551 and 565 modes in its
- * control register. It is possible to use 565 with
- * custom external wiring.
- */
- if (amba_part(fb->dev) == 0x110 ||
- var->green.length == 5)
- val |= CNTL_LCDBPP16;
- else if (var->green.length == 6)
- val |= CNTL_LCDBPP16_565;
- else
- val |= CNTL_LCDBPP16_444;
- break;
- case 32:
- val |= CNTL_LCDBPP24;
- break;
- }
-
- regs->cntl = val;
- regs->pixclock = var->pixclock;
-}
-
-static inline int clcdfb_check(struct clcd_fb *fb, struct fb_var_screeninfo *var)
-{
- var->xres_virtual = var->xres = (var->xres + 15) & ~15;
- var->yres_virtual = var->yres = (var->yres + 1) & ~1;
-
-#define CHECK(e,l,h) (var->e < l || var->e > h)
- if (CHECK(right_margin, (5+1), 256) || /* back porch */
- CHECK(left_margin, (5+1), 256) || /* front porch */
- CHECK(hsync_len, (5+1), 256) ||
- var->xres > 4096 ||
- var->lower_margin > 255 || /* back porch */
- var->upper_margin > 255 || /* front porch */
- var->vsync_len > 32 ||
- var->yres > 1024)
- return -EINVAL;
-#undef CHECK
-
- /* single panel mode: PCD = max(PCD, 1) */
- /* dual panel mode: PCD = max(PCD, 5) */
-
- /*
- * You can't change the grayscale setting, and
- * we can only do non-interlaced video.
- */
- if (var->grayscale != fb->fb.var.grayscale ||
- (var->vmode & FB_VMODE_MASK) != FB_VMODE_NONINTERLACED)
- return -EINVAL;
-
-#define CHECK(e) (var->e != fb->fb.var.e)
- if (fb->panel->fixedtimings &&
- (CHECK(xres) ||
- CHECK(yres) ||
- CHECK(bits_per_pixel) ||
- CHECK(pixclock) ||
- CHECK(left_margin) ||
- CHECK(right_margin) ||
- CHECK(upper_margin) ||
- CHECK(lower_margin) ||
- CHECK(hsync_len) ||
- CHECK(vsync_len) ||
- CHECK(sync)))
- return -EINVAL;
-#undef CHECK
-
- var->nonstd = 0;
- var->accel_flags = 0;
-
- return 0;
-}
diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h
index a1307b58cc2c..9120de05ead0 100644
--- a/include/linux/amba/serial.h
+++ b/include/linux/amba/serial.h
@@ -10,6 +10,11 @@
#ifndef ASM_ARM_HARDWARE_SERIAL_AMBA_H
#define ASM_ARM_HARDWARE_SERIAL_AMBA_H
+#ifndef __ASSEMBLY__
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#endif
+
#include <linux/types.h>
/* -------------------------------------------------------------------------------
@@ -70,141 +75,145 @@
#define ZX_UART011_ICR 0x4c
#define ZX_UART011_DMACR 0x50
-#define UART011_DR_OE (1 << 11)
-#define UART011_DR_BE (1 << 10)
-#define UART011_DR_PE (1 << 9)
-#define UART011_DR_FE (1 << 8)
-
-#define UART01x_RSR_OE 0x08
-#define UART01x_RSR_BE 0x04
-#define UART01x_RSR_PE 0x02
-#define UART01x_RSR_FE 0x01
-
-#define UART011_FR_RI 0x100
-#define UART011_FR_TXFE 0x080
-#define UART011_FR_RXFF 0x040
-#define UART01x_FR_TXFF 0x020
-#define UART01x_FR_RXFE 0x010
-#define UART01x_FR_BUSY 0x008
-#define UART01x_FR_DCD 0x004
-#define UART01x_FR_DSR 0x002
-#define UART01x_FR_CTS 0x001
+#define UART011_DR_OE BIT(11)
+#define UART011_DR_BE BIT(10)
+#define UART011_DR_PE BIT(9)
+#define UART011_DR_FE BIT(8)
+
+#define UART01x_RSR_OE BIT(3)
+#define UART01x_RSR_BE BIT(2)
+#define UART01x_RSR_PE BIT(1)
+#define UART01x_RSR_FE BIT(0)
+
+#define UART011_FR_RI BIT(8)
+#define UART011_FR_TXFE BIT(7)
+#define UART011_FR_RXFF BIT(6)
+#define UART01x_FR_TXFF (1 << 5) /* used in ASM */
+#define UART01x_FR_RXFE BIT(4)
+#define UART01x_FR_BUSY (1 << 3) /* used in ASM */
+#define UART01x_FR_DCD BIT(2)
+#define UART01x_FR_DSR BIT(1)
+#define UART01x_FR_CTS BIT(0)
#define UART01x_FR_TMSK (UART01x_FR_TXFF + UART01x_FR_BUSY)
/*
* Some bits of the Flag Register on ZTE devices sit at positions
* different from the standard ones.
*/
-#define ZX_UART01x_FR_BUSY 0x100
-#define ZX_UART01x_FR_DSR 0x008
-#define ZX_UART01x_FR_CTS 0x002
-#define ZX_UART011_FR_RI 0x001
-
-#define UART011_CR_CTSEN 0x8000 /* CTS hardware flow control */
-#define UART011_CR_RTSEN 0x4000 /* RTS hardware flow control */
-#define UART011_CR_OUT2 0x2000 /* OUT2 */
-#define UART011_CR_OUT1 0x1000 /* OUT1 */
-#define UART011_CR_RTS 0x0800 /* RTS */
-#define UART011_CR_DTR 0x0400 /* DTR */
-#define UART011_CR_RXE 0x0200 /* receive enable */
-#define UART011_CR_TXE 0x0100 /* transmit enable */
-#define UART011_CR_LBE 0x0080 /* loopback enable */
-#define UART010_CR_RTIE 0x0040
-#define UART010_CR_TIE 0x0020
-#define UART010_CR_RIE 0x0010
-#define UART010_CR_MSIE 0x0008
-#define ST_UART011_CR_OVSFACT 0x0008 /* Oversampling factor */
-#define UART01x_CR_IIRLP 0x0004 /* SIR low power mode */
-#define UART01x_CR_SIREN 0x0002 /* SIR enable */
-#define UART01x_CR_UARTEN 0x0001 /* UART enable */
-
-#define UART011_LCRH_SPS 0x80
+#define ZX_UART01x_FR_BUSY BIT(8)
+#define ZX_UART01x_FR_DSR BIT(3)
+#define ZX_UART01x_FR_CTS BIT(1)
+#define ZX_UART011_FR_RI BIT(0)
+
+#define UART011_CR_CTSEN BIT(15) /* CTS hardware flow control */
+#define UART011_CR_RTSEN BIT(14) /* RTS hardware flow control */
+#define UART011_CR_OUT2 BIT(13) /* OUT2 */
+#define UART011_CR_OUT1 BIT(12) /* OUT1 */
+#define UART011_CR_RTS BIT(11) /* RTS */
+#define UART011_CR_DTR BIT(10) /* DTR */
+#define UART011_CR_RXE BIT(9) /* receive enable */
+#define UART011_CR_TXE BIT(8) /* transmit enable */
+#define UART011_CR_LBE BIT(7) /* loopback enable */
+#define UART010_CR_RTIE BIT(6)
+#define UART010_CR_TIE BIT(5)
+#define UART010_CR_RIE BIT(4)
+#define UART010_CR_MSIE BIT(3)
+#define ST_UART011_CR_OVSFACT BIT(3) /* Oversampling factor */
+#define UART01x_CR_IIRLP BIT(2) /* SIR low power mode */
+#define UART01x_CR_SIREN BIT(1) /* SIR enable */
+#define UART01x_CR_UARTEN BIT(0) /* UART enable */
+
+#define UART011_LCRH_SPS BIT(7)
#define UART01x_LCRH_WLEN_8 0x60
#define UART01x_LCRH_WLEN_7 0x40
#define UART01x_LCRH_WLEN_6 0x20
#define UART01x_LCRH_WLEN_5 0x00
-#define UART01x_LCRH_FEN 0x10
-#define UART01x_LCRH_STP2 0x08
-#define UART01x_LCRH_EPS 0x04
-#define UART01x_LCRH_PEN 0x02
-#define UART01x_LCRH_BRK 0x01
-
-#define ST_UART011_DMAWM_RX_1 (0 << 3)
-#define ST_UART011_DMAWM_RX_2 (1 << 3)
-#define ST_UART011_DMAWM_RX_4 (2 << 3)
-#define ST_UART011_DMAWM_RX_8 (3 << 3)
-#define ST_UART011_DMAWM_RX_16 (4 << 3)
-#define ST_UART011_DMAWM_RX_32 (5 << 3)
-#define ST_UART011_DMAWM_RX_48 (6 << 3)
-#define ST_UART011_DMAWM_TX_1 0
-#define ST_UART011_DMAWM_TX_2 1
-#define ST_UART011_DMAWM_TX_4 2
-#define ST_UART011_DMAWM_TX_8 3
-#define ST_UART011_DMAWM_TX_16 4
-#define ST_UART011_DMAWM_TX_32 5
-#define ST_UART011_DMAWM_TX_48 6
-
-#define UART010_IIR_RTIS 0x08
-#define UART010_IIR_TIS 0x04
-#define UART010_IIR_RIS 0x02
-#define UART010_IIR_MIS 0x01
-
-#define UART011_IFLS_RX1_8 (0 << 3)
-#define UART011_IFLS_RX2_8 (1 << 3)
-#define UART011_IFLS_RX4_8 (2 << 3)
-#define UART011_IFLS_RX6_8 (3 << 3)
-#define UART011_IFLS_RX7_8 (4 << 3)
-#define UART011_IFLS_TX1_8 (0 << 0)
-#define UART011_IFLS_TX2_8 (1 << 0)
-#define UART011_IFLS_TX4_8 (2 << 0)
-#define UART011_IFLS_TX6_8 (3 << 0)
-#define UART011_IFLS_TX7_8 (4 << 0)
+#define UART01x_LCRH_FEN BIT(4)
+#define UART01x_LCRH_STP2 BIT(3)
+#define UART01x_LCRH_EPS BIT(2)
+#define UART01x_LCRH_PEN BIT(1)
+#define UART01x_LCRH_BRK BIT(0)
+
+#define ST_UART011_DMAWM_RX GENMASK(5, 3)
+#define ST_UART011_DMAWM_RX_1 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 0)
+#define ST_UART011_DMAWM_RX_2 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 1)
+#define ST_UART011_DMAWM_RX_4 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 2)
+#define ST_UART011_DMAWM_RX_8 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 3)
+#define ST_UART011_DMAWM_RX_16 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 4)
+#define ST_UART011_DMAWM_RX_32 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 5)
+#define ST_UART011_DMAWM_RX_48 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 6)
+#define ST_UART011_DMAWM_TX GENMASK(2, 0)
+#define ST_UART011_DMAWM_TX_1 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 0)
+#define ST_UART011_DMAWM_TX_2 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 1)
+#define ST_UART011_DMAWM_TX_4 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 2)
+#define ST_UART011_DMAWM_TX_8 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 3)
+#define ST_UART011_DMAWM_TX_16 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 4)
+#define ST_UART011_DMAWM_TX_32 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 5)
+#define ST_UART011_DMAWM_TX_48 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 6)
+
+#define UART010_IIR_RTIS BIT(3)
+#define UART010_IIR_TIS BIT(2)
+#define UART010_IIR_RIS BIT(1)
+#define UART010_IIR_MIS BIT(0)
+
+#define UART011_IFLS_RXIFLSEL GENMASK(5, 3)
+#define UART011_IFLS_RX1_8 FIELD_PREP_CONST(UART011_IFLS_RXIFLSEL, 0)
+#define UART011_IFLS_RX2_8 FIELD_PREP_CONST(UART011_IFLS_RXIFLSEL, 1)
+#define UART011_IFLS_RX4_8 FIELD_PREP_CONST(UART011_IFLS_RXIFLSEL, 2)
+#define UART011_IFLS_RX6_8 FIELD_PREP_CONST(UART011_IFLS_RXIFLSEL, 3)
+#define UART011_IFLS_RX7_8 FIELD_PREP_CONST(UART011_IFLS_RXIFLSEL, 4)
+#define UART011_IFLS_TXIFLSEL GENMASK(2, 0)
+#define UART011_IFLS_TX1_8 FIELD_PREP_CONST(UART011_IFLS_TXIFLSEL, 0)
+#define UART011_IFLS_TX2_8 FIELD_PREP_CONST(UART011_IFLS_TXIFLSEL, 1)
+#define UART011_IFLS_TX4_8 FIELD_PREP_CONST(UART011_IFLS_TXIFLSEL, 2)
+#define UART011_IFLS_TX6_8 FIELD_PREP_CONST(UART011_IFLS_TXIFLSEL, 3)
+#define UART011_IFLS_TX7_8 FIELD_PREP_CONST(UART011_IFLS_TXIFLSEL, 4)
/* special values for ST vendor with deeper fifo */
-#define UART011_IFLS_RX_HALF (5 << 3)
-#define UART011_IFLS_TX_HALF (5 << 0)
-
-#define UART011_OEIM (1 << 10) /* overrun error interrupt mask */
-#define UART011_BEIM (1 << 9) /* break error interrupt mask */
-#define UART011_PEIM (1 << 8) /* parity error interrupt mask */
-#define UART011_FEIM (1 << 7) /* framing error interrupt mask */
-#define UART011_RTIM (1 << 6) /* receive timeout interrupt mask */
-#define UART011_TXIM (1 << 5) /* transmit interrupt mask */
-#define UART011_RXIM (1 << 4) /* receive interrupt mask */
-#define UART011_DSRMIM (1 << 3) /* DSR interrupt mask */
-#define UART011_DCDMIM (1 << 2) /* DCD interrupt mask */
-#define UART011_CTSMIM (1 << 1) /* CTS interrupt mask */
-#define UART011_RIMIM (1 << 0) /* RI interrupt mask */
-
-#define UART011_OEIS (1 << 10) /* overrun error interrupt status */
-#define UART011_BEIS (1 << 9) /* break error interrupt status */
-#define UART011_PEIS (1 << 8) /* parity error interrupt status */
-#define UART011_FEIS (1 << 7) /* framing error interrupt status */
-#define UART011_RTIS (1 << 6) /* receive timeout interrupt status */
-#define UART011_TXIS (1 << 5) /* transmit interrupt status */
-#define UART011_RXIS (1 << 4) /* receive interrupt status */
-#define UART011_DSRMIS (1 << 3) /* DSR interrupt status */
-#define UART011_DCDMIS (1 << 2) /* DCD interrupt status */
-#define UART011_CTSMIS (1 << 1) /* CTS interrupt status */
-#define UART011_RIMIS (1 << 0) /* RI interrupt status */
-
-#define UART011_OEIC (1 << 10) /* overrun error interrupt clear */
-#define UART011_BEIC (1 << 9) /* break error interrupt clear */
-#define UART011_PEIC (1 << 8) /* parity error interrupt clear */
-#define UART011_FEIC (1 << 7) /* framing error interrupt clear */
-#define UART011_RTIC (1 << 6) /* receive timeout interrupt clear */
-#define UART011_TXIC (1 << 5) /* transmit interrupt clear */
-#define UART011_RXIC (1 << 4) /* receive interrupt clear */
-#define UART011_DSRMIC (1 << 3) /* DSR interrupt clear */
-#define UART011_DCDMIC (1 << 2) /* DCD interrupt clear */
-#define UART011_CTSMIC (1 << 1) /* CTS interrupt clear */
-#define UART011_RIMIC (1 << 0) /* RI interrupt clear */
-
-#define UART011_DMAONERR (1 << 2) /* disable dma on error */
-#define UART011_TXDMAE (1 << 1) /* enable transmit dma */
-#define UART011_RXDMAE (1 << 0) /* enable receive dma */
-
-#define UART01x_RSR_ANY (UART01x_RSR_OE|UART01x_RSR_BE|UART01x_RSR_PE|UART01x_RSR_FE)
-#define UART01x_FR_MODEM_ANY (UART01x_FR_DCD|UART01x_FR_DSR|UART01x_FR_CTS)
+#define UART011_IFLS_RX_HALF FIELD_PREP_CONST(UART011_IFLS_RXIFLSEL, 5)
+#define UART011_IFLS_TX_HALF FIELD_PREP_CONST(UART011_IFLS_TXIFLSEL, 5)
+
+#define UART011_OEIM BIT(10) /* overrun error interrupt mask */
+#define UART011_BEIM BIT(9) /* break error interrupt mask */
+#define UART011_PEIM BIT(8) /* parity error interrupt mask */
+#define UART011_FEIM BIT(7) /* framing error interrupt mask */
+#define UART011_RTIM BIT(6) /* receive timeout interrupt mask */
+#define UART011_TXIM BIT(5) /* transmit interrupt mask */
+#define UART011_RXIM BIT(4) /* receive interrupt mask */
+#define UART011_DSRMIM BIT(3) /* DSR interrupt mask */
+#define UART011_DCDMIM BIT(2) /* DCD interrupt mask */
+#define UART011_CTSMIM BIT(1) /* CTS interrupt mask */
+#define UART011_RIMIM BIT(0) /* RI interrupt mask */
+
+#define UART011_OEIS BIT(10) /* overrun error interrupt status */
+#define UART011_BEIS BIT(9) /* break error interrupt status */
+#define UART011_PEIS BIT(8) /* parity error interrupt status */
+#define UART011_FEIS BIT(7) /* framing error interrupt status */
+#define UART011_RTIS BIT(6) /* receive timeout interrupt status */
+#define UART011_TXIS BIT(5) /* transmit interrupt status */
+#define UART011_RXIS BIT(4) /* receive interrupt status */
+#define UART011_DSRMIS BIT(3) /* DSR interrupt status */
+#define UART011_DCDMIS BIT(2) /* DCD interrupt status */
+#define UART011_CTSMIS BIT(1) /* CTS interrupt status */
+#define UART011_RIMIS BIT(0) /* RI interrupt status */
+
+#define UART011_OEIC BIT(10) /* overrun error interrupt clear */
+#define UART011_BEIC BIT(9) /* break error interrupt clear */
+#define UART011_PEIC BIT(8) /* parity error interrupt clear */
+#define UART011_FEIC BIT(7) /* framing error interrupt clear */
+#define UART011_RTIC BIT(6) /* receive timeout interrupt clear */
+#define UART011_TXIC BIT(5) /* transmit interrupt clear */
+#define UART011_RXIC BIT(4) /* receive interrupt clear */
+#define UART011_DSRMIC BIT(3) /* DSR interrupt clear */
+#define UART011_DCDMIC BIT(2) /* DCD interrupt clear */
+#define UART011_CTSMIC BIT(1) /* CTS interrupt clear */
+#define UART011_RIMIC BIT(0) /* RI interrupt clear */
+
+#define UART011_DMAONERR BIT(2) /* disable dma on error */
+#define UART011_TXDMAE BIT(1) /* enable transmit dma */
+#define UART011_RXDMAE BIT(0) /* enable receive dma */
+
+#define UART01x_RSR_ANY (UART01x_RSR_OE | UART01x_RSR_BE | UART01x_RSR_PE | UART01x_RSR_FE)
+#define UART01x_FR_MODEM_ANY (UART01x_FR_DCD | UART01x_FR_DSR | UART01x_FR_CTS)
#ifndef __ASSEMBLY__
struct amba_device; /* in the uncompress code this header is included but amba/bus.h is not */
@@ -220,8 +229,8 @@ struct amba_pl011_data {
bool dma_rx_poll_enable;
unsigned int dma_rx_poll_rate;
unsigned int dma_rx_poll_timeout;
- void (*init) (void);
- void (*exit) (void);
+ void (*init)(void);
+ void (*exit)(void);
};
#endif
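The BIT()/GENMASK()/FIELD_PREP_CONST() conversions above are value-preserving rewrites of the old literals, e.g. ST_UART011_DMAWM_RX_4 is still (2 << 3). A quick userspace re-derivation of the pattern, with the kernel helpers re-implemented locally since this sketch builds outside the kernel:

#include <assert.h>

/* local stand-ins for the kernel's GENMASK()/FIELD_PREP_CONST() helpers */
#define GENMASK(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val)	(((unsigned int)(val) << __builtin_ctz(mask)) & (mask))

#define DMAWM_RX	GENMASK(5, 3)		/* bits 5..3 */
#define DMAWM_RX_4	FIELD_PREP(DMAWM_RX, 2)

int main(void)
{
	assert(DMAWM_RX == 0x38);
	assert(DMAWM_RX_4 == (2 << 3));	/* identical to the old literal */
	return 0;
}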
diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h
index 953e6f12fa1c..dc7ed2f46886 100644
--- a/include/linux/amd-iommu.h
+++ b/include/linux/amd-iommu.h
@@ -32,128 +32,7 @@ struct task_struct;
struct pci_dev;
extern int amd_iommu_detect(void);
-extern int amd_iommu_init_hardware(void);
-
-/**
- * amd_iommu_init_device() - Init device for use with IOMMUv2 driver
- * @pdev: The PCI device to initialize
- * @pasids: Number of PASIDs to support for this device
- *
- * This function does all setup for the device pdev so that it can be
- * used with IOMMUv2.
- * Returns 0 on success or a negative value on error.
- */
-extern int amd_iommu_init_device(struct pci_dev *pdev, int pasids);
-
-/**
- * amd_iommu_free_device() - Free all IOMMUv2 related device resources
- * and disable IOMMUv2 usage for this device
- * @pdev: The PCI device to disable IOMMUv2 usage for
- */
-extern void amd_iommu_free_device(struct pci_dev *pdev);
-
-/**
- * amd_iommu_bind_pasid() - Bind a given task to a PASID on a device
- * @pdev: The PCI device to bind the task to
- * @pasid: The PASID on the device the task should be bound to
- * @task: the task to bind
- *
- * The function returns 0 on success or a negative value on error.
- */
-extern int amd_iommu_bind_pasid(struct pci_dev *pdev, u32 pasid,
- struct task_struct *task);
-
-/**
- * amd_iommu_unbind_pasid() - Unbind a PASID from its task on
- * a device
- * @pdev: The device of the PASID
- * @pasid: The PASID to unbind
- *
- * When this function returns the device is no longer using the PASID
- * and the PASID is no longer bound to its task.
- */
-extern void amd_iommu_unbind_pasid(struct pci_dev *pdev, u32 pasid);
-
-/**
- * amd_iommu_set_invalid_ppr_cb() - Register a call-back for failed
- * PRI requests
- * @pdev: The PCI device the call-back should be registered for
- * @cb: The call-back function
- *
- * The IOMMUv2 driver invokes this call-back when it is unable to
- * successfully handle a PRI request. The device driver can then decide
- * which PRI response the device should see. Possible return values for
- * the call-back are:
- *
- * - AMD_IOMMU_INV_PRI_RSP_SUCCESS - Send SUCCESS back to the device
- * - AMD_IOMMU_INV_PRI_RSP_INVALID - Send INVALID back to the device
- * - AMD_IOMMU_INV_PRI_RSP_FAIL - Send Failure back to the device,
- * the device is required to disable
- * PRI when it receives this response
- *
- * The function returns 0 on success or negative value on error.
- */
-#define AMD_IOMMU_INV_PRI_RSP_SUCCESS 0
-#define AMD_IOMMU_INV_PRI_RSP_INVALID 1
-#define AMD_IOMMU_INV_PRI_RSP_FAIL 2
-
-typedef int (*amd_iommu_invalid_ppr_cb)(struct pci_dev *pdev,
- u32 pasid,
- unsigned long address,
- u16);
-
-extern int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
- amd_iommu_invalid_ppr_cb cb);
-
-#define PPR_FAULT_EXEC (1 << 1)
-#define PPR_FAULT_READ (1 << 2)
-#define PPR_FAULT_WRITE (1 << 5)
-#define PPR_FAULT_USER (1 << 6)
-#define PPR_FAULT_RSVD (1 << 7)
-#define PPR_FAULT_GN (1 << 8)
-
-/**
- * amd_iommu_device_info() - Get information about IOMMUv2 support of a
- * PCI device
- * @pdev: PCI device to query information from
- * @info: A pointer to an amd_iommu_device_info structure which will contain
- * the information about the PCI device
- *
- * Returns 0 on success, negative value on error
- */
-
-#define AMD_IOMMU_DEVICE_FLAG_ATS_SUP 0x1 /* ATS feature supported */
-#define AMD_IOMMU_DEVICE_FLAG_PRI_SUP 0x2 /* PRI feature supported */
-#define AMD_IOMMU_DEVICE_FLAG_PASID_SUP 0x4 /* PASID context supported */
-#define AMD_IOMMU_DEVICE_FLAG_EXEC_SUP 0x8 /* Device may request execution
- on memory pages */
-#define AMD_IOMMU_DEVICE_FLAG_PRIV_SUP 0x10 /* Device may request
- super-user privileges */
-
-struct amd_iommu_device_info {
- int max_pasids;
- u32 flags;
-};
-
-extern int amd_iommu_device_info(struct pci_dev *pdev,
- struct amd_iommu_device_info *info);
-
-/**
- * amd_iommu_set_invalidate_ctx_cb() - Register a call-back for invalidating
- * a pasid context. This call-back is
- * invoked when the IOMMUv2 driver needs to
- * invalidate a PASID context, for example
- * because the task that is bound to that
- * context is about to exit.
- *
- * @pdev: The PCI device the call-back should be registered for
- * @cb: The call-back function
- */
-
-typedef void (*amd_iommu_invalidate_ctx)(struct pci_dev *pdev, u32 pasid);
-extern int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
- amd_iommu_invalidate_ctx cb);
#else /* CONFIG_AMD_IOMMU */
static inline int amd_iommu_detect(void) { return -ENODEV; }
diff --git a/include/linux/amd-pmf-io.h b/include/linux/amd-pmf-io.h
new file mode 100644
index 000000000000..b4f818205216
--- /dev/null
+++ b/include/linux/amd-pmf-io.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * AMD Platform Management Framework Interface
+ *
+ * Copyright (c) 2023, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Authors: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ * Basavaraj Natikar <Basavaraj.Natikar@amd.com>
+ */
+
+#ifndef AMD_PMF_IO_H
+#define AMD_PMF_IO_H
+
+#include <linux/types.h>
+
+/**
+ * enum sfh_message_type - SFH message type to query
+ * @MT_HPD: Message ID used to get human presence info from the MP2 FW
+ * @MT_ALS: Message ID used to get ambient light info from the MP2 FW
+ */
+enum sfh_message_type {
+ MT_HPD,
+ MT_ALS,
+};
+
+/**
+ * enum sfh_hpd_info - Human presence information from the HPD sensor
+ * @SFH_NOT_DETECTED: No HPD connection information available from the MP2 FW
+ * @SFH_USER_PRESENT: The HPD sensor reports the user as present
+ * @SFH_USER_AWAY: The HPD sensor reports the user as away
+ */
+enum sfh_hpd_info {
+ SFH_NOT_DETECTED,
+ SFH_USER_PRESENT,
+ SFH_USER_AWAY,
+};
+
+/**
+ * struct amd_sfh_info - HPD sensor information reported by the MP2 FW
+ * @ambient_light: Ambient light reading
+ * @user_present: User presence state (see enum sfh_hpd_info)
+ */
+struct amd_sfh_info {
+ u32 ambient_light;
+ u8 user_present;
+};
+
+int amd_get_sfh_info(struct amd_sfh_info *sfh_info, enum sfh_message_type op);
+#endif
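A sketch of how a consumer of this new interface might query the MP2 firmware; only amd_get_sfh_info(), struct amd_sfh_info and the two enums come from the header above, and the assumption that a zero return means success is mine:

#include <linux/amd-pmf-io.h>
#include <linux/printk.h>

static void example_query_sfh(void)
{
	struct amd_sfh_info info = {};

	/* human presence; assuming a zero return means success */
	if (!amd_get_sfh_info(&info, MT_HPD))
		pr_info("user is %s\n",
			info.user_present == SFH_USER_PRESENT ? "present" : "away");

	/* ambient light reading */
	if (!amd_get_sfh_info(&info, MT_ALS))
		pr_info("ambient light: %u\n", info.ambient_light);
}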
diff --git a/include/linux/amd-pstate.h b/include/linux/amd-pstate.h
index c10ebf8c42e6..6ad02ad9c7b4 100644
--- a/include/linux/amd-pstate.h
+++ b/include/linux/amd-pstate.h
@@ -70,6 +70,10 @@ struct amd_cpudata {
u32 nominal_perf;
u32 lowest_nonlinear_perf;
u32 lowest_perf;
+ u32 min_limit_perf;
+ u32 max_limit_perf;
+ u32 min_limit_freq;
+ u32 max_limit_freq;
u32 max_freq;
u32 min_freq;
@@ -94,7 +98,8 @@ struct amd_cpudata {
* enum amd_pstate_mode - driver working mode of amd pstate
*/
enum amd_pstate_mode {
- AMD_PSTATE_DISABLE = 0,
+ AMD_PSTATE_UNDEFINED = 0,
+ AMD_PSTATE_DISABLE,
AMD_PSTATE_PASSIVE,
AMD_PSTATE_ACTIVE,
AMD_PSTATE_GUIDED,
@@ -102,6 +107,7 @@ enum amd_pstate_mode {
};
static const char * const amd_pstate_mode_string[] = {
+ [AMD_PSTATE_UNDEFINED] = "undefined",
[AMD_PSTATE_DISABLE] = "disable",
[AMD_PSTATE_PASSIVE] = "passive",
[AMD_PSTATE_ACTIVE] = "active",
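Since AMD_PSTATE_UNDEFINED now occupies slot 0, any table-driven lookup over amd_pstate_mode_string picks the new mode up automatically. A sketch of such a lookup; the helper name is invented here, not the driver's:

static int example_mode_from_string(const char *str)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(amd_pstate_mode_string); i++)
		if (!strcmp(str, amd_pstate_mode_string[i]))
			return i;	/* e.g. "undefined" -> AMD_PSTATE_UNDEFINED */
	return -EINVAL;
}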
diff --git a/include/linux/anon_inodes.h b/include/linux/anon_inodes.h
index 5deaddbd7927..93a5f16d03f3 100644
--- a/include/linux/anon_inodes.h
+++ b/include/linux/anon_inodes.h
@@ -15,13 +15,13 @@ struct inode;
struct file *anon_inode_getfile(const char *name,
const struct file_operations *fops,
void *priv, int flags);
-struct file *anon_inode_getfile_secure(const char *name,
+struct file *anon_inode_create_getfile(const char *name,
const struct file_operations *fops,
void *priv, int flags,
const struct inode *context_inode);
int anon_inode_getfd(const char *name, const struct file_operations *fops,
void *priv, int flags);
-int anon_inode_getfd_secure(const char *name,
+int anon_inode_create_getfd(const char *name,
const struct file_operations *fops,
void *priv, int flags,
const struct inode *context_inode);
diff --git a/include/linux/aperture.h b/include/linux/aperture.h
index 442f15a57cad..1a9a88b11584 100644
--- a/include/linux/aperture.h
+++ b/include/linux/aperture.h
@@ -14,7 +14,9 @@ int devm_aperture_acquire_for_platform_device(struct platform_device *pdev,
resource_size_t size);
int aperture_remove_conflicting_devices(resource_size_t base, resource_size_t size,
- bool primary, const char *name);
+ const char *name);
+
+int __aperture_remove_legacy_vga_devices(struct pci_dev *pdev);
int aperture_remove_conflicting_pci_devices(struct pci_dev *pdev, const char *name);
#else
@@ -26,7 +28,12 @@ static inline int devm_aperture_acquire_for_platform_device(struct platform_devi
}
static inline int aperture_remove_conflicting_devices(resource_size_t base, resource_size_t size,
- bool primary, const char *name)
+ const char *name)
+{
+ return 0;
+}
+
+static inline int __aperture_remove_legacy_vga_devices(struct pci_dev *pdev)
{
return 0;
}
@@ -39,7 +46,6 @@ static inline int aperture_remove_conflicting_pci_devices(struct pci_dev *pdev,
/**
* aperture_remove_all_conflicting_devices - remove all existing framebuffers
- * @primary: also kick vga16fb if present; only relevant for VGA devices
* @name: a descriptive name of the requesting driver
*
* This function removes all graphics device drivers. Use this function on systems
@@ -48,9 +54,9 @@ static inline int aperture_remove_conflicting_pci_devices(struct pci_dev *pdev,
* Returns:
* 0 on success, or a negative errno code otherwise
*/
-static inline int aperture_remove_all_conflicting_devices(bool primary, const char *name)
+static inline int aperture_remove_all_conflicting_devices(const char *name)
{
- return aperture_remove_conflicting_devices(0, (resource_size_t)-1, primary, name);
+ return aperture_remove_conflicting_devices(0, (resource_size_t)-1, name);
}
#endif
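With the @primary flag gone, callers pass only a descriptive name. A sketch of the usual probe-time takeover; the driver name is hypothetical:

#include <linux/aperture.h>

static int example_probe_takeover(void)
{
	/* remove generic/firmware framebuffer drivers before taking over */
	return aperture_remove_all_conflicting_devices("exampledrm");
}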
diff --git a/include/linux/apple-mailbox.h b/include/linux/apple-mailbox.h
deleted file mode 100644
index 720fbb70294a..000000000000
--- a/include/linux/apple-mailbox.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
-/*
- * Apple mailbox message format
- *
- * Copyright (C) 2021 The Asahi Linux Contributors
- */
-
-#ifndef _LINUX_APPLE_MAILBOX_H_
-#define _LINUX_APPLE_MAILBOX_H_
-
-#include <linux/types.h>
-
-/* encodes a single 96-bit message sent over the single channel */
-struct apple_mbox_msg {
- u64 msg0;
- u32 msg1;
-};
-
-#endif
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index a07b510e7dc5..a63d61ca55af 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -27,6 +27,13 @@ static inline unsigned long topology_get_cpu_scale(int cpu)
void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity);
+DECLARE_PER_CPU(unsigned long, capacity_freq_ref);
+
+static inline unsigned long topology_get_freq_ref(int cpu)
+{
+ return per_cpu(capacity_freq_ref, cpu);
+}
+
DECLARE_PER_CPU(unsigned long, arch_freq_scale);
static inline unsigned long topology_get_freq_scale(int cpu)
@@ -92,6 +99,7 @@ void update_siblings_masks(unsigned int cpu);
void remove_cpu_topology(unsigned int cpuid);
void reset_cpu_topology(void);
int parse_acpi_topology(void);
+void freq_inv_set_max_ratio(int cpu, u64 max_rate);
#endif
#endif /* _LINUX_ARCH_TOPOLOGY_H_ */
diff --git a/include/linux/args.h b/include/linux/args.h
new file mode 100644
index 000000000000..8ff60a54eb7d
--- /dev/null
+++ b/include/linux/args.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_ARGS_H
+#define _LINUX_ARGS_H
+
+/*
+ * How do these macros work?
+ *
+ * In __COUNT_ARGS() _0 to _12 are just placeholders from the start
+ * in order to make sure _n is positioned over the correct number
+ * from 12 to 0 (depending on X, which is a variadic argument list).
+ * They serve no purpose other than occupying a position. Since each
+ * macro parameter must have a distinct identifier, those identifiers
+ * are as good as any.
+ *
+ * In COUNT_ARGS() we use actual integers, so __COUNT_ARGS() returns
+ * that as _n.
+ */
+
+/* This counts up to 12 arguments; any more and it returns the 13th argument. */
+#define __COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _n, X...) _n
+#define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
+
+/* Concatenate two parameters, but allow them to be expanded beforehand. */
+#define __CONCAT(a, b) a ## b
+#define CONCATENATE(a, b) __CONCAT(a, b)
+
+#endif /* _LINUX_ARGS_H */
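A compile-time illustration of the counting trick described above; the HANDLER_* macros are invented here purely to show the arity-dispatch idiom that COUNT_ARGS() plus CONCATENATE() enables:

#include <linux/args.h>
#include <linux/build_bug.h>

static_assert(COUNT_ARGS() == 0);	/* ", ##X" swallows the leading comma */
static_assert(COUNT_ARGS(a) == 1);
static_assert(COUNT_ARGS(a, b, c) == 3);

#define HANDLER_1(x)	(x)
#define HANDLER_2(x, y)	((x) + (y))
/* dispatch on argument count: expands to HANDLER_1 or HANDLER_2 */
#define HANDLER(...)	CONCATENATE(HANDLER_, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__)

static_assert(HANDLER(5) == 5);
static_assert(HANDLER(2, 3) == 5);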
diff --git a/include/linux/arm-cci.h b/include/linux/arm-cci.h
index d0e44201d855..7f7a576267bc 100644
--- a/include/linux/arm-cci.h
+++ b/include/linux/arm-cci.h
@@ -43,6 +43,8 @@ static inline int __cci_control_port_by_index(u32 port, bool enable)
}
#endif
+void cci_enable_port_for_self(void);
+
#define cci_disable_port_by_device(dev) \
__cci_control_port_by_device(dev, false)
#define cci_enable_port_by_device(dev) \
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index f196c19f8e55..083f85653716 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -5,6 +5,7 @@
#ifndef __LINUX_ARM_SMCCC_H
#define __LINUX_ARM_SMCCC_H
+#include <linux/args.h>
#include <linux/init.h>
#include <uapi/linux/const.h>
@@ -66,6 +67,8 @@
#define ARM_SMCCC_VERSION_1_3 0x10003
#define ARM_SMCCC_1_3_SVE_HINT 0x10000
+#define ARM_SMCCC_CALL_HINTS ARM_SMCCC_1_3_SVE_HINT
+
#define ARM_SMCCC_VERSION_FUNC_ID \
ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
@@ -413,31 +416,26 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
#endif
-#define ___count_args(_0, _1, _2, _3, _4, _5, _6, _7, _8, x, ...) x
-
-#define __count_args(...) \
- ___count_args(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0)
-
-#define __constraint_read_0 "r" (arg0)
-#define __constraint_read_1 __constraint_read_0, "r" (arg1)
-#define __constraint_read_2 __constraint_read_1, "r" (arg2)
-#define __constraint_read_3 __constraint_read_2, "r" (arg3)
-#define __constraint_read_4 __constraint_read_3, "r" (arg4)
-#define __constraint_read_5 __constraint_read_4, "r" (arg5)
-#define __constraint_read_6 __constraint_read_5, "r" (arg6)
-#define __constraint_read_7 __constraint_read_6, "r" (arg7)
+#define __constraint_read_2 "r" (arg0)
+#define __constraint_read_3 __constraint_read_2, "r" (arg1)
+#define __constraint_read_4 __constraint_read_3, "r" (arg2)
+#define __constraint_read_5 __constraint_read_4, "r" (arg3)
+#define __constraint_read_6 __constraint_read_5, "r" (arg4)
+#define __constraint_read_7 __constraint_read_6, "r" (arg5)
+#define __constraint_read_8 __constraint_read_7, "r" (arg6)
+#define __constraint_read_9 __constraint_read_8, "r" (arg7)
-#define __declare_arg_0(a0, res) \
+#define __declare_arg_2(a0, res) \
struct arm_smccc_res *___res = res; \
register unsigned long arg0 asm("r0") = (u32)a0
-#define __declare_arg_1(a0, a1, res) \
+#define __declare_arg_3(a0, a1, res) \
typeof(a1) __a1 = a1; \
struct arm_smccc_res *___res = res; \
register unsigned long arg0 asm("r0") = (u32)a0; \
register typeof(a1) arg1 asm("r1") = __a1
-#define __declare_arg_2(a0, a1, a2, res) \
+#define __declare_arg_4(a0, a1, a2, res) \
typeof(a1) __a1 = a1; \
typeof(a2) __a2 = a2; \
struct arm_smccc_res *___res = res; \
@@ -445,7 +443,7 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
register typeof(a1) arg1 asm("r1") = __a1; \
register typeof(a2) arg2 asm("r2") = __a2
-#define __declare_arg_3(a0, a1, a2, a3, res) \
+#define __declare_arg_5(a0, a1, a2, a3, res) \
typeof(a1) __a1 = a1; \
typeof(a2) __a2 = a2; \
typeof(a3) __a3 = a3; \
@@ -455,34 +453,26 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
register typeof(a2) arg2 asm("r2") = __a2; \
register typeof(a3) arg3 asm("r3") = __a3
-#define __declare_arg_4(a0, a1, a2, a3, a4, res) \
+#define __declare_arg_6(a0, a1, a2, a3, a4, res) \
typeof(a4) __a4 = a4; \
- __declare_arg_3(a0, a1, a2, a3, res); \
+ __declare_arg_5(a0, a1, a2, a3, res); \
register typeof(a4) arg4 asm("r4") = __a4
-#define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \
+#define __declare_arg_7(a0, a1, a2, a3, a4, a5, res) \
typeof(a5) __a5 = a5; \
- __declare_arg_4(a0, a1, a2, a3, a4, res); \
+ __declare_arg_6(a0, a1, a2, a3, a4, res); \
register typeof(a5) arg5 asm("r5") = __a5
-#define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \
+#define __declare_arg_8(a0, a1, a2, a3, a4, a5, a6, res) \
typeof(a6) __a6 = a6; \
- __declare_arg_5(a0, a1, a2, a3, a4, a5, res); \
+ __declare_arg_7(a0, a1, a2, a3, a4, a5, res); \
register typeof(a6) arg6 asm("r6") = __a6
-#define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \
+#define __declare_arg_9(a0, a1, a2, a3, a4, a5, a6, a7, res) \
typeof(a7) __a7 = a7; \
- __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \
+ __declare_arg_8(a0, a1, a2, a3, a4, a5, a6, res); \
register typeof(a7) arg7 asm("r7") = __a7
-#define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__)
-#define __declare_args(count, ...) ___declare_args(count, __VA_ARGS__)
-
-#define ___constraints(count) \
- : __constraint_read_ ## count \
- : smccc_sve_clobbers "memory"
-#define __constraints(count) ___constraints(count)
-
/*
* We have an output list that is not necessarily used, and GCC feels
* entitled to optimise the whole sequence away. "volatile" is what
@@ -494,11 +484,14 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
register unsigned long r1 asm("r1"); \
register unsigned long r2 asm("r2"); \
register unsigned long r3 asm("r3"); \
- __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \
+ CONCATENATE(__declare_arg_, \
+ COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__); \
asm volatile(SMCCC_SVE_CHECK \
inst "\n" : \
"=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3) \
- __constraints(__count_args(__VA_ARGS__))); \
+ : CONCATENATE(__constraint_read_, \
+ COUNT_ARGS(__VA_ARGS__)) \
+ : smccc_sve_clobbers "memory"); \
if (___res) \
*___res = (typeof(*___res)){r0, r1, r2, r3}; \
} while (0)
@@ -542,8 +535,12 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
*/
#define __fail_smccc_1_1(...) \
do { \
- __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \
- asm ("" : __constraints(__count_args(__VA_ARGS__))); \
+ CONCATENATE(__declare_arg_, \
+ COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__); \
+ asm ("" : \
+ : CONCATENATE(__constraint_read_, \
+ COUNT_ARGS(__VA_ARGS__)) \
+ : smccc_sve_clobbers "memory"); \
if (___res) \
___res->a0 = SMCCC_RET_NOT_SUPPORTED; \
} while (0)
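The index shift above (e.g. __declare_arg_0 becoming __declare_arg_2) falls out of switching to the generic COUNT_ARGS(): it counts every macro argument, including the function ID and the result pointer, whereas the old __count_args() was calibrated to return the argument count minus two. A worked expansion for a zero-argument call such as arm_smccc_1_1_invoke(ARM_SMCCC_VERSION_FUNC_ID, &res):

	COUNT_ARGS(fid, &res)                      -> 2
	CONCATENATE(__declare_arg_, 2)(fid, &res)  -> __declare_arg_2(fid, &res)
	CONCATENATE(__constraint_read_, 2)         -> __constraint_read_2

i.e. exactly the register bindings and read constraints the old __declare_arg_0()/__constraint_read_0 pair produced, with the index base moved up by two.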
diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h
index 583fe3b49a49..3d0fde57ba90 100644
--- a/include/linux/arm_ffa.h
+++ b/include/linux/arm_ffa.h
@@ -6,6 +6,7 @@
#ifndef _LINUX_ARM_FFA_H
#define _LINUX_ARM_FFA_H
+#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/types.h>
@@ -20,6 +21,7 @@
#define FFA_ERROR FFA_SMC_32(0x60)
#define FFA_SUCCESS FFA_SMC_32(0x61)
+#define FFA_FN64_SUCCESS FFA_SMC_64(0x61)
#define FFA_INTERRUPT FFA_SMC_32(0x62)
#define FFA_VERSION FFA_SMC_32(0x63)
#define FFA_FEATURES FFA_SMC_32(0x64)
@@ -54,6 +56,23 @@
#define FFA_MEM_FRAG_RX FFA_SMC_32(0x7A)
#define FFA_MEM_FRAG_TX FFA_SMC_32(0x7B)
#define FFA_NORMAL_WORLD_RESUME FFA_SMC_32(0x7C)
+#define FFA_NOTIFICATION_BITMAP_CREATE FFA_SMC_32(0x7D)
+#define FFA_NOTIFICATION_BITMAP_DESTROY FFA_SMC_32(0x7E)
+#define FFA_NOTIFICATION_BIND FFA_SMC_32(0x7F)
+#define FFA_NOTIFICATION_UNBIND FFA_SMC_32(0x80)
+#define FFA_NOTIFICATION_SET FFA_SMC_32(0x81)
+#define FFA_NOTIFICATION_GET FFA_SMC_32(0x82)
+#define FFA_NOTIFICATION_INFO_GET FFA_SMC_32(0x83)
+#define FFA_FN64_NOTIFICATION_INFO_GET FFA_SMC_64(0x83)
+#define FFA_RX_ACQUIRE FFA_SMC_32(0x84)
+#define FFA_SPM_ID_GET FFA_SMC_32(0x85)
+#define FFA_MSG_SEND2 FFA_SMC_32(0x86)
+#define FFA_SECONDARY_EP_REGISTER FFA_SMC_32(0x87)
+#define FFA_FN64_SECONDARY_EP_REGISTER FFA_SMC_64(0x87)
+#define FFA_MEM_PERM_GET FFA_SMC_32(0x88)
+#define FFA_FN64_MEM_PERM_GET FFA_SMC_64(0x88)
+#define FFA_MEM_PERM_SET FFA_SMC_32(0x89)
+#define FFA_FN64_MEM_PERM_SET FFA_SMC_64(0x89)
/*
* For some calls it is necessary to use SMC64 to pass or return 64-bit values.
@@ -76,6 +95,7 @@
#define FFA_RET_DENIED (-6)
#define FFA_RET_RETRY (-7)
#define FFA_RET_ABORTED (-8)
+#define FFA_RET_NO_DATA (-9)
/* FFA version encoding */
#define FFA_MAJOR_VERSION_MASK GENMASK(30, 16)
@@ -86,6 +106,7 @@
(FIELD_PREP(FFA_MAJOR_VERSION_MASK, (major)) | \
FIELD_PREP(FFA_MINOR_VERSION_MASK, (minor)))
#define FFA_VERSION_1_0 FFA_PACK_VERSION_INFO(1, 0)
+#define FFA_VERSION_1_1 FFA_PACK_VERSION_INFO(1, 1)
/**
* FF-A specification mentions explicitly about '4K pages'. This should
@@ -94,6 +115,14 @@
*/
#define FFA_PAGE_SIZE SZ_4K
+/*
+ * Minimum buffer size/alignment encodings returned by an FFA_FEATURES
+ * query for FFA_RXTX_MAP.
+ */
+#define FFA_FEAT_RXTX_MIN_SZ_4K 0
+#define FFA_FEAT_RXTX_MIN_SZ_64K 1
+#define FFA_FEAT_RXTX_MIN_SZ_16K 2
+
/* FFA Bus/Device/Driver related */
struct ffa_device {
u32 id;
@@ -180,6 +209,8 @@ bool ffa_device_is_valid(struct ffa_device *ffa_dev) { return false; }
#define module_ffa_driver(__ffa_driver) \
module_driver(__ffa_driver, ffa_register, ffa_unregister)
+extern struct bus_type ffa_bus_type;
+
/* FFA transport related */
struct ffa_partition_info {
u16 id;
@@ -270,8 +301,8 @@ struct ffa_mem_region {
#define FFA_MEM_NON_SHAREABLE (0)
#define FFA_MEM_OUTER_SHAREABLE (2)
#define FFA_MEM_INNER_SHAREABLE (3)
- u8 attributes;
- u8 reserved_0;
+ /* Memory region attributes, upper byte MBZ pre v1.1 */
+ u16 attributes;
/*
* Clear memory region contents after unmapping it from the sender and
* before mapping it for any receiver.
@@ -309,27 +340,41 @@ struct ffa_mem_region {
* memory region.
*/
u64 tag;
- u32 reserved_1;
+ /* Size of each endpoint memory access descriptor, MBZ pre v1.1 */
+ u32 ep_mem_size;
/*
* The number of `ffa_mem_region_attributes` entries included in this
* transaction.
*/
u32 ep_count;
/*
- * An array of endpoint memory access descriptors.
- * Each one specifies a memory region offset, an endpoint and the
- * attributes with which this memory region should be mapped in that
- * endpoint's page table.
+ * 16-byte aligned offset from the base address of this descriptor
+ * to the first element of the endpoint memory access descriptor
+ * array. Valid only from v1.1 onwards.
*/
- struct ffa_mem_region_attributes ep_mem_access[];
+ u32 ep_mem_offset;
+ /* MBZ, valid only from v1.1 */
+ u32 reserved[3];
};
-#define COMPOSITE_OFFSET(x) \
- (offsetof(struct ffa_mem_region, ep_mem_access[x]))
#define CONSTITUENTS_OFFSET(x) \
(offsetof(struct ffa_composite_mem_region, constituents[x]))
-#define COMPOSITE_CONSTITUENTS_OFFSET(x, y) \
- (COMPOSITE_OFFSET(x) + CONSTITUENTS_OFFSET(y))
+
+static inline u32
+ffa_mem_desc_offset(struct ffa_mem_region *buf, int count, u32 ffa_version)
+{
+ u32 offset = count * sizeof(struct ffa_mem_region_attributes);
+ /*
+ * Prior to v1.1, the endpoint memory access descriptor array started
+ * at offset 32 (i.e. the offset of ep_mem_offset in the current
+ * structure).
+ */
+ if (ffa_version <= FFA_VERSION_1_0)
+ offset += offsetof(struct ffa_mem_region, ep_mem_offset);
+ else
+ offset += sizeof(struct ffa_mem_region);
+
+ return offset;
+}
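/*
 * Worked example for ffa_mem_desc_offset() above, assuming
 * sizeof(struct ffa_mem_region_attributes) == 16 (its current layout):
 *
 *   v1.0, count = 2:  offset = 2 * 16 + 32 = 64, the array having
 *                     started at offset 32 (where ep_mem_offset now sits)
 *   v1.1, count = 2:  offset = 2 * 16 + sizeof(struct ffa_mem_region),
 *                     the array now following the full v1.1 descriptor
 */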
struct ffa_mem_ops_args {
bool use_txbuf;
@@ -359,10 +404,30 @@ struct ffa_mem_ops {
int (*memory_lend)(struct ffa_mem_ops_args *args);
};
+struct ffa_cpu_ops {
+ int (*run)(struct ffa_device *dev, u16 vcpu);
+};
+
+typedef void (*ffa_sched_recv_cb)(u16 vcpu, bool is_per_vcpu, void *cb_data);
+typedef void (*ffa_notifier_cb)(int notify_id, void *cb_data);
+
+struct ffa_notifier_ops {
+ int (*sched_recv_cb_register)(struct ffa_device *dev,
+ ffa_sched_recv_cb cb, void *cb_data);
+ int (*sched_recv_cb_unregister)(struct ffa_device *dev);
+ int (*notify_request)(struct ffa_device *dev, bool per_vcpu,
+ ffa_notifier_cb cb, void *cb_data, int notify_id);
+ int (*notify_relinquish)(struct ffa_device *dev, int notify_id);
+ int (*notify_send)(struct ffa_device *dev, int notify_id, bool per_vcpu,
+ u16 vcpu);
+};
+
struct ffa_ops {
const struct ffa_info_ops *info_ops;
const struct ffa_msg_ops *msg_ops;
const struct ffa_mem_ops *mem_ops;
+ const struct ffa_cpu_ops *cpu_ops;
+ const struct ffa_notifier_ops *notifier_ops;
};
#endif /* _LINUX_ARM_FFA_H */
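A sketch of how an FF-A client driver might use the new notifier_ops; the callback body and the notification ID are illustrative, and the ops are assumed reachable through the device's ffa_ops pointer:

#include <linux/arm_ffa.h>

static void example_notify_cb(int notify_id, void *cb_data)
{
	/* runs when the partition signals notification @notify_id */
}

static int example_setup(struct ffa_device *dev)
{
	const struct ffa_notifier_ops *ops = dev->ops->notifier_ops;

	/* request a per-vCPU notification; ID 3 is chosen arbitrarily */
	return ops->notify_request(dev, true, example_notify_cb, NULL, 3);
}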
diff --git a/include/linux/arm_sdei.h b/include/linux/arm_sdei.h
index 14dc461b0e82..255701e1251b 100644
--- a/include/linux/arm_sdei.h
+++ b/include/linux/arm_sdei.h
@@ -47,10 +47,12 @@ int sdei_unregister_ghes(struct ghes *ghes);
int sdei_mask_local_cpu(void);
int sdei_unmask_local_cpu(void);
void __init sdei_init(void);
+void sdei_handler_abort(void);
#else
static inline int sdei_mask_local_cpu(void) { return 0; }
static inline int sdei_unmask_local_cpu(void) { return 0; }
static inline void sdei_init(void) { }
+static inline void sdei_handler_abort(void) { }
#endif /* CONFIG_ARM_SDE_INTERFACE */
diff --git a/include/linux/array_size.h b/include/linux/array_size.h
new file mode 100644
index 000000000000..06d7d83196ca
--- /dev/null
+++ b/include/linux/array_size.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_ARRAY_SIZE_H
+#define _LINUX_ARRAY_SIZE_H
+
+#include <linux/compiler.h>
+
+/**
+ * ARRAY_SIZE - get the number of elements in array @arr
+ * @arr: array to be sized
+ */
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
+
+#endif /* _LINUX_ARRAY_SIZE_H */
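The __must_be_array() term evaluates to 0 for a true array but breaks the build for a pointer, so the classic sizeof-division mistake is caught at compile time:

#include <linux/array_size.h>
#include <linux/build_bug.h>

static int tbl[8];

static_assert(ARRAY_SIZE(tbl) == 8);	/* tbl is a true array: OK */

/*
 * int *p = tbl;
 * ARRAY_SIZE(p);	<- would fail to build: p is a pointer, not an array
 */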
diff --git a/include/linux/async.h b/include/linux/async.h
index cce4ad31e8fc..33c9ff4afb49 100644
--- a/include/linux/async.h
+++ b/include/linux/async.h
@@ -90,6 +90,8 @@ async_schedule_dev(async_func_t func, struct device *dev)
return async_schedule_node(func, dev, dev_to_node(dev));
}
+bool async_schedule_dev_nocall(async_func_t func, struct device *dev);
+
/**
* async_schedule_dev_domain - A device specific version of async_schedule_domain
* @func: function to execute asynchronously
diff --git a/include/linux/ata.h b/include/linux/ata.h
index c224dbddb9b2..792e10a09787 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -322,15 +322,21 @@ enum {
ATA_LOG_SATA_NCQ = 0x10,
ATA_LOG_NCQ_NON_DATA = 0x12,
ATA_LOG_NCQ_SEND_RECV = 0x13,
+ ATA_LOG_CDL = 0x18,
+ ATA_LOG_CDL_SIZE = ATA_SECT_SIZE,
ATA_LOG_IDENTIFY_DEVICE = 0x30,
+ ATA_LOG_SENSE_NCQ = 0x0F,
+ ATA_LOG_SENSE_NCQ_SIZE = ATA_SECT_SIZE * 2,
ATA_LOG_CONCURRENT_POSITIONING_RANGES = 0x47,
/* Identify device log pages: */
+ ATA_LOG_SUPPORTED_CAPABILITIES = 0x03,
+ ATA_LOG_CURRENT_SETTINGS = 0x04,
ATA_LOG_SECURITY = 0x06,
ATA_LOG_SATA_SETTINGS = 0x08,
ATA_LOG_ZONED_INFORMATION = 0x09,
- /* Identify device SATA settings log:*/
+ /* Identify device SATA settings log: */
ATA_LOG_DEVSLP_OFFSET = 0x30,
ATA_LOG_DEVSLP_SIZE = 0x08,
ATA_LOG_DEVSLP_MDAT = 0x00,
@@ -415,6 +421,8 @@ enum {
SETFEATURES_SATA_ENABLE = 0x10, /* Enable use of SATA feature */
SETFEATURES_SATA_DISABLE = 0x90, /* Disable use of SATA feature */
+ SETFEATURES_CDL = 0x0d, /* Enable/disable cmd duration limits */
+
/* SETFEATURE Sector counts for SATA features */
SATA_FPDMA_OFFSET = 0x01, /* FPDMA non-zero buffer offsets */
SATA_FPDMA_AA = 0x02, /* FPDMA Setup FIS Auto-Activate */
@@ -425,6 +433,7 @@ enum {
SATA_DEVSLP = 0x09, /* Device Sleep */
SETFEATURE_SENSE_DATA = 0xC3, /* Sense Data Reporting feature */
+ SETFEATURE_SENSE_DATA_SUCC_NCQ = 0xC4, /* Sense Data for successful NCQ commands */
/* feature values for SET_MAX */
ATA_SET_MAX_ADDR = 0x00,
diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h
deleted file mode 100644
index 1491af38cc6e..000000000000
--- a/include/linux/atmel-mci.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_ATMEL_MCI_H
-#define __LINUX_ATMEL_MCI_H
-
-#include <linux/types.h>
-#include <linux/dmaengine.h>
-
-#define ATMCI_MAX_NR_SLOTS 2
-
-/**
- * struct mci_slot_pdata - board-specific per-slot configuration
- * @bus_width: Number of data lines wired up to the slot
- * @detect_pin: GPIO pin wired to the card detect switch
- * @wp_pin: GPIO pin wired to the write protect sensor
- * @detect_is_active_high: The state of the detect pin when it is active
- * @non_removable: The slot is not removable, only detect once
- *
- * If a given slot is not present on the board, @bus_width should be
- * set to 0. The other fields are ignored in this case.
- *
- * Any pins that aren't available should be set to a negative value.
- *
- * Note that support for multiple slots is experimental -- some cards
- * might get upset if we don't get the clock management exactly right.
- * But in most cases, it should work just fine.
- */
-struct mci_slot_pdata {
- unsigned int bus_width;
- int detect_pin;
- int wp_pin;
- bool detect_is_active_high;
- bool non_removable;
-};
-
-/**
- * struct mci_platform_data - board-specific MMC/SD card configuration
- * @dma_slave: DMA slave interface to use in data transfers.
- * @dma_filter: DMA engine filter function used to select a channel.
- * @slot: Per-slot configuration data.
- */
-struct mci_platform_data {
- void *dma_slave;
- dma_filter_fn dma_filter;
- struct mci_slot_pdata slot[ATMCI_MAX_NR_SLOTS];
-};
-
-#endif /* __LINUX_ATMEL_MCI_H */
diff --git a/include/linux/atomic/atomic-arch-fallback.h b/include/linux/atomic/atomic-arch-fallback.h
index a6e4437c5f36..5e95faa959c4 100644
--- a/include/linux/atomic/atomic-arch-fallback.h
+++ b/include/linux/atomic/atomic-arch-fallback.h
@@ -8,2664 +8,4658 @@
#include <linux/compiler.h>
-#ifndef arch_xchg_relaxed
-#define arch_xchg_acquire arch_xchg
-#define arch_xchg_release arch_xchg
-#define arch_xchg_relaxed arch_xchg
-#else /* arch_xchg_relaxed */
-
-#ifndef arch_xchg_acquire
-#define arch_xchg_acquire(...) \
- __atomic_op_acquire(arch_xchg, __VA_ARGS__)
+#if defined(arch_xchg)
+#define raw_xchg arch_xchg
+#elif defined(arch_xchg_relaxed)
+#define raw_xchg(...) \
+ __atomic_op_fence(arch_xchg, __VA_ARGS__)
+#else
+extern void raw_xchg_not_implemented(void);
+#define raw_xchg(...) raw_xchg_not_implemented()
#endif
-#ifndef arch_xchg_release
-#define arch_xchg_release(...) \
- __atomic_op_release(arch_xchg, __VA_ARGS__)
+#if defined(arch_xchg_acquire)
+#define raw_xchg_acquire arch_xchg_acquire
+#elif defined(arch_xchg_relaxed)
+#define raw_xchg_acquire(...) \
+ __atomic_op_acquire(arch_xchg, __VA_ARGS__)
+#elif defined(arch_xchg)
+#define raw_xchg_acquire arch_xchg
+#else
+extern void raw_xchg_acquire_not_implemented(void);
+#define raw_xchg_acquire(...) raw_xchg_acquire_not_implemented()
#endif
-#ifndef arch_xchg
-#define arch_xchg(...) \
- __atomic_op_fence(arch_xchg, __VA_ARGS__)
+#if defined(arch_xchg_release)
+#define raw_xchg_release arch_xchg_release
+#elif defined(arch_xchg_relaxed)
+#define raw_xchg_release(...) \
+ __atomic_op_release(arch_xchg, __VA_ARGS__)
+#elif defined(arch_xchg)
+#define raw_xchg_release arch_xchg
+#else
+extern void raw_xchg_release_not_implemented(void);
+#define raw_xchg_release(...) raw_xchg_release_not_implemented()
+#endif
+
+#if defined(arch_xchg_relaxed)
+#define raw_xchg_relaxed arch_xchg_relaxed
+#elif defined(arch_xchg)
+#define raw_xchg_relaxed arch_xchg
+#else
+extern void raw_xchg_relaxed_not_implemented(void);
+#define raw_xchg_relaxed(...) raw_xchg_relaxed_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg)
+#define raw_cmpxchg arch_cmpxchg
+#elif defined(arch_cmpxchg_relaxed)
+#define raw_cmpxchg(...) \
+ __atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
+#else
+extern void raw_cmpxchg_not_implemented(void);
+#define raw_cmpxchg(...) raw_cmpxchg_not_implemented()
#endif
-#endif /* arch_xchg_relaxed */
-
-#ifndef arch_cmpxchg_relaxed
-#define arch_cmpxchg_acquire arch_cmpxchg
-#define arch_cmpxchg_release arch_cmpxchg
-#define arch_cmpxchg_relaxed arch_cmpxchg
-#else /* arch_cmpxchg_relaxed */
-
-#ifndef arch_cmpxchg_acquire
-#define arch_cmpxchg_acquire(...) \
+#if defined(arch_cmpxchg_acquire)
+#define raw_cmpxchg_acquire arch_cmpxchg_acquire
+#elif defined(arch_cmpxchg_relaxed)
+#define raw_cmpxchg_acquire(...) \
__atomic_op_acquire(arch_cmpxchg, __VA_ARGS__)
+#elif defined(arch_cmpxchg)
+#define raw_cmpxchg_acquire arch_cmpxchg
+#else
+extern void raw_cmpxchg_acquire_not_implemented(void);
+#define raw_cmpxchg_acquire(...) raw_cmpxchg_acquire_not_implemented()
#endif
-#ifndef arch_cmpxchg_release
-#define arch_cmpxchg_release(...) \
+#if defined(arch_cmpxchg_release)
+#define raw_cmpxchg_release arch_cmpxchg_release
+#elif defined(arch_cmpxchg_relaxed)
+#define raw_cmpxchg_release(...) \
__atomic_op_release(arch_cmpxchg, __VA_ARGS__)
+#elif defined(arch_cmpxchg)
+#define raw_cmpxchg_release arch_cmpxchg
+#else
+extern void raw_cmpxchg_release_not_implemented(void);
+#define raw_cmpxchg_release(...) raw_cmpxchg_release_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg_relaxed)
+#define raw_cmpxchg_relaxed arch_cmpxchg_relaxed
+#elif defined(arch_cmpxchg)
+#define raw_cmpxchg_relaxed arch_cmpxchg
+#else
+extern void raw_cmpxchg_relaxed_not_implemented(void);
+#define raw_cmpxchg_relaxed(...) raw_cmpxchg_relaxed_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg64)
+#define raw_cmpxchg64 arch_cmpxchg64
+#elif defined(arch_cmpxchg64_relaxed)
+#define raw_cmpxchg64(...) \
+ __atomic_op_fence(arch_cmpxchg64, __VA_ARGS__)
+#else
+extern void raw_cmpxchg64_not_implemented(void);
+#define raw_cmpxchg64(...) raw_cmpxchg64_not_implemented()
#endif
-#ifndef arch_cmpxchg
-#define arch_cmpxchg(...) \
- __atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
-#endif
-
-#endif /* arch_cmpxchg_relaxed */
-
-#ifndef arch_cmpxchg64_relaxed
-#define arch_cmpxchg64_acquire arch_cmpxchg64
-#define arch_cmpxchg64_release arch_cmpxchg64
-#define arch_cmpxchg64_relaxed arch_cmpxchg64
-#else /* arch_cmpxchg64_relaxed */
-
-#ifndef arch_cmpxchg64_acquire
-#define arch_cmpxchg64_acquire(...) \
+#if defined(arch_cmpxchg64_acquire)
+#define raw_cmpxchg64_acquire arch_cmpxchg64_acquire
+#elif defined(arch_cmpxchg64_relaxed)
+#define raw_cmpxchg64_acquire(...) \
__atomic_op_acquire(arch_cmpxchg64, __VA_ARGS__)
+#elif defined(arch_cmpxchg64)
+#define raw_cmpxchg64_acquire arch_cmpxchg64
+#else
+extern void raw_cmpxchg64_acquire_not_implemented(void);
+#define raw_cmpxchg64_acquire(...) raw_cmpxchg64_acquire_not_implemented()
#endif
-#ifndef arch_cmpxchg64_release
-#define arch_cmpxchg64_release(...) \
+#if defined(arch_cmpxchg64_release)
+#define raw_cmpxchg64_release arch_cmpxchg64_release
+#elif defined(arch_cmpxchg64_relaxed)
+#define raw_cmpxchg64_release(...) \
__atomic_op_release(arch_cmpxchg64, __VA_ARGS__)
+#elif defined(arch_cmpxchg64)
+#define raw_cmpxchg64_release arch_cmpxchg64
+#else
+extern void raw_cmpxchg64_release_not_implemented(void);
+#define raw_cmpxchg64_release(...) raw_cmpxchg64_release_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg64_relaxed)
+#define raw_cmpxchg64_relaxed arch_cmpxchg64_relaxed
+#elif defined(arch_cmpxchg64)
+#define raw_cmpxchg64_relaxed arch_cmpxchg64
+#else
+extern void raw_cmpxchg64_relaxed_not_implemented(void);
+#define raw_cmpxchg64_relaxed(...) raw_cmpxchg64_relaxed_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg128)
+#define raw_cmpxchg128 arch_cmpxchg128
+#elif defined(arch_cmpxchg128_relaxed)
+#define raw_cmpxchg128(...) \
+ __atomic_op_fence(arch_cmpxchg128, __VA_ARGS__)
+#else
+extern void raw_cmpxchg128_not_implemented(void);
+#define raw_cmpxchg128(...) raw_cmpxchg128_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg128_acquire)
+#define raw_cmpxchg128_acquire arch_cmpxchg128_acquire
+#elif defined(arch_cmpxchg128_relaxed)
+#define raw_cmpxchg128_acquire(...) \
+ __atomic_op_acquire(arch_cmpxchg128, __VA_ARGS__)
+#elif defined(arch_cmpxchg128)
+#define raw_cmpxchg128_acquire arch_cmpxchg128
+#else
+extern void raw_cmpxchg128_acquire_not_implemented(void);
+#define raw_cmpxchg128_acquire(...) raw_cmpxchg128_acquire_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg128_release)
+#define raw_cmpxchg128_release arch_cmpxchg128_release
+#elif defined(arch_cmpxchg128_relaxed)
+#define raw_cmpxchg128_release(...) \
+ __atomic_op_release(arch_cmpxchg128, __VA_ARGS__)
+#elif defined(arch_cmpxchg128)
+#define raw_cmpxchg128_release arch_cmpxchg128
+#else
+extern void raw_cmpxchg128_release_not_implemented(void);
+#define raw_cmpxchg128_release(...) raw_cmpxchg128_release_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg128_relaxed)
+#define raw_cmpxchg128_relaxed arch_cmpxchg128_relaxed
+#elif defined(arch_cmpxchg128)
+#define raw_cmpxchg128_relaxed arch_cmpxchg128
+#else
+extern void raw_cmpxchg128_relaxed_not_implemented(void);
+#define raw_cmpxchg128_relaxed(...) raw_cmpxchg128_relaxed_not_implemented()
+#endif
+
+#if defined(arch_try_cmpxchg)
+#define raw_try_cmpxchg arch_try_cmpxchg
+#elif defined(arch_try_cmpxchg_relaxed)
+#define raw_try_cmpxchg(...) \
+ __atomic_op_fence(arch_try_cmpxchg, __VA_ARGS__)
+#else
+#define raw_try_cmpxchg(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
#endif
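/*
 * The fallback above gives try_cmpxchg semantics on top of a plain
 * cmpxchg: return true on success, otherwise write the value actually
 * observed back through the old-value pointer. A sketch of the usual
 * caller idiom built on it (not from this file):
 */
static inline void example_atomic_inc(int *p)
{
	int old = *p;

	/* on failure the macro has refreshed 'old'; simply retry */
	while (!raw_try_cmpxchg(p, &old, old + 1))
		;
}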
-#ifndef arch_cmpxchg64
-#define arch_cmpxchg64(...) \
- __atomic_op_fence(arch_cmpxchg64, __VA_ARGS__)
+#if defined(arch_try_cmpxchg_acquire)
+#define raw_try_cmpxchg_acquire arch_try_cmpxchg_acquire
+#elif defined(arch_try_cmpxchg_relaxed)
+#define raw_try_cmpxchg_acquire(...) \
+ __atomic_op_acquire(arch_try_cmpxchg, __VA_ARGS__)
+#elif defined(arch_try_cmpxchg)
+#define raw_try_cmpxchg_acquire arch_try_cmpxchg
+#else
+#define raw_try_cmpxchg_acquire(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg_acquire((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
#endif
-#endif /* arch_cmpxchg64_relaxed */
-
-#ifndef arch_try_cmpxchg_relaxed
-#ifdef arch_try_cmpxchg
-#define arch_try_cmpxchg_acquire arch_try_cmpxchg
-#define arch_try_cmpxchg_release arch_try_cmpxchg
-#define arch_try_cmpxchg_relaxed arch_try_cmpxchg
-#endif /* arch_try_cmpxchg */
-
-#ifndef arch_try_cmpxchg
-#define arch_try_cmpxchg(_ptr, _oldp, _new) \
+#if defined(arch_try_cmpxchg_release)
+#define raw_try_cmpxchg_release arch_try_cmpxchg_release
+#elif defined(arch_try_cmpxchg_relaxed)
+#define raw_try_cmpxchg_release(...) \
+ __atomic_op_release(arch_try_cmpxchg, __VA_ARGS__)
+#elif defined(arch_try_cmpxchg)
+#define raw_try_cmpxchg_release arch_try_cmpxchg
+#else
+#define raw_try_cmpxchg_release(_ptr, _oldp, _new) \
({ \
typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
- ___r = arch_cmpxchg((_ptr), ___o, (_new)); \
+ ___r = raw_cmpxchg_release((_ptr), ___o, (_new)); \
if (unlikely(___r != ___o)) \
*___op = ___r; \
likely(___r == ___o); \
})
-#endif /* arch_try_cmpxchg */
+#endif
-#ifndef arch_try_cmpxchg_acquire
-#define arch_try_cmpxchg_acquire(_ptr, _oldp, _new) \
+#if defined(arch_try_cmpxchg_relaxed)
+#define raw_try_cmpxchg_relaxed arch_try_cmpxchg_relaxed
+#elif defined(arch_try_cmpxchg)
+#define raw_try_cmpxchg_relaxed arch_try_cmpxchg
+#else
+#define raw_try_cmpxchg_relaxed(_ptr, _oldp, _new) \
({ \
typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
- ___r = arch_cmpxchg_acquire((_ptr), ___o, (_new)); \
+ ___r = raw_cmpxchg_relaxed((_ptr), ___o, (_new)); \
if (unlikely(___r != ___o)) \
*___op = ___r; \
likely(___r == ___o); \
})
-#endif /* arch_try_cmpxchg_acquire */
+#endif
-#ifndef arch_try_cmpxchg_release
-#define arch_try_cmpxchg_release(_ptr, _oldp, _new) \
+#if defined(arch_try_cmpxchg64)
+#define raw_try_cmpxchg64 arch_try_cmpxchg64
+#elif defined(arch_try_cmpxchg64_relaxed)
+#define raw_try_cmpxchg64(...) \
+ __atomic_op_fence(arch_try_cmpxchg64, __VA_ARGS__)
+#else
+#define raw_try_cmpxchg64(_ptr, _oldp, _new) \
({ \
typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
- ___r = arch_cmpxchg_release((_ptr), ___o, (_new)); \
+ ___r = raw_cmpxchg64((_ptr), ___o, (_new)); \
if (unlikely(___r != ___o)) \
*___op = ___r; \
likely(___r == ___o); \
})
-#endif /* arch_try_cmpxchg_release */
+#endif
-#ifndef arch_try_cmpxchg_relaxed
-#define arch_try_cmpxchg_relaxed(_ptr, _oldp, _new) \
+#if defined(arch_try_cmpxchg64_acquire)
+#define raw_try_cmpxchg64_acquire arch_try_cmpxchg64_acquire
+#elif defined(arch_try_cmpxchg64_relaxed)
+#define raw_try_cmpxchg64_acquire(...) \
+ __atomic_op_acquire(arch_try_cmpxchg64, __VA_ARGS__)
+#elif defined(arch_try_cmpxchg64)
+#define raw_try_cmpxchg64_acquire arch_try_cmpxchg64
+#else
+#define raw_try_cmpxchg64_acquire(_ptr, _oldp, _new) \
({ \
typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
- ___r = arch_cmpxchg_relaxed((_ptr), ___o, (_new)); \
+ ___r = raw_cmpxchg64_acquire((_ptr), ___o, (_new)); \
if (unlikely(___r != ___o)) \
*___op = ___r; \
likely(___r == ___o); \
})
-#endif /* arch_try_cmpxchg_relaxed */
-
-#else /* arch_try_cmpxchg_relaxed */
-
-#ifndef arch_try_cmpxchg_acquire
-#define arch_try_cmpxchg_acquire(...) \
- __atomic_op_acquire(arch_try_cmpxchg, __VA_ARGS__)
#endif
-#ifndef arch_try_cmpxchg_release
-#define arch_try_cmpxchg_release(...) \
- __atomic_op_release(arch_try_cmpxchg, __VA_ARGS__)
+#if defined(arch_try_cmpxchg64_release)
+#define raw_try_cmpxchg64_release arch_try_cmpxchg64_release
+#elif defined(arch_try_cmpxchg64_relaxed)
+#define raw_try_cmpxchg64_release(...) \
+ __atomic_op_release(arch_try_cmpxchg64, __VA_ARGS__)
+#elif defined(arch_try_cmpxchg64)
+#define raw_try_cmpxchg64_release arch_try_cmpxchg64
+#else
+#define raw_try_cmpxchg64_release(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg64_release((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
#endif
-#ifndef arch_try_cmpxchg
-#define arch_try_cmpxchg(...) \
- __atomic_op_fence(arch_try_cmpxchg, __VA_ARGS__)
+#if defined(arch_try_cmpxchg64_relaxed)
+#define raw_try_cmpxchg64_relaxed arch_try_cmpxchg64_relaxed
+#elif defined(arch_try_cmpxchg64)
+#define raw_try_cmpxchg64_relaxed arch_try_cmpxchg64
+#else
+#define raw_try_cmpxchg64_relaxed(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg64_relaxed((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
#endif
-#endif /* arch_try_cmpxchg_relaxed */
-
-#ifndef arch_try_cmpxchg64_relaxed
-#ifdef arch_try_cmpxchg64
-#define arch_try_cmpxchg64_acquire arch_try_cmpxchg64
-#define arch_try_cmpxchg64_release arch_try_cmpxchg64
-#define arch_try_cmpxchg64_relaxed arch_try_cmpxchg64
-#endif /* arch_try_cmpxchg64 */
-
-#ifndef arch_try_cmpxchg64
-#define arch_try_cmpxchg64(_ptr, _oldp, _new) \
+#if defined(arch_try_cmpxchg128)
+#define raw_try_cmpxchg128 arch_try_cmpxchg128
+#elif defined(arch_try_cmpxchg128_relaxed)
+#define raw_try_cmpxchg128(...) \
+ __atomic_op_fence(arch_try_cmpxchg128, __VA_ARGS__)
+#else
+#define raw_try_cmpxchg128(_ptr, _oldp, _new) \
({ \
typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
- ___r = arch_cmpxchg64((_ptr), ___o, (_new)); \
+ ___r = raw_cmpxchg128((_ptr), ___o, (_new)); \
if (unlikely(___r != ___o)) \
*___op = ___r; \
likely(___r == ___o); \
})
-#endif /* arch_try_cmpxchg64 */
+#endif
-#ifndef arch_try_cmpxchg64_acquire
-#define arch_try_cmpxchg64_acquire(_ptr, _oldp, _new) \
+#if defined(arch_try_cmpxchg128_acquire)
+#define raw_try_cmpxchg128_acquire arch_try_cmpxchg128_acquire
+#elif defined(arch_try_cmpxchg128_relaxed)
+#define raw_try_cmpxchg128_acquire(...) \
+ __atomic_op_acquire(arch_try_cmpxchg128, __VA_ARGS__)
+#elif defined(arch_try_cmpxchg128)
+#define raw_try_cmpxchg128_acquire arch_try_cmpxchg128
+#else
+#define raw_try_cmpxchg128_acquire(_ptr, _oldp, _new) \
({ \
typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
- ___r = arch_cmpxchg64_acquire((_ptr), ___o, (_new)); \
+ ___r = raw_cmpxchg128_acquire((_ptr), ___o, (_new)); \
if (unlikely(___r != ___o)) \
*___op = ___r; \
likely(___r == ___o); \
})
-#endif /* arch_try_cmpxchg64_acquire */
+#endif
-#ifndef arch_try_cmpxchg64_release
-#define arch_try_cmpxchg64_release(_ptr, _oldp, _new) \
+#if defined(arch_try_cmpxchg128_release)
+#define raw_try_cmpxchg128_release arch_try_cmpxchg128_release
+#elif defined(arch_try_cmpxchg128_relaxed)
+#define raw_try_cmpxchg128_release(...) \
+ __atomic_op_release(arch_try_cmpxchg128, __VA_ARGS__)
+#elif defined(arch_try_cmpxchg128)
+#define raw_try_cmpxchg128_release arch_try_cmpxchg128
+#else
+#define raw_try_cmpxchg128_release(_ptr, _oldp, _new) \
({ \
typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
- ___r = arch_cmpxchg64_release((_ptr), ___o, (_new)); \
+ ___r = raw_cmpxchg128_release((_ptr), ___o, (_new)); \
if (unlikely(___r != ___o)) \
*___op = ___r; \
likely(___r == ___o); \
})
-#endif /* arch_try_cmpxchg64_release */
+#endif
-#ifndef arch_try_cmpxchg64_relaxed
-#define arch_try_cmpxchg64_relaxed(_ptr, _oldp, _new) \
+#if defined(arch_try_cmpxchg128_relaxed)
+#define raw_try_cmpxchg128_relaxed arch_try_cmpxchg128_relaxed
+#elif defined(arch_try_cmpxchg128)
+#define raw_try_cmpxchg128_relaxed arch_try_cmpxchg128
+#else
+#define raw_try_cmpxchg128_relaxed(_ptr, _oldp, _new) \
({ \
typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
- ___r = arch_cmpxchg64_relaxed((_ptr), ___o, (_new)); \
+ ___r = raw_cmpxchg128_relaxed((_ptr), ___o, (_new)); \
if (unlikely(___r != ___o)) \
*___op = ___r; \
likely(___r == ___o); \
})
-#endif /* arch_try_cmpxchg64_relaxed */
+#endif
-#else /* arch_try_cmpxchg64_relaxed */
+#define raw_cmpxchg_local arch_cmpxchg_local
-#ifndef arch_try_cmpxchg64_acquire
-#define arch_try_cmpxchg64_acquire(...) \
- __atomic_op_acquire(arch_try_cmpxchg64, __VA_ARGS__)
+#ifdef arch_try_cmpxchg_local
+#define raw_try_cmpxchg_local arch_try_cmpxchg_local
+#else
+#define raw_try_cmpxchg_local(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg_local((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
#endif
-#ifndef arch_try_cmpxchg64_release
-#define arch_try_cmpxchg64_release(...) \
- __atomic_op_release(arch_try_cmpxchg64, __VA_ARGS__)
-#endif
+#define raw_cmpxchg64_local arch_cmpxchg64_local
-#ifndef arch_try_cmpxchg64
-#define arch_try_cmpxchg64(...) \
- __atomic_op_fence(arch_try_cmpxchg64, __VA_ARGS__)
+#ifdef arch_try_cmpxchg64_local
+#define raw_try_cmpxchg64_local arch_try_cmpxchg64_local
+#else
+#define raw_try_cmpxchg64_local(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg64_local((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
#endif
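
The _local variants need only be atomic with respect to the current CPU (for example against interrupts), not against other CPUs, which makes them cheaper on architectures where a cross-CPU atomic needs a lock prefix or barrier. A sketch of per-CPU usage, assuming preemption is already disabled and with hypothetical names:

static DEFINE_PER_CPU(u64, evt_count);	/* hypothetical per-CPU counter */

static inline void count_event(void)
{
	u64 *p = this_cpu_ptr(&evt_count);
	u64 old = *p;

	while (!raw_try_cmpxchg64_local(p, &old, old + 1))
		;
}
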
-#endif /* arch_try_cmpxchg64_relaxed */
+#define raw_cmpxchg128_local arch_cmpxchg128_local
-#ifndef arch_try_cmpxchg_local
-#define arch_try_cmpxchg_local(_ptr, _oldp, _new) \
+#ifdef arch_try_cmpxchg128_local
+#define raw_try_cmpxchg128_local arch_try_cmpxchg128_local
+#else
+#define raw_try_cmpxchg128_local(_ptr, _oldp, _new) \
({ \
typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
- ___r = arch_cmpxchg_local((_ptr), ___o, (_new)); \
+ ___r = raw_cmpxchg128_local((_ptr), ___o, (_new)); \
if (unlikely(___r != ___o)) \
*___op = ___r; \
likely(___r == ___o); \
})
-#endif /* arch_try_cmpxchg_local */
+#endif
-#ifndef arch_try_cmpxchg64_local
-#define arch_try_cmpxchg64_local(_ptr, _oldp, _new) \
+#define raw_sync_cmpxchg arch_sync_cmpxchg
+
+#ifdef arch_sync_try_cmpxchg
+#define raw_sync_try_cmpxchg arch_sync_try_cmpxchg
+#else
+#define raw_sync_try_cmpxchg(_ptr, _oldp, _new) \
({ \
typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
- ___r = arch_cmpxchg64_local((_ptr), ___o, (_new)); \
+ ___r = raw_sync_cmpxchg((_ptr), ___o, (_new)); \
if (unlikely(___r != ___o)) \
*___op = ___r; \
likely(___r == ___o); \
})
-#endif /* arch_try_cmpxchg64_local */
+#endif
+
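
The sync_ variants are for memory shared with an agent other than the CPUs (a device, firmware, or hypervisor): historically, on x86 for instance, they use a locked instruction even on !CONFIG_SMP kernels where the plain forms may not. A hedged sketch of the kind of caller that needs this, with hypothetical names:

static inline bool claim_mailbox_slot(u32 *slot)
{
	u32 expected = 0;

	/* even a UP kernel can race with the device, so stay atomic */
	return raw_sync_try_cmpxchg(slot, &expected, 1);
}
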
+/**
+ * raw_atomic_read() - atomic load with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically loads the value of @v with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_read() elsewhere.
+ *
+ * Return: The value loaded from @v.
+ */
+static __always_inline int
+raw_atomic_read(const atomic_t *v)
+{
+ return arch_atomic_read(v);
+}
-#ifndef arch_atomic_read_acquire
+/**
+ * raw_atomic_read_acquire() - atomic load with acquire ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically loads the value of @v with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_read_acquire() elsewhere.
+ *
+ * Return: The value loaded from @v.
+ */
static __always_inline int
-arch_atomic_read_acquire(const atomic_t *v)
+raw_atomic_read_acquire(const atomic_t *v)
{
+#if defined(arch_atomic_read_acquire)
+ return arch_atomic_read_acquire(v);
+#else
int ret;
if (__native_word(atomic_t)) {
ret = smp_load_acquire(&(v)->counter);
} else {
- ret = arch_atomic_read(v);
+ ret = raw_atomic_read(v);
__atomic_acquire_fence();
}
return ret;
-}
-#define arch_atomic_read_acquire arch_atomic_read_acquire
#endif
+}
-#ifndef arch_atomic_set_release
+/**
+ * raw_atomic_set() - atomic set with relaxed ordering
+ * @v: pointer to atomic_t
+ * @i: int value to assign
+ *
+ * Atomically sets @v to @i with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_set() elsewhere.
+ *
+ * Return: Nothing.
+ */
static __always_inline void
-arch_atomic_set_release(atomic_t *v, int i)
+raw_atomic_set(atomic_t *v, int i)
{
+ arch_atomic_set(v, i);
+}
+
+/**
+ * raw_atomic_set_release() - atomic set with release ordering
+ * @v: pointer to atomic_t
+ * @i: int value to assign
+ *
+ * Atomically sets @v to @i with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_set_release() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_set_release(atomic_t *v, int i)
+{
+#if defined(arch_atomic_set_release)
+ arch_atomic_set_release(v, i);
+#else
if (__native_word(atomic_t)) {
smp_store_release(&(v)->counter, i);
} else {
__atomic_release_fence();
- arch_atomic_set(v, i);
+ raw_atomic_set(v, i);
}
-}
-#define arch_atomic_set_release arch_atomic_set_release
#endif
-
-#ifndef arch_atomic_add_return_relaxed
-#define arch_atomic_add_return_acquire arch_atomic_add_return
-#define arch_atomic_add_return_release arch_atomic_add_return
-#define arch_atomic_add_return_relaxed arch_atomic_add_return
-#else /* arch_atomic_add_return_relaxed */
-
-#ifndef arch_atomic_add_return_acquire
-static __always_inline int
-arch_atomic_add_return_acquire(int i, atomic_t *v)
-{
- int ret = arch_atomic_add_return_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
}
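
Together, raw_atomic_read_acquire() and raw_atomic_set_release() implement the classic publication pattern: writes made before the release store are visible to any reader whose acquire load observes the flag. A minimal sketch, with hypothetical names:

static int payload;			/* ordinary, non-atomic data */
static atomic_t ready = ATOMIC_INIT(0);

static void publish(void)
{
	payload = 42;				/* A */
	raw_atomic_set_release(&ready, 1);	/* orders A before the flag */
}

static int consume(void)
{
	if (!raw_atomic_read_acquire(&ready))	/* orders the flag before B */
		return -EAGAIN;
	return payload;				/* B: guaranteed to see 42 */
}
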
-#define arch_atomic_add_return_acquire arch_atomic_add_return_acquire
-#endif
-#ifndef arch_atomic_add_return_release
-static __always_inline int
-arch_atomic_add_return_release(int i, atomic_t *v)
+/**
+ * raw_atomic_add() - atomic add with relaxed ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_add() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_add(int i, atomic_t *v)
{
- __atomic_release_fence();
- return arch_atomic_add_return_relaxed(i, v);
+ arch_atomic_add(i, v);
}
-#define arch_atomic_add_return_release arch_atomic_add_return_release
-#endif
-#ifndef arch_atomic_add_return
+/**
+ * raw_atomic_add_return() - atomic add with full ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_add_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline int
-arch_atomic_add_return(int i, atomic_t *v)
+raw_atomic_add_return(int i, atomic_t *v)
{
+#if defined(arch_atomic_add_return)
+ return arch_atomic_add_return(i, v);
+#elif defined(arch_atomic_add_return_relaxed)
int ret;
__atomic_pre_full_fence();
ret = arch_atomic_add_return_relaxed(i, v);
__atomic_post_full_fence();
return ret;
-}
-#define arch_atomic_add_return arch_atomic_add_return
+#else
+#error "Unable to define raw_atomic_add_return"
#endif
+}
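
Throughout this file the fully ordered fallback brackets the relaxed primitive with __atomic_pre_full_fence()/__atomic_post_full_fence(). Unless the architecture overrides them, those default to smp_mb__before_atomic() and smp_mb__after_atomic(), so on such an architecture the fallback above expands to roughly the following sketch (illustrative, not literal preprocessor output):

static __always_inline int expanded_add_return(int i, atomic_t *v)
{
	int ret;

	smp_mb__before_atomic();	/* __atomic_pre_full_fence() */
	ret = arch_atomic_add_return_relaxed(i, v);
	smp_mb__after_atomic();		/* __atomic_post_full_fence() */
	return ret;
}
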
-#endif /* arch_atomic_add_return_relaxed */
-
-#ifndef arch_atomic_fetch_add_relaxed
-#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add
-#define arch_atomic_fetch_add_release arch_atomic_fetch_add
-#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add
-#else /* arch_atomic_fetch_add_relaxed */
-
-#ifndef arch_atomic_fetch_add_acquire
+/**
+ * raw_atomic_add_return_acquire() - atomic add with acquire ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_add_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline int
-arch_atomic_fetch_add_acquire(int i, atomic_t *v)
+raw_atomic_add_return_acquire(int i, atomic_t *v)
{
- int ret = arch_atomic_fetch_add_relaxed(i, v);
+#if defined(arch_atomic_add_return_acquire)
+ return arch_atomic_add_return_acquire(i, v);
+#elif defined(arch_atomic_add_return_relaxed)
+ int ret = arch_atomic_add_return_relaxed(i, v);
__atomic_acquire_fence();
return ret;
-}
-#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add_acquire
+#elif defined(arch_atomic_add_return)
+ return arch_atomic_add_return(i, v);
+#else
+#error "Unable to define raw_atomic_add_return_acquire"
#endif
+}
-#ifndef arch_atomic_fetch_add_release
+/**
+ * raw_atomic_add_return_release() - atomic add with release ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_add_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline int
-arch_atomic_fetch_add_release(int i, atomic_t *v)
+raw_atomic_add_return_release(int i, atomic_t *v)
{
+#if defined(arch_atomic_add_return_release)
+ return arch_atomic_add_return_release(i, v);
+#elif defined(arch_atomic_add_return_relaxed)
__atomic_release_fence();
- return arch_atomic_fetch_add_relaxed(i, v);
+ return arch_atomic_add_return_relaxed(i, v);
+#elif defined(arch_atomic_add_return)
+ return arch_atomic_add_return(i, v);
+#else
+#error "Unable to define raw_atomic_add_return_release"
+#endif
}
-#define arch_atomic_fetch_add_release arch_atomic_fetch_add_release
+
+/**
+ * raw_atomic_add_return_relaxed() - atomic add with relaxed ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_add_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_add_return_relaxed(int i, atomic_t *v)
+{
+#if defined(arch_atomic_add_return_relaxed)
+ return arch_atomic_add_return_relaxed(i, v);
+#elif defined(arch_atomic_add_return)
+ return arch_atomic_add_return(i, v);
+#else
+#error "Unable to define raw_atomic_add_return_relaxed"
#endif
+}
-#ifndef arch_atomic_fetch_add
+/**
+ * raw_atomic_fetch_add() - atomic add with full ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_add() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
-arch_atomic_fetch_add(int i, atomic_t *v)
+raw_atomic_fetch_add(int i, atomic_t *v)
{
+#if defined(arch_atomic_fetch_add)
+ return arch_atomic_fetch_add(i, v);
+#elif defined(arch_atomic_fetch_add_relaxed)
int ret;
__atomic_pre_full_fence();
ret = arch_atomic_fetch_add_relaxed(i, v);
__atomic_post_full_fence();
return ret;
-}
-#define arch_atomic_fetch_add arch_atomic_fetch_add
+#else
+#error "Unable to define raw_atomic_fetch_add"
#endif
+}
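
The fetch_ prefix is the only semantic difference from add_return: fetch_add returns the value @v held before the addition, add_return the value after. A quick sketch (hypothetical helper, assumes *v == 1 on entry):

static bool fetch_vs_return(atomic_t *v)
{
	int old = raw_atomic_fetch_add(1, v);	/* old == 1, *v == 2 */
	int new = raw_atomic_add_return(1, v);	/* new == 3, *v == 3 */

	return old + 2 == new;			/* always true */
}
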
-#endif /* arch_atomic_fetch_add_relaxed */
-
-#ifndef arch_atomic_sub_return_relaxed
-#define arch_atomic_sub_return_acquire arch_atomic_sub_return
-#define arch_atomic_sub_return_release arch_atomic_sub_return
-#define arch_atomic_sub_return_relaxed arch_atomic_sub_return
-#else /* arch_atomic_sub_return_relaxed */
-
-#ifndef arch_atomic_sub_return_acquire
+/**
+ * raw_atomic_fetch_add_acquire() - atomic add with acquire ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_add_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
-arch_atomic_sub_return_acquire(int i, atomic_t *v)
+raw_atomic_fetch_add_acquire(int i, atomic_t *v)
{
- int ret = arch_atomic_sub_return_relaxed(i, v);
+#if defined(arch_atomic_fetch_add_acquire)
+ return arch_atomic_fetch_add_acquire(i, v);
+#elif defined(arch_atomic_fetch_add_relaxed)
+ int ret = arch_atomic_fetch_add_relaxed(i, v);
__atomic_acquire_fence();
return ret;
-}
-#define arch_atomic_sub_return_acquire arch_atomic_sub_return_acquire
+#elif defined(arch_atomic_fetch_add)
+ return arch_atomic_fetch_add(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_add_acquire"
#endif
+}
-#ifndef arch_atomic_sub_return_release
+/**
+ * raw_atomic_fetch_add_release() - atomic add with release ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_add_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
-arch_atomic_sub_return_release(int i, atomic_t *v)
+raw_atomic_fetch_add_release(int i, atomic_t *v)
{
+#if defined(arch_atomic_fetch_add_release)
+ return arch_atomic_fetch_add_release(i, v);
+#elif defined(arch_atomic_fetch_add_relaxed)
__atomic_release_fence();
- return arch_atomic_sub_return_relaxed(i, v);
+ return arch_atomic_fetch_add_relaxed(i, v);
+#elif defined(arch_atomic_fetch_add)
+ return arch_atomic_fetch_add(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_add_release"
+#endif
}
-#define arch_atomic_sub_return_release arch_atomic_sub_return_release
+
+/**
+ * raw_atomic_fetch_add_relaxed() - atomic add with relaxed ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_add_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_add_relaxed(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_add_relaxed)
+ return arch_atomic_fetch_add_relaxed(i, v);
+#elif defined(arch_atomic_fetch_add)
+ return arch_atomic_fetch_add(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_add_relaxed"
#endif
+}
-#ifndef arch_atomic_sub_return
+/**
+ * raw_atomic_sub() - atomic subtract with relaxed ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_sub() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_sub(int i, atomic_t *v)
+{
+ arch_atomic_sub(i, v);
+}
+
+/**
+ * raw_atomic_sub_return() - atomic subtract with full ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_sub_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline int
-arch_atomic_sub_return(int i, atomic_t *v)
+raw_atomic_sub_return(int i, atomic_t *v)
{
+#if defined(arch_atomic_sub_return)
+ return arch_atomic_sub_return(i, v);
+#elif defined(arch_atomic_sub_return_relaxed)
int ret;
__atomic_pre_full_fence();
ret = arch_atomic_sub_return_relaxed(i, v);
__atomic_post_full_fence();
return ret;
-}
-#define arch_atomic_sub_return arch_atomic_sub_return
+#else
+#error "Unable to define raw_atomic_sub_return"
#endif
+}
-#endif /* arch_atomic_sub_return_relaxed */
-
-#ifndef arch_atomic_fetch_sub_relaxed
-#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub
-#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub
-#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub
-#else /* arch_atomic_fetch_sub_relaxed */
-
-#ifndef arch_atomic_fetch_sub_acquire
+/**
+ * raw_atomic_sub_return_acquire() - atomic subtract with acquire ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_sub_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline int
-arch_atomic_fetch_sub_acquire(int i, atomic_t *v)
+raw_atomic_sub_return_acquire(int i, atomic_t *v)
{
- int ret = arch_atomic_fetch_sub_relaxed(i, v);
+#if defined(arch_atomic_sub_return_acquire)
+ return arch_atomic_sub_return_acquire(i, v);
+#elif defined(arch_atomic_sub_return_relaxed)
+ int ret = arch_atomic_sub_return_relaxed(i, v);
__atomic_acquire_fence();
return ret;
-}
-#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub_acquire
+#elif defined(arch_atomic_sub_return)
+ return arch_atomic_sub_return(i, v);
+#else
+#error "Unable to define raw_atomic_sub_return_acquire"
#endif
+}
-#ifndef arch_atomic_fetch_sub_release
+/**
+ * raw_atomic_sub_return_release() - atomic subtract with release ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_sub_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline int
-arch_atomic_fetch_sub_release(int i, atomic_t *v)
+raw_atomic_sub_return_release(int i, atomic_t *v)
{
+#if defined(arch_atomic_sub_return_release)
+ return arch_atomic_sub_return_release(i, v);
+#elif defined(arch_atomic_sub_return_relaxed)
__atomic_release_fence();
- return arch_atomic_fetch_sub_relaxed(i, v);
+ return arch_atomic_sub_return_relaxed(i, v);
+#elif defined(arch_atomic_sub_return)
+ return arch_atomic_sub_return(i, v);
+#else
+#error "Unable to define raw_atomic_sub_return_release"
+#endif
}
-#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub_release
+
+/**
+ * raw_atomic_sub_return_relaxed() - atomic subtract with relaxed ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_sub_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_sub_return_relaxed(int i, atomic_t *v)
+{
+#if defined(arch_atomic_sub_return_relaxed)
+ return arch_atomic_sub_return_relaxed(i, v);
+#elif defined(arch_atomic_sub_return)
+ return arch_atomic_sub_return(i, v);
+#else
+#error "Unable to define raw_atomic_sub_return_relaxed"
#endif
+}
-#ifndef arch_atomic_fetch_sub
+/**
+ * raw_atomic_fetch_sub() - atomic subtract with full ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_sub() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
-arch_atomic_fetch_sub(int i, atomic_t *v)
+raw_atomic_fetch_sub(int i, atomic_t *v)
{
+#if defined(arch_atomic_fetch_sub)
+ return arch_atomic_fetch_sub(i, v);
+#elif defined(arch_atomic_fetch_sub_relaxed)
int ret;
__atomic_pre_full_fence();
ret = arch_atomic_fetch_sub_relaxed(i, v);
__atomic_post_full_fence();
return ret;
-}
-#define arch_atomic_fetch_sub arch_atomic_fetch_sub
+#else
+#error "Unable to define raw_atomic_fetch_sub"
#endif
-
-#endif /* arch_atomic_fetch_sub_relaxed */
-
-#ifndef arch_atomic_inc
-static __always_inline void
-arch_atomic_inc(atomic_t *v)
-{
- arch_atomic_add(1, v);
}
-#define arch_atomic_inc arch_atomic_inc
-#endif
-
-#ifndef arch_atomic_inc_return_relaxed
-#ifdef arch_atomic_inc_return
-#define arch_atomic_inc_return_acquire arch_atomic_inc_return
-#define arch_atomic_inc_return_release arch_atomic_inc_return
-#define arch_atomic_inc_return_relaxed arch_atomic_inc_return
-#endif /* arch_atomic_inc_return */
-#ifndef arch_atomic_inc_return
+/**
+ * raw_atomic_fetch_sub_acquire() - atomic subtract with acquire ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_sub_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
-arch_atomic_inc_return(atomic_t *v)
+raw_atomic_fetch_sub_acquire(int i, atomic_t *v)
{
- return arch_atomic_add_return(1, v);
-}
-#define arch_atomic_inc_return arch_atomic_inc_return
+#if defined(arch_atomic_fetch_sub_acquire)
+ return arch_atomic_fetch_sub_acquire(i, v);
+#elif defined(arch_atomic_fetch_sub_relaxed)
+ int ret = arch_atomic_fetch_sub_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_fetch_sub)
+ return arch_atomic_fetch_sub(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_sub_acquire"
#endif
-
-#ifndef arch_atomic_inc_return_acquire
-static __always_inline int
-arch_atomic_inc_return_acquire(atomic_t *v)
-{
- return arch_atomic_add_return_acquire(1, v);
}
-#define arch_atomic_inc_return_acquire arch_atomic_inc_return_acquire
-#endif
-#ifndef arch_atomic_inc_return_release
+/**
+ * raw_atomic_fetch_sub_release() - atomic subtract with release ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_sub_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
-arch_atomic_inc_return_release(atomic_t *v)
+raw_atomic_fetch_sub_release(int i, atomic_t *v)
{
- return arch_atomic_add_return_release(1, v);
-}
-#define arch_atomic_inc_return_release arch_atomic_inc_return_release
+#if defined(arch_atomic_fetch_sub_release)
+ return arch_atomic_fetch_sub_release(i, v);
+#elif defined(arch_atomic_fetch_sub_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_fetch_sub_relaxed(i, v);
+#elif defined(arch_atomic_fetch_sub)
+ return arch_atomic_fetch_sub(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_sub_release"
#endif
-
-#ifndef arch_atomic_inc_return_relaxed
-static __always_inline int
-arch_atomic_inc_return_relaxed(atomic_t *v)
-{
- return arch_atomic_add_return_relaxed(1, v);
}
-#define arch_atomic_inc_return_relaxed arch_atomic_inc_return_relaxed
-#endif
-
-#else /* arch_atomic_inc_return_relaxed */
-#ifndef arch_atomic_inc_return_acquire
+/**
+ * raw_atomic_fetch_sub_relaxed() - atomic subtract with relaxed ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_sub_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
-arch_atomic_inc_return_acquire(atomic_t *v)
+raw_atomic_fetch_sub_relaxed(int i, atomic_t *v)
{
- int ret = arch_atomic_inc_return_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic_inc_return_acquire arch_atomic_inc_return_acquire
+#if defined(arch_atomic_fetch_sub_relaxed)
+ return arch_atomic_fetch_sub_relaxed(i, v);
+#elif defined(arch_atomic_fetch_sub)
+ return arch_atomic_fetch_sub(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_sub_relaxed"
#endif
+}
-#ifndef arch_atomic_inc_return_release
-static __always_inline int
-arch_atomic_inc_return_release(atomic_t *v)
+/**
+ * raw_atomic_inc() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_inc() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_inc(atomic_t *v)
{
- __atomic_release_fence();
- return arch_atomic_inc_return_relaxed(v);
-}
-#define arch_atomic_inc_return_release arch_atomic_inc_return_release
+#if defined(arch_atomic_inc)
+ arch_atomic_inc(v);
+#else
+ raw_atomic_add(1, v);
#endif
+}
-#ifndef arch_atomic_inc_return
+/**
+ * raw_atomic_inc_return() - atomic increment with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_inc_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline int
-arch_atomic_inc_return(atomic_t *v)
+raw_atomic_inc_return(atomic_t *v)
{
+#if defined(arch_atomic_inc_return)
+ return arch_atomic_inc_return(v);
+#elif defined(arch_atomic_inc_return_relaxed)
int ret;
__atomic_pre_full_fence();
ret = arch_atomic_inc_return_relaxed(v);
__atomic_post_full_fence();
return ret;
-}
-#define arch_atomic_inc_return arch_atomic_inc_return
+#else
+ return raw_atomic_add_return(1, v);
#endif
+}
-#endif /* arch_atomic_inc_return_relaxed */
-
-#ifndef arch_atomic_fetch_inc_relaxed
-#ifdef arch_atomic_fetch_inc
-#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc
-#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc
-#define arch_atomic_fetch_inc_relaxed arch_atomic_fetch_inc
-#endif /* arch_atomic_fetch_inc */
-
-#ifndef arch_atomic_fetch_inc
+/**
+ * raw_atomic_inc_return_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_inc_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline int
-arch_atomic_fetch_inc(atomic_t *v)
+raw_atomic_inc_return_acquire(atomic_t *v)
{
- return arch_atomic_fetch_add(1, v);
-}
-#define arch_atomic_fetch_inc arch_atomic_fetch_inc
+#if defined(arch_atomic_inc_return_acquire)
+ return arch_atomic_inc_return_acquire(v);
+#elif defined(arch_atomic_inc_return_relaxed)
+ int ret = arch_atomic_inc_return_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_inc_return)
+ return arch_atomic_inc_return(v);
+#else
+ return raw_atomic_add_return_acquire(1, v);
#endif
+}
-#ifndef arch_atomic_fetch_inc_acquire
+/**
+ * raw_atomic_inc_return_release() - atomic increment with release ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_inc_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline int
-arch_atomic_fetch_inc_acquire(atomic_t *v)
+raw_atomic_inc_return_release(atomic_t *v)
{
- return arch_atomic_fetch_add_acquire(1, v);
-}
-#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc_acquire
+#if defined(arch_atomic_inc_return_release)
+ return arch_atomic_inc_return_release(v);
+#elif defined(arch_atomic_inc_return_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_inc_return_relaxed(v);
+#elif defined(arch_atomic_inc_return)
+ return arch_atomic_inc_return(v);
+#else
+ return raw_atomic_add_return_release(1, v);
#endif
+}
-#ifndef arch_atomic_fetch_inc_release
+/**
+ * raw_atomic_inc_return_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_inc_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline int
-arch_atomic_fetch_inc_release(atomic_t *v)
+raw_atomic_inc_return_relaxed(atomic_t *v)
{
- return arch_atomic_fetch_add_release(1, v);
-}
-#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc_release
+#if defined(arch_atomic_inc_return_relaxed)
+ return arch_atomic_inc_return_relaxed(v);
+#elif defined(arch_atomic_inc_return)
+ return arch_atomic_inc_return(v);
+#else
+ return raw_atomic_add_return_relaxed(1, v);
#endif
+}
-#ifndef arch_atomic_fetch_inc_relaxed
+/**
+ * raw_atomic_fetch_inc() - atomic increment with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_inc() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
-arch_atomic_fetch_inc_relaxed(atomic_t *v)
+raw_atomic_fetch_inc(atomic_t *v)
{
- return arch_atomic_fetch_add_relaxed(1, v);
-}
-#define arch_atomic_fetch_inc_relaxed arch_atomic_fetch_inc_relaxed
+#if defined(arch_atomic_fetch_inc)
+ return arch_atomic_fetch_inc(v);
+#elif defined(arch_atomic_fetch_inc_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_inc_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic_fetch_add(1, v);
#endif
+}
-#else /* arch_atomic_fetch_inc_relaxed */
-
-#ifndef arch_atomic_fetch_inc_acquire
+/**
+ * raw_atomic_fetch_inc_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_inc_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
-arch_atomic_fetch_inc_acquire(atomic_t *v)
+raw_atomic_fetch_inc_acquire(atomic_t *v)
{
+#if defined(arch_atomic_fetch_inc_acquire)
+ return arch_atomic_fetch_inc_acquire(v);
+#elif defined(arch_atomic_fetch_inc_relaxed)
int ret = arch_atomic_fetch_inc_relaxed(v);
__atomic_acquire_fence();
return ret;
-}
-#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc_acquire
+#elif defined(arch_atomic_fetch_inc)
+ return arch_atomic_fetch_inc(v);
+#else
+ return raw_atomic_fetch_add_acquire(1, v);
#endif
+}
-#ifndef arch_atomic_fetch_inc_release
+/**
+ * raw_atomic_fetch_inc_release() - atomic increment with release ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_inc_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
-arch_atomic_fetch_inc_release(atomic_t *v)
+raw_atomic_fetch_inc_release(atomic_t *v)
{
+#if defined(arch_atomic_fetch_inc_release)
+ return arch_atomic_fetch_inc_release(v);
+#elif defined(arch_atomic_fetch_inc_relaxed)
__atomic_release_fence();
return arch_atomic_fetch_inc_relaxed(v);
-}
-#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc_release
+#elif defined(arch_atomic_fetch_inc)
+ return arch_atomic_fetch_inc(v);
+#else
+ return raw_atomic_fetch_add_release(1, v);
#endif
+}
-#ifndef arch_atomic_fetch_inc
+/**
+ * raw_atomic_fetch_inc_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_inc_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
-arch_atomic_fetch_inc(atomic_t *v)
+raw_atomic_fetch_inc_relaxed(atomic_t *v)
{
- int ret;
- __atomic_pre_full_fence();
- ret = arch_atomic_fetch_inc_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic_fetch_inc arch_atomic_fetch_inc
+#if defined(arch_atomic_fetch_inc_relaxed)
+ return arch_atomic_fetch_inc_relaxed(v);
+#elif defined(arch_atomic_fetch_inc)
+ return arch_atomic_fetch_inc(v);
+#else
+ return raw_atomic_fetch_add_relaxed(1, v);
#endif
-
-#endif /* arch_atomic_fetch_inc_relaxed */
-
-#ifndef arch_atomic_dec
-static __always_inline void
-arch_atomic_dec(atomic_t *v)
-{
- arch_atomic_sub(1, v);
}
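
When a caller needs atomicity but no ordering, the relaxed variant is the right choice; handing out unique cookies is the textbook case. A sketch with hypothetical names:

static atomic_t next_id = ATOMIC_INIT(0);

static inline int alloc_id(void)
{
	/* returns the pre-increment value: 0, 1, 2, ... */
	return raw_atomic_fetch_inc_relaxed(&next_id);
}
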
-#define arch_atomic_dec arch_atomic_dec
-#endif
-#ifndef arch_atomic_dec_return_relaxed
-#ifdef arch_atomic_dec_return
-#define arch_atomic_dec_return_acquire arch_atomic_dec_return
-#define arch_atomic_dec_return_release arch_atomic_dec_return
-#define arch_atomic_dec_return_relaxed arch_atomic_dec_return
-#endif /* arch_atomic_dec_return */
-
-#ifndef arch_atomic_dec_return
-static __always_inline int
-arch_atomic_dec_return(atomic_t *v)
+/**
+ * raw_atomic_dec() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_dec() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_dec(atomic_t *v)
{
- return arch_atomic_sub_return(1, v);
-}
-#define arch_atomic_dec_return arch_atomic_dec_return
+#if defined(arch_atomic_dec)
+ arch_atomic_dec(v);
+#else
+ raw_atomic_sub(1, v);
#endif
-
-#ifndef arch_atomic_dec_return_acquire
-static __always_inline int
-arch_atomic_dec_return_acquire(atomic_t *v)
-{
- return arch_atomic_sub_return_acquire(1, v);
}
-#define arch_atomic_dec_return_acquire arch_atomic_dec_return_acquire
-#endif
-#ifndef arch_atomic_dec_return_release
+/**
+ * raw_atomic_dec_return() - atomic decrement with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_dec_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline int
-arch_atomic_dec_return_release(atomic_t *v)
+raw_atomic_dec_return(atomic_t *v)
{
- return arch_atomic_sub_return_release(1, v);
-}
-#define arch_atomic_dec_return_release arch_atomic_dec_return_release
+#if defined(arch_atomic_dec_return)
+ return arch_atomic_dec_return(v);
+#elif defined(arch_atomic_dec_return_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_dec_return_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic_sub_return(1, v);
#endif
-
-#ifndef arch_atomic_dec_return_relaxed
-static __always_inline int
-arch_atomic_dec_return_relaxed(atomic_t *v)
-{
- return arch_atomic_sub_return_relaxed(1, v);
}
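
inc and dec pair naturally into a reference count; real code should use refcount_t, which adds saturation and overflow checking, so the following is only a sketch of the ordering requirements, with hypothetical names:

struct obj { atomic_t refs; };

static inline void obj_get(struct obj *o)
{
	raw_atomic_inc(&o->refs);	/* relaxed: taking a ref needs no ordering */
}

static inline bool obj_put(struct obj *o)
{
	/* fully ordered: prior accesses must complete before the last put frees */
	return raw_atomic_dec_return(&o->refs) == 0;
}
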
-#define arch_atomic_dec_return_relaxed arch_atomic_dec_return_relaxed
-#endif
-
-#else /* arch_atomic_dec_return_relaxed */
-#ifndef arch_atomic_dec_return_acquire
+/**
+ * raw_atomic_dec_return_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_dec_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline int
-arch_atomic_dec_return_acquire(atomic_t *v)
+raw_atomic_dec_return_acquire(atomic_t *v)
{
+#if defined(arch_atomic_dec_return_acquire)
+ return arch_atomic_dec_return_acquire(v);
+#elif defined(arch_atomic_dec_return_relaxed)
int ret = arch_atomic_dec_return_relaxed(v);
__atomic_acquire_fence();
return ret;
-}
-#define arch_atomic_dec_return_acquire arch_atomic_dec_return_acquire
+#elif defined(arch_atomic_dec_return)
+ return arch_atomic_dec_return(v);
+#else
+ return raw_atomic_sub_return_acquire(1, v);
#endif
+}
-#ifndef arch_atomic_dec_return_release
+/**
+ * raw_atomic_dec_return_release() - atomic decrement with release ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_dec_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline int
-arch_atomic_dec_return_release(atomic_t *v)
+raw_atomic_dec_return_release(atomic_t *v)
{
+#if defined(arch_atomic_dec_return_release)
+ return arch_atomic_dec_return_release(v);
+#elif defined(arch_atomic_dec_return_relaxed)
__atomic_release_fence();
return arch_atomic_dec_return_relaxed(v);
+#elif defined(arch_atomic_dec_return)
+ return arch_atomic_dec_return(v);
+#else
+ return raw_atomic_sub_return_release(1, v);
+#endif
}
-#define arch_atomic_dec_return_release arch_atomic_dec_return_release
+
+/**
+ * raw_atomic_dec_return_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_dec_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_dec_return_relaxed(atomic_t *v)
+{
+#if defined(arch_atomic_dec_return_relaxed)
+ return arch_atomic_dec_return_relaxed(v);
+#elif defined(arch_atomic_dec_return)
+ return arch_atomic_dec_return(v);
+#else
+ return raw_atomic_sub_return_relaxed(1, v);
#endif
+}
-#ifndef arch_atomic_dec_return
+/**
+ * raw_atomic_fetch_dec() - atomic decrement with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_dec() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
-arch_atomic_dec_return(atomic_t *v)
+raw_atomic_fetch_dec(atomic_t *v)
{
+#if defined(arch_atomic_fetch_dec)
+ return arch_atomic_fetch_dec(v);
+#elif defined(arch_atomic_fetch_dec_relaxed)
int ret;
__atomic_pre_full_fence();
- ret = arch_atomic_dec_return_relaxed(v);
+ ret = arch_atomic_fetch_dec_relaxed(v);
__atomic_post_full_fence();
return ret;
-}
-#define arch_atomic_dec_return arch_atomic_dec_return
+#else
+ return raw_atomic_fetch_sub(1, v);
#endif
-
-#endif /* arch_atomic_dec_return_relaxed */
-
-#ifndef arch_atomic_fetch_dec_relaxed
-#ifdef arch_atomic_fetch_dec
-#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec
-#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec
-#define arch_atomic_fetch_dec_relaxed arch_atomic_fetch_dec
-#endif /* arch_atomic_fetch_dec */
-
-#ifndef arch_atomic_fetch_dec
-static __always_inline int
-arch_atomic_fetch_dec(atomic_t *v)
-{
- return arch_atomic_fetch_sub(1, v);
}
-#define arch_atomic_fetch_dec arch_atomic_fetch_dec
-#endif
-#ifndef arch_atomic_fetch_dec_acquire
+/**
+ * raw_atomic_fetch_dec_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_dec_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
-arch_atomic_fetch_dec_acquire(atomic_t *v)
+raw_atomic_fetch_dec_acquire(atomic_t *v)
{
- return arch_atomic_fetch_sub_acquire(1, v);
-}
-#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec_acquire
+#if defined(arch_atomic_fetch_dec_acquire)
+ return arch_atomic_fetch_dec_acquire(v);
+#elif defined(arch_atomic_fetch_dec_relaxed)
+ int ret = arch_atomic_fetch_dec_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_fetch_dec)
+ return arch_atomic_fetch_dec(v);
+#else
+ return raw_atomic_fetch_sub_acquire(1, v);
#endif
-
-#ifndef arch_atomic_fetch_dec_release
-static __always_inline int
-arch_atomic_fetch_dec_release(atomic_t *v)
-{
- return arch_atomic_fetch_sub_release(1, v);
}
-#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec_release
-#endif
-#ifndef arch_atomic_fetch_dec_relaxed
+/**
+ * raw_atomic_fetch_dec_release() - atomic decrement with release ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_dec_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
-arch_atomic_fetch_dec_relaxed(atomic_t *v)
+raw_atomic_fetch_dec_release(atomic_t *v)
{
- return arch_atomic_fetch_sub_relaxed(1, v);
-}
-#define arch_atomic_fetch_dec_relaxed arch_atomic_fetch_dec_relaxed
+#if defined(arch_atomic_fetch_dec_release)
+ return arch_atomic_fetch_dec_release(v);
+#elif defined(arch_atomic_fetch_dec_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_fetch_dec_relaxed(v);
+#elif defined(arch_atomic_fetch_dec)
+ return arch_atomic_fetch_dec(v);
+#else
+ return raw_atomic_fetch_sub_release(1, v);
#endif
+}
-#else /* arch_atomic_fetch_dec_relaxed */
-
-#ifndef arch_atomic_fetch_dec_acquire
+/**
+ * raw_atomic_fetch_dec_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_dec_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
-arch_atomic_fetch_dec_acquire(atomic_t *v)
+raw_atomic_fetch_dec_relaxed(atomic_t *v)
{
- int ret = arch_atomic_fetch_dec_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec_acquire
+#if defined(arch_atomic_fetch_dec_relaxed)
+ return arch_atomic_fetch_dec_relaxed(v);
+#elif defined(arch_atomic_fetch_dec)
+ return arch_atomic_fetch_dec(v);
+#else
+ return raw_atomic_fetch_sub_relaxed(1, v);
#endif
+}
-#ifndef arch_atomic_fetch_dec_release
-static __always_inline int
-arch_atomic_fetch_dec_release(atomic_t *v)
+/**
+ * raw_atomic_and() - atomic bitwise AND with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_and() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_and(int i, atomic_t *v)
{
- __atomic_release_fence();
- return arch_atomic_fetch_dec_relaxed(v);
+ arch_atomic_and(i, v);
}
-#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec_release
-#endif
-#ifndef arch_atomic_fetch_dec
+/**
+ * raw_atomic_fetch_and() - atomic bitwise AND with full ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_and() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
-arch_atomic_fetch_dec(atomic_t *v)
+raw_atomic_fetch_and(int i, atomic_t *v)
{
+#if defined(arch_atomic_fetch_and)
+ return arch_atomic_fetch_and(i, v);
+#elif defined(arch_atomic_fetch_and_relaxed)
int ret;
__atomic_pre_full_fence();
- ret = arch_atomic_fetch_dec_relaxed(v);
+ ret = arch_atomic_fetch_and_relaxed(i, v);
__atomic_post_full_fence();
return ret;
-}
-#define arch_atomic_fetch_dec arch_atomic_fetch_dec
+#else
+#error "Unable to define raw_atomic_fetch_and"
#endif
+}
-#endif /* arch_atomic_fetch_dec_relaxed */
-
-#ifndef arch_atomic_fetch_and_relaxed
-#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and
-#define arch_atomic_fetch_and_release arch_atomic_fetch_and
-#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and
-#else /* arch_atomic_fetch_and_relaxed */
-
-#ifndef arch_atomic_fetch_and_acquire
+/**
+ * raw_atomic_fetch_and_acquire() - atomic bitwise AND with acquire ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_and_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
-arch_atomic_fetch_and_acquire(int i, atomic_t *v)
+raw_atomic_fetch_and_acquire(int i, atomic_t *v)
{
+#if defined(arch_atomic_fetch_and_acquire)
+ return arch_atomic_fetch_and_acquire(i, v);
+#elif defined(arch_atomic_fetch_and_relaxed)
int ret = arch_atomic_fetch_and_relaxed(i, v);
__atomic_acquire_fence();
return ret;
-}
-#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and_acquire
+#elif defined(arch_atomic_fetch_and)
+ return arch_atomic_fetch_and(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_and_acquire"
#endif
+}
-#ifndef arch_atomic_fetch_and_release
+/**
+ * raw_atomic_fetch_and_release() - atomic bitwise AND with release ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_and_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
-arch_atomic_fetch_and_release(int i, atomic_t *v)
+raw_atomic_fetch_and_release(int i, atomic_t *v)
{
+#if defined(arch_atomic_fetch_and_release)
+ return arch_atomic_fetch_and_release(i, v);
+#elif defined(arch_atomic_fetch_and_relaxed)
__atomic_release_fence();
return arch_atomic_fetch_and_relaxed(i, v);
-}
-#define arch_atomic_fetch_and_release arch_atomic_fetch_and_release
+#elif defined(arch_atomic_fetch_and)
+ return arch_atomic_fetch_and(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_and_release"
#endif
+}
-#ifndef arch_atomic_fetch_and
+/**
+ * raw_atomic_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_and_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
-arch_atomic_fetch_and(int i, atomic_t *v)
+raw_atomic_fetch_and_relaxed(int i, atomic_t *v)
{
- int ret;
- __atomic_pre_full_fence();
- ret = arch_atomic_fetch_and_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic_fetch_and arch_atomic_fetch_and
+#if defined(arch_atomic_fetch_and_relaxed)
+ return arch_atomic_fetch_and_relaxed(i, v);
+#elif defined(arch_atomic_fetch_and)
+ return arch_atomic_fetch_and(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_and_relaxed"
#endif
-
-#endif /* arch_atomic_fetch_and_relaxed */
-
-#ifndef arch_atomic_andnot
-static __always_inline void
-arch_atomic_andnot(int i, atomic_t *v)
-{
- arch_atomic_and(~i, v);
}
-#define arch_atomic_andnot arch_atomic_andnot
-#endif
-
-#ifndef arch_atomic_fetch_andnot_relaxed
-#ifdef arch_atomic_fetch_andnot
-#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot
-#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot
-#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot
-#endif /* arch_atomic_fetch_andnot */
-#ifndef arch_atomic_fetch_andnot
-static __always_inline int
-arch_atomic_fetch_andnot(int i, atomic_t *v)
+/**
+ * raw_atomic_andnot() - atomic bitwise AND NOT with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_andnot() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_andnot(int i, atomic_t *v)
{
- return arch_atomic_fetch_and(~i, v);
-}
-#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
+#if defined(arch_atomic_andnot)
+ arch_atomic_andnot(i, v);
+#else
+ raw_atomic_and(~i, v);
#endif
-
-#ifndef arch_atomic_fetch_andnot_acquire
-static __always_inline int
-arch_atomic_fetch_andnot_acquire(int i, atomic_t *v)
-{
- return arch_atomic_fetch_and_acquire(~i, v);
}
-#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
-#endif
-#ifndef arch_atomic_fetch_andnot_release
+/**
+ * raw_atomic_fetch_andnot() - atomic bitwise AND NOT with full ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & ~@i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_andnot() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
-arch_atomic_fetch_andnot_release(int i, atomic_t *v)
+raw_atomic_fetch_andnot(int i, atomic_t *v)
{
- return arch_atomic_fetch_and_release(~i, v);
-}
-#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release
+#if defined(arch_atomic_fetch_andnot)
+ return arch_atomic_fetch_andnot(i, v);
+#elif defined(arch_atomic_fetch_andnot_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_andnot_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic_fetch_and(~i, v);
#endif
-
-#ifndef arch_atomic_fetch_andnot_relaxed
-static __always_inline int
-arch_atomic_fetch_andnot_relaxed(int i, atomic_t *v)
-{
- return arch_atomic_fetch_and_relaxed(~i, v);
}
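
andnot exists so that clearing bits does not need an explicit complement at every call site, and the fetch_ form additionally tells the caller whether the bit was previously set. A sketch with a hypothetical flag bit:

#define MY_PENDING	0x1	/* hypothetical flag */

static inline bool clear_pending(atomic_t *state)
{
	/* the old value reveals whether we were the ones to clear it */
	return raw_atomic_fetch_andnot(MY_PENDING, state) & MY_PENDING;
}
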
-#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
-#endif
-#else /* arch_atomic_fetch_andnot_relaxed */
-
-#ifndef arch_atomic_fetch_andnot_acquire
+/**
+ * raw_atomic_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & ~@i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_andnot_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
-arch_atomic_fetch_andnot_acquire(int i, atomic_t *v)
+raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
{
+#if defined(arch_atomic_fetch_andnot_acquire)
+ return arch_atomic_fetch_andnot_acquire(i, v);
+#elif defined(arch_atomic_fetch_andnot_relaxed)
int ret = arch_atomic_fetch_andnot_relaxed(i, v);
__atomic_acquire_fence();
return ret;
-}
-#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
+#elif defined(arch_atomic_fetch_andnot)
+ return arch_atomic_fetch_andnot(i, v);
+#else
+ return raw_atomic_fetch_and_acquire(~i, v);
#endif
+}
-#ifndef arch_atomic_fetch_andnot_release
+/**
+ * raw_atomic_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & ~@i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_andnot_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
-arch_atomic_fetch_andnot_release(int i, atomic_t *v)
+raw_atomic_fetch_andnot_release(int i, atomic_t *v)
{
+#if defined(arch_atomic_fetch_andnot_release)
+ return arch_atomic_fetch_andnot_release(i, v);
+#elif defined(arch_atomic_fetch_andnot_relaxed)
__atomic_release_fence();
return arch_atomic_fetch_andnot_relaxed(i, v);
+#elif defined(arch_atomic_fetch_andnot)
+ return arch_atomic_fetch_andnot(i, v);
+#else
+ return raw_atomic_fetch_and_release(~i, v);
+#endif
}
-#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release
+
+/**
+ * raw_atomic_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_andnot_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_andnot_relaxed(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_andnot_relaxed)
+ return arch_atomic_fetch_andnot_relaxed(i, v);
+#elif defined(arch_atomic_fetch_andnot)
+ return arch_atomic_fetch_andnot(i, v);
+#else
+ return raw_atomic_fetch_and_relaxed(~i, v);
#endif
+}
+
+/**
+ * raw_atomic_or() - atomic bitwise OR with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_or() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_or(int i, atomic_t *v)
+{
+ arch_atomic_or(i, v);
+}
-#ifndef arch_atomic_fetch_andnot
+/**
+ * raw_atomic_fetch_or() - atomic bitwise OR with full ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v | @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_or() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
-arch_atomic_fetch_andnot(int i, atomic_t *v)
+raw_atomic_fetch_or(int i, atomic_t *v)
{
+#if defined(arch_atomic_fetch_or)
+ return arch_atomic_fetch_or(i, v);
+#elif defined(arch_atomic_fetch_or_relaxed)
int ret;
__atomic_pre_full_fence();
- ret = arch_atomic_fetch_andnot_relaxed(i, v);
+ ret = arch_atomic_fetch_or_relaxed(i, v);
__atomic_post_full_fence();
return ret;
-}
-#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
+#else
+#error "Unable to define raw_atomic_fetch_or"
#endif
+}
-#endif /* arch_atomic_fetch_andnot_relaxed */
-
-#ifndef arch_atomic_fetch_or_relaxed
-#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or
-#define arch_atomic_fetch_or_release arch_atomic_fetch_or
-#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or
-#else /* arch_atomic_fetch_or_relaxed */
-
-#ifndef arch_atomic_fetch_or_acquire
+/**
+ * raw_atomic_fetch_or_acquire() - atomic bitwise OR with acquire ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v | @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_or_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
-arch_atomic_fetch_or_acquire(int i, atomic_t *v)
+raw_atomic_fetch_or_acquire(int i, atomic_t *v)
{
+#if defined(arch_atomic_fetch_or_acquire)
+ return arch_atomic_fetch_or_acquire(i, v);
+#elif defined(arch_atomic_fetch_or_relaxed)
int ret = arch_atomic_fetch_or_relaxed(i, v);
__atomic_acquire_fence();
return ret;
-}
-#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or_acquire
+#elif defined(arch_atomic_fetch_or)
+ return arch_atomic_fetch_or(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_or_acquire"
#endif
+}
-#ifndef arch_atomic_fetch_or_release
+/**
+ * raw_atomic_fetch_or_release() - atomic bitwise OR with release ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v | @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_or_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
-arch_atomic_fetch_or_release(int i, atomic_t *v)
+raw_atomic_fetch_or_release(int i, atomic_t *v)
{
+#if defined(arch_atomic_fetch_or_release)
+ return arch_atomic_fetch_or_release(i, v);
+#elif defined(arch_atomic_fetch_or_relaxed)
__atomic_release_fence();
return arch_atomic_fetch_or_relaxed(i, v);
+#elif defined(arch_atomic_fetch_or)
+ return arch_atomic_fetch_or(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_or_release"
+#endif
}
-#define arch_atomic_fetch_or_release arch_atomic_fetch_or_release
+
+/**
+ * raw_atomic_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_or_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_or_relaxed(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_or_relaxed)
+ return arch_atomic_fetch_or_relaxed(i, v);
+#elif defined(arch_atomic_fetch_or)
+ return arch_atomic_fetch_or(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_or_relaxed"
#endif
+}
-#ifndef arch_atomic_fetch_or
+/**
+ * raw_atomic_xor() - atomic bitwise XOR with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_xor() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_xor(int i, atomic_t *v)
+{
+ arch_atomic_xor(i, v);
+}
+
+/**
+ * raw_atomic_fetch_xor() - atomic bitwise XOR with full ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v ^ @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_xor() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
-arch_atomic_fetch_or(int i, atomic_t *v)
+raw_atomic_fetch_xor(int i, atomic_t *v)
{
+#if defined(arch_atomic_fetch_xor)
+ return arch_atomic_fetch_xor(i, v);
+#elif defined(arch_atomic_fetch_xor_relaxed)
int ret;
__atomic_pre_full_fence();
- ret = arch_atomic_fetch_or_relaxed(i, v);
+ ret = arch_atomic_fetch_xor_relaxed(i, v);
__atomic_post_full_fence();
return ret;
-}
-#define arch_atomic_fetch_or arch_atomic_fetch_or
+#else
+#error "Unable to define raw_atomic_fetch_xor"
#endif
+}
-#endif /* arch_atomic_fetch_or_relaxed */
-
-#ifndef arch_atomic_fetch_xor_relaxed
-#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor
-#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor
-#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor
-#else /* arch_atomic_fetch_xor_relaxed */
-
-#ifndef arch_atomic_fetch_xor_acquire
+/**
+ * raw_atomic_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v ^ @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_xor_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
-arch_atomic_fetch_xor_acquire(int i, atomic_t *v)
+raw_atomic_fetch_xor_acquire(int i, atomic_t *v)
{
+#if defined(arch_atomic_fetch_xor_acquire)
+ return arch_atomic_fetch_xor_acquire(i, v);
+#elif defined(arch_atomic_fetch_xor_relaxed)
int ret = arch_atomic_fetch_xor_relaxed(i, v);
__atomic_acquire_fence();
return ret;
-}
-#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor_acquire
+#elif defined(arch_atomic_fetch_xor)
+ return arch_atomic_fetch_xor(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_xor_acquire"
#endif
+}
-#ifndef arch_atomic_fetch_xor_release
+/**
+ * raw_atomic_fetch_xor_release() - atomic bitwise XOR with release ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v ^ @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_xor_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
-arch_atomic_fetch_xor_release(int i, atomic_t *v)
+raw_atomic_fetch_xor_release(int i, atomic_t *v)
{
+#if defined(arch_atomic_fetch_xor_release)
+ return arch_atomic_fetch_xor_release(i, v);
+#elif defined(arch_atomic_fetch_xor_relaxed)
__atomic_release_fence();
return arch_atomic_fetch_xor_relaxed(i, v);
+#elif defined(arch_atomic_fetch_xor)
+ return arch_atomic_fetch_xor(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_xor_release"
+#endif
}
-#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor_release
+
+/**
+ * raw_atomic_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_xor_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_xor_relaxed(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_xor_relaxed)
+ return arch_atomic_fetch_xor_relaxed(i, v);
+#elif defined(arch_atomic_fetch_xor)
+ return arch_atomic_fetch_xor(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_xor_relaxed"
#endif
+}
-#ifndef arch_atomic_fetch_xor
+/**
+ * raw_atomic_xchg() - atomic exchange with full ordering
+ * @v: pointer to atomic_t
+ * @new: int value to assign
+ *
+ * Atomically updates @v to @new with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_xchg() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
-arch_atomic_fetch_xor(int i, atomic_t *v)
+raw_atomic_xchg(atomic_t *v, int new)
{
+#if defined(arch_atomic_xchg)
+ return arch_atomic_xchg(v, new);
+#elif defined(arch_atomic_xchg_relaxed)
int ret;
__atomic_pre_full_fence();
- ret = arch_atomic_fetch_xor_relaxed(i, v);
+ ret = arch_atomic_xchg_relaxed(v, new);
__atomic_post_full_fence();
return ret;
-}
-#define arch_atomic_fetch_xor arch_atomic_fetch_xor
+#else
+ return raw_xchg(&v->counter, new);
#endif
+}
-#endif /* arch_atomic_fetch_xor_relaxed */
-
-#ifndef arch_atomic_xchg_relaxed
-#define arch_atomic_xchg_acquire arch_atomic_xchg
-#define arch_atomic_xchg_release arch_atomic_xchg
-#define arch_atomic_xchg_relaxed arch_atomic_xchg
-#else /* arch_atomic_xchg_relaxed */
-
-#ifndef arch_atomic_xchg_acquire
+/**
+ * raw_atomic_xchg_acquire() - atomic exchange with acquire ordering
+ * @v: pointer to atomic_t
+ * @new: int value to assign
+ *
+ * Atomically updates @v to @new with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_xchg_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
-arch_atomic_xchg_acquire(atomic_t *v, int i)
+raw_atomic_xchg_acquire(atomic_t *v, int new)
{
- int ret = arch_atomic_xchg_relaxed(v, i);
+#if defined(arch_atomic_xchg_acquire)
+ return arch_atomic_xchg_acquire(v, new);
+#elif defined(arch_atomic_xchg_relaxed)
+ int ret = arch_atomic_xchg_relaxed(v, new);
__atomic_acquire_fence();
return ret;
-}
-#define arch_atomic_xchg_acquire arch_atomic_xchg_acquire
+#elif defined(arch_atomic_xchg)
+ return arch_atomic_xchg(v, new);
+#else
+ return raw_xchg_acquire(&v->counter, new);
#endif
+}
-#ifndef arch_atomic_xchg_release
+/**
+ * raw_atomic_xchg_release() - atomic exchange with release ordering
+ * @v: pointer to atomic_t
+ * @new: int value to assign
+ *
+ * Atomically updates @v to @new with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_xchg_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
-arch_atomic_xchg_release(atomic_t *v, int i)
+raw_atomic_xchg_release(atomic_t *v, int new)
{
+#if defined(arch_atomic_xchg_release)
+ return arch_atomic_xchg_release(v, new);
+#elif defined(arch_atomic_xchg_relaxed)
__atomic_release_fence();
- return arch_atomic_xchg_relaxed(v, i);
+ return arch_atomic_xchg_relaxed(v, new);
+#elif defined(arch_atomic_xchg)
+ return arch_atomic_xchg(v, new);
+#else
+ return raw_xchg_release(&v->counter, new);
+#endif
}
-#define arch_atomic_xchg_release arch_atomic_xchg_release
+
+/**
+ * raw_atomic_xchg_relaxed() - atomic exchange with relaxed ordering
+ * @v: pointer to atomic_t
+ * @new: int value to assign
+ *
+ * Atomically updates @v to @new with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_xchg_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_xchg_relaxed(atomic_t *v, int new)
+{
+#if defined(arch_atomic_xchg_relaxed)
+ return arch_atomic_xchg_relaxed(v, new);
+#elif defined(arch_atomic_xchg)
+ return arch_atomic_xchg(v, new);
+#else
+ return raw_xchg_relaxed(&v->counter, new);
#endif
+}
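
Note the terminal fallback: when no arch_atomic_xchg*() variant exists at all, the operation lowers onto the generic raw_xchg() over the underlying v->counter. A usage sketch under kernel-context assumptions (try_claim(), IDLE and BUSY are hypothetical):

#include <linux/atomic.h>

enum { IDLE = 0, BUSY = 1 };

/* Claim a slot; full ordering makes prior initialization visible. */
static bool try_claim(atomic_t *state)
{
	return raw_atomic_xchg(state, BUSY) == IDLE;
}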
-#ifndef arch_atomic_xchg
+/**
+ * raw_atomic_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic_t
+ * @old: int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_cmpxchg() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
-arch_atomic_xchg(atomic_t *v, int i)
+raw_atomic_cmpxchg(atomic_t *v, int old, int new)
{
+#if defined(arch_atomic_cmpxchg)
+ return arch_atomic_cmpxchg(v, old, new);
+#elif defined(arch_atomic_cmpxchg_relaxed)
int ret;
__atomic_pre_full_fence();
- ret = arch_atomic_xchg_relaxed(v, i);
+ ret = arch_atomic_cmpxchg_relaxed(v, old, new);
__atomic_post_full_fence();
return ret;
-}
-#define arch_atomic_xchg arch_atomic_xchg
+#else
+ return raw_cmpxchg(&v->counter, old, new);
#endif
+}
-#endif /* arch_atomic_xchg_relaxed */
-
-#ifndef arch_atomic_cmpxchg_relaxed
-#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg
-#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg
-#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg
-#else /* arch_atomic_cmpxchg_relaxed */
-
-#ifndef arch_atomic_cmpxchg_acquire
+/**
+ * raw_atomic_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic_t
+ * @old: int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_cmpxchg_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
-arch_atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
+raw_atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
{
+#if defined(arch_atomic_cmpxchg_acquire)
+ return arch_atomic_cmpxchg_acquire(v, old, new);
+#elif defined(arch_atomic_cmpxchg_relaxed)
int ret = arch_atomic_cmpxchg_relaxed(v, old, new);
__atomic_acquire_fence();
return ret;
-}
-#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg_acquire
+#elif defined(arch_atomic_cmpxchg)
+ return arch_atomic_cmpxchg(v, old, new);
+#else
+ return raw_cmpxchg_acquire(&v->counter, old, new);
#endif
+}
-#ifndef arch_atomic_cmpxchg_release
+/**
+ * raw_atomic_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic_t
+ * @old: int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_cmpxchg_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
-arch_atomic_cmpxchg_release(atomic_t *v, int old, int new)
+raw_atomic_cmpxchg_release(atomic_t *v, int old, int new)
{
+#if defined(arch_atomic_cmpxchg_release)
+ return arch_atomic_cmpxchg_release(v, old, new);
+#elif defined(arch_atomic_cmpxchg_relaxed)
__atomic_release_fence();
return arch_atomic_cmpxchg_relaxed(v, old, new);
-}
-#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg_release
+#elif defined(arch_atomic_cmpxchg)
+ return arch_atomic_cmpxchg(v, old, new);
+#else
+ return raw_cmpxchg_release(&v->counter, old, new);
#endif
+}
-#ifndef arch_atomic_cmpxchg
+/**
+ * raw_atomic_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic_t
+ * @old: int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_cmpxchg_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
-arch_atomic_cmpxchg(atomic_t *v, int old, int new)
+raw_atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
{
- int ret;
- __atomic_pre_full_fence();
- ret = arch_atomic_cmpxchg_relaxed(v, old, new);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic_cmpxchg arch_atomic_cmpxchg
+#if defined(arch_atomic_cmpxchg_relaxed)
+ return arch_atomic_cmpxchg_relaxed(v, old, new);
+#elif defined(arch_atomic_cmpxchg)
+ return arch_atomic_cmpxchg(v, old, new);
+#else
+ return raw_cmpxchg_relaxed(&v->counter, old, new);
#endif
+}
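
Because every cmpxchg variant returns the original value rather than a success flag, callers detect success by comparing against @old; a minimal sketch (set_if_still() is hypothetical):

/* True iff @v still held @expected and was replaced by @replacement. */
static bool set_if_still(atomic_t *v, int expected, int replacement)
{
	return raw_atomic_cmpxchg(v, expected, replacement) == expected;
}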
-#endif /* arch_atomic_cmpxchg_relaxed */
-
-#ifndef arch_atomic_try_cmpxchg_relaxed
-#ifdef arch_atomic_try_cmpxchg
-#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg
-#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg
-#define arch_atomic_try_cmpxchg_relaxed arch_atomic_try_cmpxchg
-#endif /* arch_atomic_try_cmpxchg */
-
-#ifndef arch_atomic_try_cmpxchg
+/**
+ * raw_atomic_try_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic_t
+ * @old: pointer to int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, updates @old to the current value of @v.
+ *
+ * Safe to use in noinstr code; prefer atomic_try_cmpxchg() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
static __always_inline bool
-arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+raw_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
+#if defined(arch_atomic_try_cmpxchg)
+ return arch_atomic_try_cmpxchg(v, old, new);
+#elif defined(arch_atomic_try_cmpxchg_relaxed)
+ bool ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
+ __atomic_post_full_fence();
+ return ret;
+#else
int r, o = *old;
- r = arch_atomic_cmpxchg(v, o, new);
+ r = raw_atomic_cmpxchg(v, o, new);
if (unlikely(r != o))
*old = r;
return likely(r == o);
-}
-#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
#endif
+}
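
The bool-returning form exists for update loops: on failure it refreshes *@old with the current value, so the loop needs no explicit re-read. A sketch of the canonical pattern, mirroring the fetch_add_unless() fallback later in this file (saturating_inc() and its bound are hypothetical):

/* Increment @v, but never past @max; returns the resulting value. */
static int saturating_inc(atomic_t *v, int max)
{
	int old = raw_atomic_read(v);

	do {
		if (old >= max)
			return old;	/* already saturated, no update */
	} while (!raw_atomic_try_cmpxchg(v, &old, old + 1));

	return old + 1;
}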
-#ifndef arch_atomic_try_cmpxchg_acquire
+/**
+ * raw_atomic_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic_t
+ * @old: pointer to int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, updates @old to the current value of @v.
+ *
+ * Safe to use in noinstr code; prefer atomic_try_cmpxchg_acquire() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
static __always_inline bool
-arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
+raw_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
{
+#if defined(arch_atomic_try_cmpxchg_acquire)
+ return arch_atomic_try_cmpxchg_acquire(v, old, new);
+#elif defined(arch_atomic_try_cmpxchg_relaxed)
+ bool ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_try_cmpxchg)
+ return arch_atomic_try_cmpxchg(v, old, new);
+#else
int r, o = *old;
- r = arch_atomic_cmpxchg_acquire(v, o, new);
+ r = raw_atomic_cmpxchg_acquire(v, o, new);
if (unlikely(r != o))
*old = r;
return likely(r == o);
-}
-#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg_acquire
#endif
+}
-#ifndef arch_atomic_try_cmpxchg_release
+/**
+ * raw_atomic_try_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic_t
+ * @old: pointer to int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, updates @old to the current value of @v.
+ *
+ * Safe to use in noinstr code; prefer atomic_try_cmpxchg_release() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
static __always_inline bool
-arch_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
+raw_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
{
+#if defined(arch_atomic_try_cmpxchg_release)
+ return arch_atomic_try_cmpxchg_release(v, old, new);
+#elif defined(arch_atomic_try_cmpxchg_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_try_cmpxchg_relaxed(v, old, new);
+#elif defined(arch_atomic_try_cmpxchg)
+ return arch_atomic_try_cmpxchg(v, old, new);
+#else
int r, o = *old;
- r = arch_atomic_cmpxchg_release(v, o, new);
+ r = raw_atomic_cmpxchg_release(v, o, new);
if (unlikely(r != o))
*old = r;
return likely(r == o);
-}
-#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg_release
#endif
+}
-#ifndef arch_atomic_try_cmpxchg_relaxed
+/**
+ * raw_atomic_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic_t
+ * @old: pointer to int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, updates @old to the current value of @v.
+ *
+ * Safe to use in noinstr code; prefer atomic_try_cmpxchg_relaxed() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
static __always_inline bool
-arch_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
+raw_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
{
+#if defined(arch_atomic_try_cmpxchg_relaxed)
+ return arch_atomic_try_cmpxchg_relaxed(v, old, new);
+#elif defined(arch_atomic_try_cmpxchg)
+ return arch_atomic_try_cmpxchg(v, old, new);
+#else
int r, o = *old;
- r = arch_atomic_cmpxchg_relaxed(v, o, new);
+ r = raw_atomic_cmpxchg_relaxed(v, o, new);
if (unlikely(r != o))
*old = r;
return likely(r == o);
-}
-#define arch_atomic_try_cmpxchg_relaxed arch_atomic_try_cmpxchg_relaxed
-#endif
-
-#else /* arch_atomic_try_cmpxchg_relaxed */
-
-#ifndef arch_atomic_try_cmpxchg_acquire
-static __always_inline bool
-arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
-{
- bool ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg_acquire
#endif
-
-#ifndef arch_atomic_try_cmpxchg_release
-static __always_inline bool
-arch_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
-{
- __atomic_release_fence();
- return arch_atomic_try_cmpxchg_relaxed(v, old, new);
}
-#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg_release
-#endif
-#ifndef arch_atomic_try_cmpxchg
-static __always_inline bool
-arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
-{
- bool ret;
- __atomic_pre_full_fence();
- ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
-#endif
-
-#endif /* arch_atomic_try_cmpxchg_relaxed */
-
-#ifndef arch_atomic_sub_and_test
/**
- * arch_atomic_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
+ * raw_atomic_sub_and_test() - atomic subtract and test if zero with full ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_sub_and_test() elsewhere.
*
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
*/
static __always_inline bool
-arch_atomic_sub_and_test(int i, atomic_t *v)
+raw_atomic_sub_and_test(int i, atomic_t *v)
{
- return arch_atomic_sub_return(i, v) == 0;
-}
-#define arch_atomic_sub_and_test arch_atomic_sub_and_test
+#if defined(arch_atomic_sub_and_test)
+ return arch_atomic_sub_and_test(i, v);
+#else
+ return raw_atomic_sub_return(i, v) == 0;
#endif
+}
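
The and-test forms return whether the result hit zero, which is the classic reference-count put shape; a sketch with a hypothetical object type:

struct obj {
	atomic_t refs;
};

static void obj_free(struct obj *o);	/* hypothetical destructor */

/* Drop one reference; free on the 1 -> 0 transition. */
static void obj_put(struct obj *o)
{
	if (raw_atomic_sub_and_test(1, &o->refs))
		obj_free(o);
}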
-#ifndef arch_atomic_dec_and_test
/**
- * arch_atomic_dec_and_test - decrement and test
- * @v: pointer of type atomic_t
+ * raw_atomic_dec_and_test() - atomic decrement and test if zero with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_dec_and_test() elsewhere.
*
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
*/
static __always_inline bool
-arch_atomic_dec_and_test(atomic_t *v)
+raw_atomic_dec_and_test(atomic_t *v)
{
- return arch_atomic_dec_return(v) == 0;
-}
-#define arch_atomic_dec_and_test arch_atomic_dec_and_test
+#if defined(arch_atomic_dec_and_test)
+ return arch_atomic_dec_and_test(v);
+#else
+ return raw_atomic_dec_return(v) == 0;
#endif
+}
-#ifndef arch_atomic_inc_and_test
/**
- * arch_atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
+ * raw_atomic_inc_and_test() - atomic increment and test if zero with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
*
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
+ * Safe to use in noinstr code; prefer atomic_inc_and_test() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
*/
static __always_inline bool
-arch_atomic_inc_and_test(atomic_t *v)
+raw_atomic_inc_and_test(atomic_t *v)
{
- return arch_atomic_inc_return(v) == 0;
-}
-#define arch_atomic_inc_and_test arch_atomic_inc_and_test
+#if defined(arch_atomic_inc_and_test)
+ return arch_atomic_inc_and_test(v);
+#else
+ return raw_atomic_inc_return(v) == 0;
#endif
+}
-#ifndef arch_atomic_add_negative_relaxed
-#ifdef arch_atomic_add_negative
-#define arch_atomic_add_negative_acquire arch_atomic_add_negative
-#define arch_atomic_add_negative_release arch_atomic_add_negative
-#define arch_atomic_add_negative_relaxed arch_atomic_add_negative
-#endif /* arch_atomic_add_negative */
-
-#ifndef arch_atomic_add_negative
/**
- * arch_atomic_add_negative - Add and test if negative
- * @i: integer value to add
- * @v: pointer of type atomic_t
+ * raw_atomic_add_negative() - atomic add and test if negative with full ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
*
- * Atomically adds @i to @v and returns true if the result is negative,
- * or false when the result is greater than or equal to zero.
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_add_negative() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
*/
static __always_inline bool
-arch_atomic_add_negative(int i, atomic_t *v)
+raw_atomic_add_negative(int i, atomic_t *v)
{
- return arch_atomic_add_return(i, v) < 0;
-}
-#define arch_atomic_add_negative arch_atomic_add_negative
+#if defined(arch_atomic_add_negative)
+ return arch_atomic_add_negative(i, v);
+#elif defined(arch_atomic_add_negative_relaxed)
+ bool ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_add_negative_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic_add_return(i, v) < 0;
#endif
+}
-#ifndef arch_atomic_add_negative_acquire
/**
- * arch_atomic_add_negative_acquire - Add and test if negative
- * @i: integer value to add
- * @v: pointer of type atomic_t
+ * raw_atomic_add_negative_acquire() - atomic add and test if negative with acquire ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
*
- * Atomically adds @i to @v and returns true if the result is negative,
- * or false when the result is greater than or equal to zero.
+ * Safe to use in noinstr code; prefer atomic_add_negative_acquire() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
*/
static __always_inline bool
-arch_atomic_add_negative_acquire(int i, atomic_t *v)
+raw_atomic_add_negative_acquire(int i, atomic_t *v)
{
- return arch_atomic_add_return_acquire(i, v) < 0;
-}
-#define arch_atomic_add_negative_acquire arch_atomic_add_negative_acquire
+#if defined(arch_atomic_add_negative_acquire)
+ return arch_atomic_add_negative_acquire(i, v);
+#elif defined(arch_atomic_add_negative_relaxed)
+ bool ret = arch_atomic_add_negative_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_add_negative)
+ return arch_atomic_add_negative(i, v);
+#else
+ return raw_atomic_add_return_acquire(i, v) < 0;
#endif
+}
-#ifndef arch_atomic_add_negative_release
/**
- * arch_atomic_add_negative_release - Add and test if negative
- * @i: integer value to add
- * @v: pointer of type atomic_t
+ * raw_atomic_add_negative_release() - atomic add and test if negative with release ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
*
- * Atomically adds @i to @v and returns true if the result is negative,
- * or false when the result is greater than or equal to zero.
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_add_negative_release() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
*/
static __always_inline bool
-arch_atomic_add_negative_release(int i, atomic_t *v)
+raw_atomic_add_negative_release(int i, atomic_t *v)
{
- return arch_atomic_add_return_release(i, v) < 0;
-}
-#define arch_atomic_add_negative_release arch_atomic_add_negative_release
+#if defined(arch_atomic_add_negative_release)
+ return arch_atomic_add_negative_release(i, v);
+#elif defined(arch_atomic_add_negative_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_add_negative_relaxed(i, v);
+#elif defined(arch_atomic_add_negative)
+ return arch_atomic_add_negative(i, v);
+#else
+ return raw_atomic_add_return_release(i, v) < 0;
#endif
+}
-#ifndef arch_atomic_add_negative_relaxed
/**
- * arch_atomic_add_negative_relaxed - Add and test if negative
- * @i: integer value to add
- * @v: pointer of type atomic_t
+ * raw_atomic_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
*
- * Atomically adds @i to @v and returns true if the result is negative,
- * or false when the result is greater than or equal to zero.
+ * Safe to use in noinstr code; prefer atomic_add_negative_relaxed() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
*/
static __always_inline bool
-arch_atomic_add_negative_relaxed(int i, atomic_t *v)
-{
- return arch_atomic_add_return_relaxed(i, v) < 0;
-}
-#define arch_atomic_add_negative_relaxed arch_atomic_add_negative_relaxed
-#endif
-
-#else /* arch_atomic_add_negative_relaxed */
-
-#ifndef arch_atomic_add_negative_acquire
-static __always_inline bool
-arch_atomic_add_negative_acquire(int i, atomic_t *v)
+raw_atomic_add_negative_relaxed(int i, atomic_t *v)
{
- bool ret = arch_atomic_add_negative_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic_add_negative_acquire arch_atomic_add_negative_acquire
-#endif
-
-#ifndef arch_atomic_add_negative_release
-static __always_inline bool
-arch_atomic_add_negative_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
+#if defined(arch_atomic_add_negative_relaxed)
return arch_atomic_add_negative_relaxed(i, v);
-}
-#define arch_atomic_add_negative_release arch_atomic_add_negative_release
+#elif defined(arch_atomic_add_negative)
+ return arch_atomic_add_negative(i, v);
+#else
+ return raw_atomic_add_return_relaxed(i, v) < 0;
#endif
-
-#ifndef arch_atomic_add_negative
-static __always_inline bool
-arch_atomic_add_negative(int i, atomic_t *v)
-{
- bool ret;
- __atomic_pre_full_fence();
- ret = arch_atomic_add_negative_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
}
-#define arch_atomic_add_negative arch_atomic_add_negative
-#endif
-
-#endif /* arch_atomic_add_negative_relaxed */
-#ifndef arch_atomic_fetch_add_unless
/**
- * arch_atomic_fetch_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
+ * raw_atomic_fetch_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic_t
+ * @a: int value to add
+ * @u: int value to compare with
*
- * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns original value of @v
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_add_unless() elsewhere.
+ *
+ * Return: The original value of @v.
*/
static __always_inline int
-arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
+raw_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
- int c = arch_atomic_read(v);
+#if defined(arch_atomic_fetch_add_unless)
+ return arch_atomic_fetch_add_unless(v, a, u);
+#else
+ int c = raw_atomic_read(v);
do {
if (unlikely(c == u))
break;
- } while (!arch_atomic_try_cmpxchg(v, &c, c + a));
+ } while (!raw_atomic_try_cmpxchg(v, &c, c + a));
return c;
-}
-#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
#endif
+}
-#ifndef arch_atomic_add_unless
/**
- * arch_atomic_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
+ * raw_atomic_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic_t
+ * @a: int value to add
+ * @u: int value to compare with
*
- * Atomically adds @a to @v, if @v was not already @u.
- * Returns true if the addition was done.
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_add_unless() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
*/
static __always_inline bool
-arch_atomic_add_unless(atomic_t *v, int a, int u)
+raw_atomic_add_unless(atomic_t *v, int a, int u)
{
- return arch_atomic_fetch_add_unless(v, a, u) != u;
-}
-#define arch_atomic_add_unless arch_atomic_add_unless
+#if defined(arch_atomic_add_unless)
+ return arch_atomic_add_unless(v, a, u);
+#else
+ return raw_atomic_fetch_add_unless(v, a, u) != u;
#endif
+}
-#ifndef arch_atomic_inc_not_zero
/**
- * arch_atomic_inc_not_zero - increment unless the number is zero
- * @v: pointer of type atomic_t
+ * raw_atomic_inc_not_zero() - atomic increment unless zero with full ordering
+ * @v: pointer to atomic_t
+ *
+ * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
*
- * Atomically increments @v by 1, if @v is non-zero.
- * Returns true if the increment was done.
+ * Safe to use in noinstr code; prefer atomic_inc_not_zero() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
*/
static __always_inline bool
-arch_atomic_inc_not_zero(atomic_t *v)
+raw_atomic_inc_not_zero(atomic_t *v)
{
- return arch_atomic_add_unless(v, 1, 0);
-}
-#define arch_atomic_inc_not_zero arch_atomic_inc_not_zero
+#if defined(arch_atomic_inc_not_zero)
+ return arch_atomic_inc_not_zero(v);
+#else
+ return raw_atomic_add_unless(v, 1, 0);
#endif
+}
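
This is the companion to a sub_and_test()-style put: a get that must fail once the count has already reached zero, so a dying object is never resurrected. A sketch (tryget() is hypothetical):

/* Take a reference only if at least one is still held. */
static bool tryget(atomic_t *refs)
{
	return raw_atomic_inc_not_zero(refs);
}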
-#ifndef arch_atomic_inc_unless_negative
+/**
+ * raw_atomic_inc_unless_negative() - atomic increment unless negative with full ordering
+ * @v: pointer to atomic_t
+ *
+ * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_inc_unless_negative() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
static __always_inline bool
-arch_atomic_inc_unless_negative(atomic_t *v)
+raw_atomic_inc_unless_negative(atomic_t *v)
{
- int c = arch_atomic_read(v);
+#if defined(arch_atomic_inc_unless_negative)
+ return arch_atomic_inc_unless_negative(v);
+#else
+ int c = raw_atomic_read(v);
do {
if (unlikely(c < 0))
return false;
- } while (!arch_atomic_try_cmpxchg(v, &c, c + 1));
+ } while (!raw_atomic_try_cmpxchg(v, &c, c + 1));
return true;
-}
-#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative
#endif
+}
-#ifndef arch_atomic_dec_unless_positive
+/**
+ * raw_atomic_dec_unless_positive() - atomic decrement unless positive with full ordering
+ * @v: pointer to atomic_t
+ *
+ * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_dec_unless_positive() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
static __always_inline bool
-arch_atomic_dec_unless_positive(atomic_t *v)
+raw_atomic_dec_unless_positive(atomic_t *v)
{
- int c = arch_atomic_read(v);
+#if defined(arch_atomic_dec_unless_positive)
+ return arch_atomic_dec_unless_positive(v);
+#else
+ int c = raw_atomic_read(v);
do {
if (unlikely(c > 0))
return false;
- } while (!arch_atomic_try_cmpxchg(v, &c, c - 1));
+ } while (!raw_atomic_try_cmpxchg(v, &c, c - 1));
return true;
-}
-#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive
#endif
+}
-#ifndef arch_atomic_dec_if_positive
+/**
+ * raw_atomic_dec_if_positive() - atomic decrement if positive with full ordering
+ * @v: pointer to atomic_t
+ *
+ * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_dec_if_positive() elsewhere.
+ *
+ * Return: The original value of @v minus one, regardless of whether @v was updated.
+ */
static __always_inline int
-arch_atomic_dec_if_positive(atomic_t *v)
+raw_atomic_dec_if_positive(atomic_t *v)
{
- int dec, c = arch_atomic_read(v);
+#if defined(arch_atomic_dec_if_positive)
+ return arch_atomic_dec_if_positive(v);
+#else
+ int dec, c = raw_atomic_read(v);
do {
dec = c - 1;
if (unlikely(dec < 0))
break;
- } while (!arch_atomic_try_cmpxchg(v, &c, dec));
+ } while (!raw_atomic_try_cmpxchg(v, &c, dec));
return dec;
-}
-#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
#endif
+}
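
Unlike the bool-returning conditionals above, dec_if_positive() hands back the decremented value even on failure, so a caller tests its sign; a semaphore-flavoured sketch (sem_trydown() is hypothetical):

/* True iff a unit was available and has been consumed. */
static bool sem_trydown(atomic_t *count)
{
	/* A negative result means @count was not updated. */
	return raw_atomic_dec_if_positive(count) >= 0;
}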
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif
-#ifndef arch_atomic64_read_acquire
+/**
+ * raw_atomic64_read() - atomic load with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically loads the value of @v with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_read() elsewhere.
+ *
+ * Return: The value loaded from @v.
+ */
static __always_inline s64
-arch_atomic64_read_acquire(const atomic64_t *v)
+raw_atomic64_read(const atomic64_t *v)
{
+ return arch_atomic64_read(v);
+}
+
+/**
+ * raw_atomic64_read_acquire() - atomic load with acquire ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically loads the value of @v with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_read_acquire() elsewhere.
+ *
+ * Return: The value loaded from @v.
+ */
+static __always_inline s64
+raw_atomic64_read_acquire(const atomic64_t *v)
+{
+#if defined(arch_atomic64_read_acquire)
+ return arch_atomic64_read_acquire(v);
+#else
s64 ret;
if (__native_word(atomic64_t)) {
ret = smp_load_acquire(&(v)->counter);
} else {
- ret = arch_atomic64_read(v);
+ ret = raw_atomic64_read(v);
__atomic_acquire_fence();
}
return ret;
-}
-#define arch_atomic64_read_acquire arch_atomic64_read_acquire
#endif
+}
-#ifndef arch_atomic64_set_release
+/**
+ * raw_atomic64_set() - atomic set with relaxed ordering
+ * @v: pointer to atomic64_t
+ * @i: s64 value to assign
+ *
+ * Atomically sets @v to @i with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_set() elsewhere.
+ *
+ * Return: Nothing.
+ */
static __always_inline void
-arch_atomic64_set_release(atomic64_t *v, s64 i)
+raw_atomic64_set(atomic64_t *v, s64 i)
{
+ arch_atomic64_set(v, i);
+}
+
+/**
+ * raw_atomic64_set_release() - atomic set with release ordering
+ * @v: pointer to atomic64_t
+ * @i: s64 value to assign
+ *
+ * Atomically sets @v to @i with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_set_release() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic64_set_release(atomic64_t *v, s64 i)
+{
+#if defined(arch_atomic64_set_release)
+ arch_atomic64_set_release(v, i);
+#else
if (__native_word(atomic64_t)) {
smp_store_release(&(v)->counter, i);
} else {
__atomic_release_fence();
- arch_atomic64_set(v, i);
+ raw_atomic64_set(v, i);
}
-}
-#define arch_atomic64_set_release arch_atomic64_set_release
#endif
-
-#ifndef arch_atomic64_add_return_relaxed
-#define arch_atomic64_add_return_acquire arch_atomic64_add_return
-#define arch_atomic64_add_return_release arch_atomic64_add_return
-#define arch_atomic64_add_return_relaxed arch_atomic64_add_return
-#else /* arch_atomic64_add_return_relaxed */
-
-#ifndef arch_atomic64_add_return_acquire
-static __always_inline s64
-arch_atomic64_add_return_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = arch_atomic64_add_return_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
}
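
read_acquire() and set_release() pair up for flag-based message passing: the payload store is ordered before the release store of the flag, and becomes visible once the acquire load observes it. A sketch under kernel-context assumptions (data, ready, producer() and consumer() are hypothetical):

static u64 data;
static atomic64_t ready = ATOMIC64_INIT(0);

static void producer(u64 val)
{
	data = val;				/* payload first ... */
	raw_atomic64_set_release(&ready, 1);	/* ... then publish */
}

static bool consumer(u64 *out)
{
	if (raw_atomic64_read_acquire(&ready) != 1)
		return false;
	*out = data;				/* ordered after the flag load */
	return true;
}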
-#define arch_atomic64_add_return_acquire arch_atomic64_add_return_acquire
-#endif
-#ifndef arch_atomic64_add_return_release
-static __always_inline s64
-arch_atomic64_add_return_release(s64 i, atomic64_t *v)
+/**
+ * raw_atomic64_add() - atomic add with relaxed ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_add() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic64_add(s64 i, atomic64_t *v)
{
- __atomic_release_fence();
- return arch_atomic64_add_return_relaxed(i, v);
+ arch_atomic64_add(i, v);
}
-#define arch_atomic64_add_return_release arch_atomic64_add_return_release
-#endif
-#ifndef arch_atomic64_add_return
+/**
+ * raw_atomic64_add_return() - atomic add with full ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_add_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline s64
-arch_atomic64_add_return(s64 i, atomic64_t *v)
+raw_atomic64_add_return(s64 i, atomic64_t *v)
{
+#if defined(arch_atomic64_add_return)
+ return arch_atomic64_add_return(i, v);
+#elif defined(arch_atomic64_add_return_relaxed)
s64 ret;
__atomic_pre_full_fence();
ret = arch_atomic64_add_return_relaxed(i, v);
__atomic_post_full_fence();
return ret;
-}
-#define arch_atomic64_add_return arch_atomic64_add_return
+#else
+#error "Unable to define raw_atomic64_add_return"
#endif
+}
-#endif /* arch_atomic64_add_return_relaxed */
-
-#ifndef arch_atomic64_fetch_add_relaxed
-#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add
-#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add
-#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add
-#else /* arch_atomic64_fetch_add_relaxed */
-
-#ifndef arch_atomic64_fetch_add_acquire
+/**
+ * raw_atomic64_add_return_acquire() - atomic add with acquire ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_add_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline s64
-arch_atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
+raw_atomic64_add_return_acquire(s64 i, atomic64_t *v)
{
- s64 ret = arch_atomic64_fetch_add_relaxed(i, v);
+#if defined(arch_atomic64_add_return_acquire)
+ return arch_atomic64_add_return_acquire(i, v);
+#elif defined(arch_atomic64_add_return_relaxed)
+ s64 ret = arch_atomic64_add_return_relaxed(i, v);
__atomic_acquire_fence();
return ret;
-}
-#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add_acquire
+#elif defined(arch_atomic64_add_return)
+ return arch_atomic64_add_return(i, v);
+#else
+#error "Unable to define raw_atomic64_add_return_acquire"
#endif
+}
-#ifndef arch_atomic64_fetch_add_release
+/**
+ * raw_atomic64_add_return_release() - atomic add with release ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_add_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline s64
-arch_atomic64_fetch_add_release(s64 i, atomic64_t *v)
+raw_atomic64_add_return_release(s64 i, atomic64_t *v)
{
+#if defined(arch_atomic64_add_return_release)
+ return arch_atomic64_add_return_release(i, v);
+#elif defined(arch_atomic64_add_return_relaxed)
__atomic_release_fence();
- return arch_atomic64_fetch_add_relaxed(i, v);
+ return arch_atomic64_add_return_relaxed(i, v);
+#elif defined(arch_atomic64_add_return)
+ return arch_atomic64_add_return(i, v);
+#else
+#error "Unable to define raw_atomic64_add_return_release"
+#endif
}
-#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add_release
+
+/**
+ * raw_atomic64_add_return_relaxed() - atomic add with relaxed ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_add_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_add_return_relaxed(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_add_return_relaxed)
+ return arch_atomic64_add_return_relaxed(i, v);
+#elif defined(arch_atomic64_add_return)
+ return arch_atomic64_add_return(i, v);
+#else
+#error "Unable to define raw_atomic64_add_return_relaxed"
#endif
+}
-#ifndef arch_atomic64_fetch_add
+/**
+ * raw_atomic64_fetch_add() - atomic add with full ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_add() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
-arch_atomic64_fetch_add(s64 i, atomic64_t *v)
+raw_atomic64_fetch_add(s64 i, atomic64_t *v)
{
+#if defined(arch_atomic64_fetch_add)
+ return arch_atomic64_fetch_add(i, v);
+#elif defined(arch_atomic64_fetch_add_relaxed)
s64 ret;
__atomic_pre_full_fence();
ret = arch_atomic64_fetch_add_relaxed(i, v);
__atomic_post_full_fence();
return ret;
-}
-#define arch_atomic64_fetch_add arch_atomic64_fetch_add
+#else
+#error "Unable to define raw_atomic64_fetch_add"
#endif
+}
-#endif /* arch_atomic64_fetch_add_relaxed */
-
-#ifndef arch_atomic64_sub_return_relaxed
-#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return
-#define arch_atomic64_sub_return_release arch_atomic64_sub_return
-#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return
-#else /* arch_atomic64_sub_return_relaxed */
-
-#ifndef arch_atomic64_sub_return_acquire
+/**
+ * raw_atomic64_fetch_add_acquire() - atomic add with acquire ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_add_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
-arch_atomic64_sub_return_acquire(s64 i, atomic64_t *v)
+raw_atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
{
- s64 ret = arch_atomic64_sub_return_relaxed(i, v);
+#if defined(arch_atomic64_fetch_add_acquire)
+ return arch_atomic64_fetch_add_acquire(i, v);
+#elif defined(arch_atomic64_fetch_add_relaxed)
+ s64 ret = arch_atomic64_fetch_add_relaxed(i, v);
__atomic_acquire_fence();
return ret;
-}
-#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return_acquire
+#elif defined(arch_atomic64_fetch_add)
+ return arch_atomic64_fetch_add(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_add_acquire"
#endif
+}
-#ifndef arch_atomic64_sub_return_release
+/**
+ * raw_atomic64_fetch_add_release() - atomic add with release ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_add_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
-arch_atomic64_sub_return_release(s64 i, atomic64_t *v)
+raw_atomic64_fetch_add_release(s64 i, atomic64_t *v)
{
+#if defined(arch_atomic64_fetch_add_release)
+ return arch_atomic64_fetch_add_release(i, v);
+#elif defined(arch_atomic64_fetch_add_relaxed)
__atomic_release_fence();
- return arch_atomic64_sub_return_relaxed(i, v);
+ return arch_atomic64_fetch_add_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_add)
+ return arch_atomic64_fetch_add(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_add_release"
+#endif
}
-#define arch_atomic64_sub_return_release arch_atomic64_sub_return_release
+
+/**
+ * raw_atomic64_fetch_add_relaxed() - atomic add with relaxed ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_add_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_add_relaxed)
+ return arch_atomic64_fetch_add_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_add)
+ return arch_atomic64_fetch_add(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_add_relaxed"
#endif
+}
+
+/**
+ * raw_atomic64_sub() - atomic subtract with relaxed ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_sub() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic64_sub(s64 i, atomic64_t *v)
+{
+ arch_atomic64_sub(i, v);
+}
-#ifndef arch_atomic64_sub_return
+/**
+ * raw_atomic64_sub_return() - atomic subtract with full ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_sub_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline s64
-arch_atomic64_sub_return(s64 i, atomic64_t *v)
+raw_atomic64_sub_return(s64 i, atomic64_t *v)
{
+#if defined(arch_atomic64_sub_return)
+ return arch_atomic64_sub_return(i, v);
+#elif defined(arch_atomic64_sub_return_relaxed)
s64 ret;
__atomic_pre_full_fence();
ret = arch_atomic64_sub_return_relaxed(i, v);
__atomic_post_full_fence();
return ret;
-}
-#define arch_atomic64_sub_return arch_atomic64_sub_return
+#else
+#error "Unable to define raw_atomic64_sub_return"
#endif
+}
-#endif /* arch_atomic64_sub_return_relaxed */
-
-#ifndef arch_atomic64_fetch_sub_relaxed
-#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub
-#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub
-#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub
-#else /* arch_atomic64_fetch_sub_relaxed */
-
-#ifndef arch_atomic64_fetch_sub_acquire
+/**
+ * raw_atomic64_sub_return_acquire() - atomic subtract with acquire ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_sub_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline s64
-arch_atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
+raw_atomic64_sub_return_acquire(s64 i, atomic64_t *v)
{
- s64 ret = arch_atomic64_fetch_sub_relaxed(i, v);
+#if defined(arch_atomic64_sub_return_acquire)
+ return arch_atomic64_sub_return_acquire(i, v);
+#elif defined(arch_atomic64_sub_return_relaxed)
+ s64 ret = arch_atomic64_sub_return_relaxed(i, v);
__atomic_acquire_fence();
return ret;
-}
-#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub_acquire
+#elif defined(arch_atomic64_sub_return)
+ return arch_atomic64_sub_return(i, v);
+#else
+#error "Unable to define raw_atomic64_sub_return_acquire"
#endif
+}
-#ifndef arch_atomic64_fetch_sub_release
+/**
+ * raw_atomic64_sub_return_release() - atomic subtract with release ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_sub_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline s64
-arch_atomic64_fetch_sub_release(s64 i, atomic64_t *v)
+raw_atomic64_sub_return_release(s64 i, atomic64_t *v)
{
+#if defined(arch_atomic64_sub_return_release)
+ return arch_atomic64_sub_return_release(i, v);
+#elif defined(arch_atomic64_sub_return_relaxed)
__atomic_release_fence();
- return arch_atomic64_fetch_sub_relaxed(i, v);
+ return arch_atomic64_sub_return_relaxed(i, v);
+#elif defined(arch_atomic64_sub_return)
+ return arch_atomic64_sub_return(i, v);
+#else
+#error "Unable to define raw_atomic64_sub_return_release"
+#endif
}
-#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub_release
+
+/**
+ * raw_atomic64_sub_return_relaxed() - atomic subtract with relaxed ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_sub_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_sub_return_relaxed)
+ return arch_atomic64_sub_return_relaxed(i, v);
+#elif defined(arch_atomic64_sub_return)
+ return arch_atomic64_sub_return(i, v);
+#else
+#error "Unable to define raw_atomic64_sub_return_relaxed"
#endif
+}
-#ifndef arch_atomic64_fetch_sub
+/**
+ * raw_atomic64_fetch_sub() - atomic subtract with full ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_sub() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
-arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
+raw_atomic64_fetch_sub(s64 i, atomic64_t *v)
{
+#if defined(arch_atomic64_fetch_sub)
+ return arch_atomic64_fetch_sub(i, v);
+#elif defined(arch_atomic64_fetch_sub_relaxed)
s64 ret;
__atomic_pre_full_fence();
ret = arch_atomic64_fetch_sub_relaxed(i, v);
__atomic_post_full_fence();
return ret;
-}
-#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
+#else
+#error "Unable to define raw_atomic64_fetch_sub"
#endif
-
-#endif /* arch_atomic64_fetch_sub_relaxed */
-
-#ifndef arch_atomic64_inc
-static __always_inline void
-arch_atomic64_inc(atomic64_t *v)
-{
- arch_atomic64_add(1, v);
}
-#define arch_atomic64_inc arch_atomic64_inc
-#endif
-#ifndef arch_atomic64_inc_return_relaxed
-#ifdef arch_atomic64_inc_return
-#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return
-#define arch_atomic64_inc_return_release arch_atomic64_inc_return
-#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return
-#endif /* arch_atomic64_inc_return */
-
-#ifndef arch_atomic64_inc_return
+/**
+ * raw_atomic64_fetch_sub_acquire() - atomic subtract with acquire ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_sub_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
-arch_atomic64_inc_return(atomic64_t *v)
+raw_atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
{
- return arch_atomic64_add_return(1, v);
-}
-#define arch_atomic64_inc_return arch_atomic64_inc_return
+#if defined(arch_atomic64_fetch_sub_acquire)
+ return arch_atomic64_fetch_sub_acquire(i, v);
+#elif defined(arch_atomic64_fetch_sub_relaxed)
+ s64 ret = arch_atomic64_fetch_sub_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_fetch_sub)
+ return arch_atomic64_fetch_sub(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_sub_acquire"
#endif
-
-#ifndef arch_atomic64_inc_return_acquire
-static __always_inline s64
-arch_atomic64_inc_return_acquire(atomic64_t *v)
-{
- return arch_atomic64_add_return_acquire(1, v);
}
-#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return_acquire
-#endif
-#ifndef arch_atomic64_inc_return_release
+/**
+ * raw_atomic64_fetch_sub_release() - atomic subtract with release ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_sub_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
-arch_atomic64_inc_return_release(atomic64_t *v)
+raw_atomic64_fetch_sub_release(s64 i, atomic64_t *v)
{
- return arch_atomic64_add_return_release(1, v);
-}
-#define arch_atomic64_inc_return_release arch_atomic64_inc_return_release
+#if defined(arch_atomic64_fetch_sub_release)
+ return arch_atomic64_fetch_sub_release(i, v);
+#elif defined(arch_atomic64_fetch_sub_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_fetch_sub_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_sub)
+ return arch_atomic64_fetch_sub(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_sub_release"
#endif
-
-#ifndef arch_atomic64_inc_return_relaxed
-static __always_inline s64
-arch_atomic64_inc_return_relaxed(atomic64_t *v)
-{
- return arch_atomic64_add_return_relaxed(1, v);
}
-#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return_relaxed
-#endif
-
-#else /* arch_atomic64_inc_return_relaxed */
-#ifndef arch_atomic64_inc_return_acquire
+/**
+ * raw_atomic64_fetch_sub_relaxed() - atomic subtract with relaxed ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_sub_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
-arch_atomic64_inc_return_acquire(atomic64_t *v)
+raw_atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
{
- s64 ret = arch_atomic64_inc_return_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return_acquire
+#if defined(arch_atomic64_fetch_sub_relaxed)
+ return arch_atomic64_fetch_sub_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_sub)
+ return arch_atomic64_fetch_sub(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_sub_relaxed"
#endif
+}
-#ifndef arch_atomic64_inc_return_release
-static __always_inline s64
-arch_atomic64_inc_return_release(atomic64_t *v)
+/**
+ * raw_atomic64_inc() - atomic increment with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_inc() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic64_inc(atomic64_t *v)
{
- __atomic_release_fence();
- return arch_atomic64_inc_return_relaxed(v);
-}
-#define arch_atomic64_inc_return_release arch_atomic64_inc_return_release
+#if defined(arch_atomic64_inc)
+ arch_atomic64_inc(v);
+#else
+ raw_atomic64_add(1, v);
#endif
+}
-#ifndef arch_atomic64_inc_return
+/**
+ * raw_atomic64_inc_return() - atomic increment with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_inc_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline s64
-arch_atomic64_inc_return(atomic64_t *v)
+raw_atomic64_inc_return(atomic64_t *v)
{
+#if defined(arch_atomic64_inc_return)
+ return arch_atomic64_inc_return(v);
+#elif defined(arch_atomic64_inc_return_relaxed)
s64 ret;
__atomic_pre_full_fence();
ret = arch_atomic64_inc_return_relaxed(v);
__atomic_post_full_fence();
return ret;
-}
-#define arch_atomic64_inc_return arch_atomic64_inc_return
+#else
+ return raw_atomic64_add_return(1, v);
#endif
+}
-#endif /* arch_atomic64_inc_return_relaxed */
-
-#ifndef arch_atomic64_fetch_inc_relaxed
-#ifdef arch_atomic64_fetch_inc
-#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc
-#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc
-#define arch_atomic64_fetch_inc_relaxed arch_atomic64_fetch_inc
-#endif /* arch_atomic64_fetch_inc */
-
-#ifndef arch_atomic64_fetch_inc
+/**
+ * raw_atomic64_inc_return_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_inc_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline s64
-arch_atomic64_fetch_inc(atomic64_t *v)
+raw_atomic64_inc_return_acquire(atomic64_t *v)
{
- return arch_atomic64_fetch_add(1, v);
-}
-#define arch_atomic64_fetch_inc arch_atomic64_fetch_inc
+#if defined(arch_atomic64_inc_return_acquire)
+ return arch_atomic64_inc_return_acquire(v);
+#elif defined(arch_atomic64_inc_return_relaxed)
+ s64 ret = arch_atomic64_inc_return_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_inc_return)
+ return arch_atomic64_inc_return(v);
+#else
+ return raw_atomic64_add_return_acquire(1, v);
#endif
+}
-#ifndef arch_atomic64_fetch_inc_acquire
+/**
+ * raw_atomic64_inc_return_release() - atomic increment with release ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_inc_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline s64
-arch_atomic64_fetch_inc_acquire(atomic64_t *v)
+raw_atomic64_inc_return_release(atomic64_t *v)
{
- return arch_atomic64_fetch_add_acquire(1, v);
-}
-#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc_acquire
+#if defined(arch_atomic64_inc_return_release)
+ return arch_atomic64_inc_return_release(v);
+#elif defined(arch_atomic64_inc_return_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_inc_return_relaxed(v);
+#elif defined(arch_atomic64_inc_return)
+ return arch_atomic64_inc_return(v);
+#else
+ return raw_atomic64_add_return_release(1, v);
#endif
+}
-#ifndef arch_atomic64_fetch_inc_release
+/**
+ * raw_atomic64_inc_return_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_inc_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline s64
-arch_atomic64_fetch_inc_release(atomic64_t *v)
+raw_atomic64_inc_return_relaxed(atomic64_t *v)
{
- return arch_atomic64_fetch_add_release(1, v);
-}
-#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc_release
+#if defined(arch_atomic64_inc_return_relaxed)
+ return arch_atomic64_inc_return_relaxed(v);
+#elif defined(arch_atomic64_inc_return)
+ return arch_atomic64_inc_return(v);
+#else
+ return raw_atomic64_add_return_relaxed(1, v);
#endif
+}
-#ifndef arch_atomic64_fetch_inc_relaxed
+/**
+ * raw_atomic64_fetch_inc() - atomic increment with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_inc() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
-arch_atomic64_fetch_inc_relaxed(atomic64_t *v)
+raw_atomic64_fetch_inc(atomic64_t *v)
{
- return arch_atomic64_fetch_add_relaxed(1, v);
-}
-#define arch_atomic64_fetch_inc_relaxed arch_atomic64_fetch_inc_relaxed
+#if defined(arch_atomic64_fetch_inc)
+ return arch_atomic64_fetch_inc(v);
+#elif defined(arch_atomic64_fetch_inc_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_inc_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic64_fetch_add(1, v);
#endif
+}
-#else /* arch_atomic64_fetch_inc_relaxed */
-
-#ifndef arch_atomic64_fetch_inc_acquire
+/**
+ * raw_atomic64_fetch_inc_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_inc_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
-arch_atomic64_fetch_inc_acquire(atomic64_t *v)
+raw_atomic64_fetch_inc_acquire(atomic64_t *v)
{
+#if defined(arch_atomic64_fetch_inc_acquire)
+ return arch_atomic64_fetch_inc_acquire(v);
+#elif defined(arch_atomic64_fetch_inc_relaxed)
s64 ret = arch_atomic64_fetch_inc_relaxed(v);
__atomic_acquire_fence();
return ret;
-}
-#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc_acquire
+#elif defined(arch_atomic64_fetch_inc)
+ return arch_atomic64_fetch_inc(v);
+#else
+ return raw_atomic64_fetch_add_acquire(1, v);
#endif
+}
-#ifndef arch_atomic64_fetch_inc_release
+/**
+ * raw_atomic64_fetch_inc_release() - atomic increment with release ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_inc_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
-arch_atomic64_fetch_inc_release(atomic64_t *v)
+raw_atomic64_fetch_inc_release(atomic64_t *v)
{
+#if defined(arch_atomic64_fetch_inc_release)
+ return arch_atomic64_fetch_inc_release(v);
+#elif defined(arch_atomic64_fetch_inc_relaxed)
__atomic_release_fence();
return arch_atomic64_fetch_inc_relaxed(v);
-}
-#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc_release
+#elif defined(arch_atomic64_fetch_inc)
+ return arch_atomic64_fetch_inc(v);
+#else
+ return raw_atomic64_fetch_add_release(1, v);
#endif
+}
-#ifndef arch_atomic64_fetch_inc
+/**
+ * raw_atomic64_fetch_inc_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_inc_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
-arch_atomic64_fetch_inc(atomic64_t *v)
+raw_atomic64_fetch_inc_relaxed(atomic64_t *v)
{
- s64 ret;
- __atomic_pre_full_fence();
- ret = arch_atomic64_fetch_inc_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic64_fetch_inc arch_atomic64_fetch_inc
+#if defined(arch_atomic64_fetch_inc_relaxed)
+ return arch_atomic64_fetch_inc_relaxed(v);
+#elif defined(arch_atomic64_fetch_inc)
+ return arch_atomic64_fetch_inc(v);
+#else
+ return raw_atomic64_fetch_add_relaxed(1, v);
#endif
-
-#endif /* arch_atomic64_fetch_inc_relaxed */
-
-#ifndef arch_atomic64_dec
-static __always_inline void
-arch_atomic64_dec(atomic64_t *v)
-{
- arch_atomic64_sub(1, v);
}
-#define arch_atomic64_dec arch_atomic64_dec
-#endif
-
-#ifndef arch_atomic64_dec_return_relaxed
-#ifdef arch_atomic64_dec_return
-#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return
-#define arch_atomic64_dec_return_release arch_atomic64_dec_return
-#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return
-#endif /* arch_atomic64_dec_return */
-#ifndef arch_atomic64_dec_return
-static __always_inline s64
-arch_atomic64_dec_return(atomic64_t *v)
+/**
+ * raw_atomic64_dec() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_dec() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic64_dec(atomic64_t *v)
{
- return arch_atomic64_sub_return(1, v);
-}
-#define arch_atomic64_dec_return arch_atomic64_dec_return
+#if defined(arch_atomic64_dec)
+ arch_atomic64_dec(v);
+#else
+ raw_atomic64_sub(1, v);
#endif
-
-#ifndef arch_atomic64_dec_return_acquire
-static __always_inline s64
-arch_atomic64_dec_return_acquire(atomic64_t *v)
-{
- return arch_atomic64_sub_return_acquire(1, v);
}
-#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return_acquire
-#endif
-#ifndef arch_atomic64_dec_return_release
+/**
+ * raw_atomic64_dec_return() - atomic decrement with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_dec_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline s64
-arch_atomic64_dec_return_release(atomic64_t *v)
+raw_atomic64_dec_return(atomic64_t *v)
{
- return arch_atomic64_sub_return_release(1, v);
-}
-#define arch_atomic64_dec_return_release arch_atomic64_dec_return_release
+#if defined(arch_atomic64_dec_return)
+ return arch_atomic64_dec_return(v);
+#elif defined(arch_atomic64_dec_return_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_dec_return_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic64_sub_return(1, v);
#endif
-
-#ifndef arch_atomic64_dec_return_relaxed
-static __always_inline s64
-arch_atomic64_dec_return_relaxed(atomic64_t *v)
-{
- return arch_atomic64_sub_return_relaxed(1, v);
}
-#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return_relaxed
-#endif
-#else /* arch_atomic64_dec_return_relaxed */
-
-#ifndef arch_atomic64_dec_return_acquire
+/**
+ * raw_atomic64_dec_return_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_dec_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline s64
-arch_atomic64_dec_return_acquire(atomic64_t *v)
+raw_atomic64_dec_return_acquire(atomic64_t *v)
{
+#if defined(arch_atomic64_dec_return_acquire)
+ return arch_atomic64_dec_return_acquire(v);
+#elif defined(arch_atomic64_dec_return_relaxed)
s64 ret = arch_atomic64_dec_return_relaxed(v);
__atomic_acquire_fence();
return ret;
-}
-#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return_acquire
+#elif defined(arch_atomic64_dec_return)
+ return arch_atomic64_dec_return(v);
+#else
+ return raw_atomic64_sub_return_acquire(1, v);
#endif
+}
-#ifndef arch_atomic64_dec_return_release
+/**
+ * raw_atomic64_dec_return_release() - atomic decrement with release ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_dec_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline s64
-arch_atomic64_dec_return_release(atomic64_t *v)
+raw_atomic64_dec_return_release(atomic64_t *v)
{
+#if defined(arch_atomic64_dec_return_release)
+ return arch_atomic64_dec_return_release(v);
+#elif defined(arch_atomic64_dec_return_relaxed)
__atomic_release_fence();
return arch_atomic64_dec_return_relaxed(v);
+#elif defined(arch_atomic64_dec_return)
+ return arch_atomic64_dec_return(v);
+#else
+ return raw_atomic64_sub_return_release(1, v);
+#endif
}
-#define arch_atomic64_dec_return_release arch_atomic64_dec_return_release
+
+/**
+ * raw_atomic64_dec_return_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_dec_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_dec_return_relaxed(atomic64_t *v)
+{
+#if defined(arch_atomic64_dec_return_relaxed)
+ return arch_atomic64_dec_return_relaxed(v);
+#elif defined(arch_atomic64_dec_return)
+ return arch_atomic64_dec_return(v);
+#else
+ return raw_atomic64_sub_return_relaxed(1, v);
#endif
+}
-#ifndef arch_atomic64_dec_return
+/**
+ * raw_atomic64_fetch_dec() - atomic decrement with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_dec() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
-arch_atomic64_dec_return(atomic64_t *v)
+raw_atomic64_fetch_dec(atomic64_t *v)
{
+#if defined(arch_atomic64_fetch_dec)
+ return arch_atomic64_fetch_dec(v);
+#elif defined(arch_atomic64_fetch_dec_relaxed)
s64 ret;
__atomic_pre_full_fence();
- ret = arch_atomic64_dec_return_relaxed(v);
+ ret = arch_atomic64_fetch_dec_relaxed(v);
__atomic_post_full_fence();
return ret;
-}
-#define arch_atomic64_dec_return arch_atomic64_dec_return
+#else
+ return raw_atomic64_fetch_sub(1, v);
#endif
-
-#endif /* arch_atomic64_dec_return_relaxed */
-
-#ifndef arch_atomic64_fetch_dec_relaxed
-#ifdef arch_atomic64_fetch_dec
-#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec
-#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec
-#define arch_atomic64_fetch_dec_relaxed arch_atomic64_fetch_dec
-#endif /* arch_atomic64_fetch_dec */
-
-#ifndef arch_atomic64_fetch_dec
-static __always_inline s64
-arch_atomic64_fetch_dec(atomic64_t *v)
-{
- return arch_atomic64_fetch_sub(1, v);
}
-#define arch_atomic64_fetch_dec arch_atomic64_fetch_dec
-#endif
-#ifndef arch_atomic64_fetch_dec_acquire
+/**
+ * raw_atomic64_fetch_dec_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_dec_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
-arch_atomic64_fetch_dec_acquire(atomic64_t *v)
+raw_atomic64_fetch_dec_acquire(atomic64_t *v)
{
- return arch_atomic64_fetch_sub_acquire(1, v);
-}
-#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec_acquire
+#if defined(arch_atomic64_fetch_dec_acquire)
+ return arch_atomic64_fetch_dec_acquire(v);
+#elif defined(arch_atomic64_fetch_dec_relaxed)
+ s64 ret = arch_atomic64_fetch_dec_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_fetch_dec)
+ return arch_atomic64_fetch_dec(v);
+#else
+ return raw_atomic64_fetch_sub_acquire(1, v);
#endif
-
-#ifndef arch_atomic64_fetch_dec_release
-static __always_inline s64
-arch_atomic64_fetch_dec_release(atomic64_t *v)
-{
- return arch_atomic64_fetch_sub_release(1, v);
}
-#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec_release
-#endif
-#ifndef arch_atomic64_fetch_dec_relaxed
+/**
+ * raw_atomic64_fetch_dec_release() - atomic decrement with release ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_dec_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
-arch_atomic64_fetch_dec_relaxed(atomic64_t *v)
+raw_atomic64_fetch_dec_release(atomic64_t *v)
{
- return arch_atomic64_fetch_sub_relaxed(1, v);
-}
-#define arch_atomic64_fetch_dec_relaxed arch_atomic64_fetch_dec_relaxed
+#if defined(arch_atomic64_fetch_dec_release)
+ return arch_atomic64_fetch_dec_release(v);
+#elif defined(arch_atomic64_fetch_dec_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_fetch_dec_relaxed(v);
+#elif defined(arch_atomic64_fetch_dec)
+ return arch_atomic64_fetch_dec(v);
+#else
+ return raw_atomic64_fetch_sub_release(1, v);
#endif
+}
-#else /* arch_atomic64_fetch_dec_relaxed */
-
-#ifndef arch_atomic64_fetch_dec_acquire
+/**
+ * raw_atomic64_fetch_dec_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_dec_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
-arch_atomic64_fetch_dec_acquire(atomic64_t *v)
+raw_atomic64_fetch_dec_relaxed(atomic64_t *v)
{
- s64 ret = arch_atomic64_fetch_dec_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec_acquire
+#if defined(arch_atomic64_fetch_dec_relaxed)
+ return arch_atomic64_fetch_dec_relaxed(v);
+#elif defined(arch_atomic64_fetch_dec)
+ return arch_atomic64_fetch_dec(v);
+#else
+ return raw_atomic64_fetch_sub_relaxed(1, v);
#endif
+}
-#ifndef arch_atomic64_fetch_dec_release
-static __always_inline s64
-arch_atomic64_fetch_dec_release(atomic64_t *v)
+/**
+ * raw_atomic64_and() - atomic bitwise AND with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_and() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic64_and(s64 i, atomic64_t *v)
{
- __atomic_release_fence();
- return arch_atomic64_fetch_dec_relaxed(v);
+ arch_atomic64_and(i, v);
}
-#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec_release
-#endif
-#ifndef arch_atomic64_fetch_dec
+/**
+ * raw_atomic64_fetch_and() - atomic bitwise AND with full ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_and() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
-arch_atomic64_fetch_dec(atomic64_t *v)
+raw_atomic64_fetch_and(s64 i, atomic64_t *v)
{
+#if defined(arch_atomic64_fetch_and)
+ return arch_atomic64_fetch_and(i, v);
+#elif defined(arch_atomic64_fetch_and_relaxed)
s64 ret;
__atomic_pre_full_fence();
- ret = arch_atomic64_fetch_dec_relaxed(v);
+ ret = arch_atomic64_fetch_and_relaxed(i, v);
__atomic_post_full_fence();
return ret;
-}
-#define arch_atomic64_fetch_dec arch_atomic64_fetch_dec
+#else
+#error "Unable to define raw_atomic64_fetch_and"
#endif
+}
-#endif /* arch_atomic64_fetch_dec_relaxed */
-
-#ifndef arch_atomic64_fetch_and_relaxed
-#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and
-#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and
-#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and
-#else /* arch_atomic64_fetch_and_relaxed */
-
-#ifndef arch_atomic64_fetch_and_acquire
+/**
+ * raw_atomic64_fetch_and_acquire() - atomic bitwise AND with acquire ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_and_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
-arch_atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
+raw_atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
{
+#if defined(arch_atomic64_fetch_and_acquire)
+ return arch_atomic64_fetch_and_acquire(i, v);
+#elif defined(arch_atomic64_fetch_and_relaxed)
s64 ret = arch_atomic64_fetch_and_relaxed(i, v);
__atomic_acquire_fence();
return ret;
-}
-#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and_acquire
+#elif defined(arch_atomic64_fetch_and)
+ return arch_atomic64_fetch_and(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_and_acquire"
#endif
+}
-#ifndef arch_atomic64_fetch_and_release
+/**
+ * raw_atomic64_fetch_and_release() - atomic bitwise AND with release ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_and_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
-arch_atomic64_fetch_and_release(s64 i, atomic64_t *v)
+raw_atomic64_fetch_and_release(s64 i, atomic64_t *v)
{
+#if defined(arch_atomic64_fetch_and_release)
+ return arch_atomic64_fetch_and_release(i, v);
+#elif defined(arch_atomic64_fetch_and_relaxed)
__atomic_release_fence();
return arch_atomic64_fetch_and_relaxed(i, v);
-}
-#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and_release
+#elif defined(arch_atomic64_fetch_and)
+ return arch_atomic64_fetch_and(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_and_release"
#endif
+}
-#ifndef arch_atomic64_fetch_and
+/**
+ * raw_atomic64_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_and_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
-arch_atomic64_fetch_and(s64 i, atomic64_t *v)
+raw_atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
{
- s64 ret;
- __atomic_pre_full_fence();
- ret = arch_atomic64_fetch_and_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic64_fetch_and arch_atomic64_fetch_and
+#if defined(arch_atomic64_fetch_and_relaxed)
+ return arch_atomic64_fetch_and_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_and)
+ return arch_atomic64_fetch_and(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_and_relaxed"
#endif
-
-#endif /* arch_atomic64_fetch_and_relaxed */
-
-#ifndef arch_atomic64_andnot
-static __always_inline void
-arch_atomic64_andnot(s64 i, atomic64_t *v)
-{
- arch_atomic64_and(~i, v);
}
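
/*
 * Sketch, assuming the helpers above: because fetch_and() returns the old
 * value, AND-ing with zero atomically snapshots and clears a whole set of
 * pending flag bits in a single full-ordered operation. @pending is a
 * hypothetical name.
 */
static __always_inline s64 grab_and_clear_pending(atomic64_t *pending)
{
	return raw_atomic64_fetch_and(0, pending);
}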
-#define arch_atomic64_andnot arch_atomic64_andnot
-#endif
-#ifndef arch_atomic64_fetch_andnot_relaxed
-#ifdef arch_atomic64_fetch_andnot
-#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot
-#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot
-#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot
-#endif /* arch_atomic64_fetch_andnot */
-
-#ifndef arch_atomic64_fetch_andnot
-static __always_inline s64
-arch_atomic64_fetch_andnot(s64 i, atomic64_t *v)
+/**
+ * raw_atomic64_andnot() - atomic bitwise AND NOT with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_andnot() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic64_andnot(s64 i, atomic64_t *v)
{
- return arch_atomic64_fetch_and(~i, v);
-}
-#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot
+#if defined(arch_atomic64_andnot)
+ arch_atomic64_andnot(i, v);
+#else
+ raw_atomic64_and(~i, v);
#endif
-
-#ifndef arch_atomic64_fetch_andnot_acquire
-static __always_inline s64
-arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
-{
- return arch_atomic64_fetch_and_acquire(~i, v);
}
-#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire
-#endif
-#ifndef arch_atomic64_fetch_andnot_release
+/**
+ * raw_atomic64_fetch_andnot() - atomic bitwise AND NOT with full ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & ~@i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_andnot() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
-arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
+raw_atomic64_fetch_andnot(s64 i, atomic64_t *v)
{
- return arch_atomic64_fetch_and_release(~i, v);
-}
-#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release
+#if defined(arch_atomic64_fetch_andnot)
+ return arch_atomic64_fetch_andnot(i, v);
+#elif defined(arch_atomic64_fetch_andnot_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_andnot_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic64_fetch_and(~i, v);
#endif
-
-#ifndef arch_atomic64_fetch_andnot_relaxed
-static __always_inline s64
-arch_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
-{
- return arch_atomic64_fetch_and_relaxed(~i, v);
}
-#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
-#endif
-
-#else /* arch_atomic64_fetch_andnot_relaxed */
-#ifndef arch_atomic64_fetch_andnot_acquire
+/**
+ * raw_atomic64_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & ~@i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_andnot_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
-arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
+raw_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
{
+#if defined(arch_atomic64_fetch_andnot_acquire)
+ return arch_atomic64_fetch_andnot_acquire(i, v);
+#elif defined(arch_atomic64_fetch_andnot_relaxed)
s64 ret = arch_atomic64_fetch_andnot_relaxed(i, v);
__atomic_acquire_fence();
return ret;
-}
-#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire
+#elif defined(arch_atomic64_fetch_andnot)
+ return arch_atomic64_fetch_andnot(i, v);
+#else
+ return raw_atomic64_fetch_and_acquire(~i, v);
#endif
+}
-#ifndef arch_atomic64_fetch_andnot_release
+/**
+ * raw_atomic64_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & ~@i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_andnot_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
-arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
+raw_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
{
+#if defined(arch_atomic64_fetch_andnot_release)
+ return arch_atomic64_fetch_andnot_release(i, v);
+#elif defined(arch_atomic64_fetch_andnot_relaxed)
__atomic_release_fence();
return arch_atomic64_fetch_andnot_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_andnot)
+ return arch_atomic64_fetch_andnot(i, v);
+#else
+ return raw_atomic64_fetch_and_release(~i, v);
+#endif
}
-#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release
+
+/**
+ * raw_atomic64_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_andnot_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_andnot_relaxed)
+ return arch_atomic64_fetch_andnot_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_andnot)
+ return arch_atomic64_fetch_andnot(i, v);
+#else
+ return raw_atomic64_fetch_and_relaxed(~i, v);
#endif
+}
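
/*
 * Sketch: fetch_andnot() makes "clear these flag bits and report whether any
 * were previously set" a single atomic step. @flags and @mask are
 * hypothetical names, not part of this header.
 */
static __always_inline bool clear_flags_were_set(atomic64_t *flags, s64 mask)
{
	return (raw_atomic64_fetch_andnot(mask, flags) & mask) != 0;
}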
-#ifndef arch_atomic64_fetch_andnot
+/**
+ * raw_atomic64_or() - atomic bitwise OR with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_or() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic64_or(s64 i, atomic64_t *v)
+{
+ arch_atomic64_or(i, v);
+}
+
+/**
+ * raw_atomic64_fetch_or() - atomic bitwise OR with full ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v | @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_or() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
-arch_atomic64_fetch_andnot(s64 i, atomic64_t *v)
+raw_atomic64_fetch_or(s64 i, atomic64_t *v)
{
+#if defined(arch_atomic64_fetch_or)
+ return arch_atomic64_fetch_or(i, v);
+#elif defined(arch_atomic64_fetch_or_relaxed)
s64 ret;
__atomic_pre_full_fence();
- ret = arch_atomic64_fetch_andnot_relaxed(i, v);
+ ret = arch_atomic64_fetch_or_relaxed(i, v);
__atomic_post_full_fence();
return ret;
-}
-#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot
+#else
+#error "Unable to define raw_atomic64_fetch_or"
#endif
+}
-#endif /* arch_atomic64_fetch_andnot_relaxed */
-
-#ifndef arch_atomic64_fetch_or_relaxed
-#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or
-#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or
-#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or
-#else /* arch_atomic64_fetch_or_relaxed */
-
-#ifndef arch_atomic64_fetch_or_acquire
+/**
+ * raw_atomic64_fetch_or_acquire() - atomic bitwise OR with acquire ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v | @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_or_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
-arch_atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
+raw_atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
{
+#if defined(arch_atomic64_fetch_or_acquire)
+ return arch_atomic64_fetch_or_acquire(i, v);
+#elif defined(arch_atomic64_fetch_or_relaxed)
s64 ret = arch_atomic64_fetch_or_relaxed(i, v);
__atomic_acquire_fence();
return ret;
-}
-#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or_acquire
+#elif defined(arch_atomic64_fetch_or)
+ return arch_atomic64_fetch_or(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_or_acquire"
#endif
+}
-#ifndef arch_atomic64_fetch_or_release
+/**
+ * raw_atomic64_fetch_or_release() - atomic bitwise OR with release ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v | @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_or_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
-arch_atomic64_fetch_or_release(s64 i, atomic64_t *v)
+raw_atomic64_fetch_or_release(s64 i, atomic64_t *v)
{
+#if defined(arch_atomic64_fetch_or_release)
+ return arch_atomic64_fetch_or_release(i, v);
+#elif defined(arch_atomic64_fetch_or_relaxed)
__atomic_release_fence();
return arch_atomic64_fetch_or_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_or)
+ return arch_atomic64_fetch_or(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_or_release"
+#endif
}
-#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or_release
+
+/**
+ * raw_atomic64_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_or_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_or_relaxed)
+ return arch_atomic64_fetch_or_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_or)
+ return arch_atomic64_fetch_or(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_or_relaxed"
#endif
+}
+
+/**
+ * raw_atomic64_xor() - atomic bitwise XOR with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_xor() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic64_xor(s64 i, atomic64_t *v)
+{
+ arch_atomic64_xor(i, v);
+}
-#ifndef arch_atomic64_fetch_or
+/**
+ * raw_atomic64_fetch_xor() - atomic bitwise XOR with full ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v ^ @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_xor() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
-arch_atomic64_fetch_or(s64 i, atomic64_t *v)
+raw_atomic64_fetch_xor(s64 i, atomic64_t *v)
{
+#if defined(arch_atomic64_fetch_xor)
+ return arch_atomic64_fetch_xor(i, v);
+#elif defined(arch_atomic64_fetch_xor_relaxed)
s64 ret;
__atomic_pre_full_fence();
- ret = arch_atomic64_fetch_or_relaxed(i, v);
+ ret = arch_atomic64_fetch_xor_relaxed(i, v);
__atomic_post_full_fence();
return ret;
-}
-#define arch_atomic64_fetch_or arch_atomic64_fetch_or
+#else
+#error "Unable to define raw_atomic64_fetch_xor"
#endif
+}
-#endif /* arch_atomic64_fetch_or_relaxed */
-
-#ifndef arch_atomic64_fetch_xor_relaxed
-#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor
-#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor
-#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor
-#else /* arch_atomic64_fetch_xor_relaxed */
-
-#ifndef arch_atomic64_fetch_xor_acquire
+/**
+ * raw_atomic64_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v ^ @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_xor_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
-arch_atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
+raw_atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
{
+#if defined(arch_atomic64_fetch_xor_acquire)
+ return arch_atomic64_fetch_xor_acquire(i, v);
+#elif defined(arch_atomic64_fetch_xor_relaxed)
s64 ret = arch_atomic64_fetch_xor_relaxed(i, v);
__atomic_acquire_fence();
return ret;
-}
-#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor_acquire
+#elif defined(arch_atomic64_fetch_xor)
+ return arch_atomic64_fetch_xor(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_xor_acquire"
#endif
+}
-#ifndef arch_atomic64_fetch_xor_release
+/**
+ * raw_atomic64_fetch_xor_release() - atomic bitwise XOR with release ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v ^ @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_xor_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
-arch_atomic64_fetch_xor_release(s64 i, atomic64_t *v)
+raw_atomic64_fetch_xor_release(s64 i, atomic64_t *v)
{
+#if defined(arch_atomic64_fetch_xor_release)
+ return arch_atomic64_fetch_xor_release(i, v);
+#elif defined(arch_atomic64_fetch_xor_relaxed)
__atomic_release_fence();
return arch_atomic64_fetch_xor_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_xor)
+ return arch_atomic64_fetch_xor(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_xor_release"
+#endif
}
-#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor_release
+
+/**
+ * raw_atomic64_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_xor_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_xor_relaxed)
+ return arch_atomic64_fetch_xor_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_xor)
+ return arch_atomic64_fetch_xor(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_xor_relaxed"
#endif
+}
-#ifndef arch_atomic64_fetch_xor
+/**
+ * raw_atomic64_xchg() - atomic exchange with full ordering
+ * @v: pointer to atomic64_t
+ * @new: s64 value to assign
+ *
+ * Atomically updates @v to @new with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_xchg() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
-arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
+raw_atomic64_xchg(atomic64_t *v, s64 new)
{
+#if defined(arch_atomic64_xchg)
+ return arch_atomic64_xchg(v, new);
+#elif defined(arch_atomic64_xchg_relaxed)
s64 ret;
__atomic_pre_full_fence();
- ret = arch_atomic64_fetch_xor_relaxed(i, v);
+ ret = arch_atomic64_xchg_relaxed(v, new);
__atomic_post_full_fence();
return ret;
-}
-#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
+#else
+ return raw_xchg(&v->counter, new);
#endif
+}
-#endif /* arch_atomic64_fetch_xor_relaxed */
-
-#ifndef arch_atomic64_xchg_relaxed
-#define arch_atomic64_xchg_acquire arch_atomic64_xchg
-#define arch_atomic64_xchg_release arch_atomic64_xchg
-#define arch_atomic64_xchg_relaxed arch_atomic64_xchg
-#else /* arch_atomic64_xchg_relaxed */
-
-#ifndef arch_atomic64_xchg_acquire
+/**
+ * raw_atomic64_xchg_acquire() - atomic exchange with acquire ordering
+ * @v: pointer to atomic64_t
+ * @new: s64 value to assign
+ *
+ * Atomically updates @v to @new with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_xchg_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
-arch_atomic64_xchg_acquire(atomic64_t *v, s64 i)
+raw_atomic64_xchg_acquire(atomic64_t *v, s64 new)
{
- s64 ret = arch_atomic64_xchg_relaxed(v, i);
+#if defined(arch_atomic64_xchg_acquire)
+ return arch_atomic64_xchg_acquire(v, new);
+#elif defined(arch_atomic64_xchg_relaxed)
+ s64 ret = arch_atomic64_xchg_relaxed(v, new);
__atomic_acquire_fence();
return ret;
-}
-#define arch_atomic64_xchg_acquire arch_atomic64_xchg_acquire
+#elif defined(arch_atomic64_xchg)
+ return arch_atomic64_xchg(v, new);
+#else
+ return raw_xchg_acquire(&v->counter, new);
#endif
+}
-#ifndef arch_atomic64_xchg_release
+/**
+ * raw_atomic64_xchg_release() - atomic exchange with release ordering
+ * @v: pointer to atomic64_t
+ * @new: s64 value to assign
+ *
+ * Atomically updates @v to @new with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_xchg_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
-arch_atomic64_xchg_release(atomic64_t *v, s64 i)
+raw_atomic64_xchg_release(atomic64_t *v, s64 new)
{
+#if defined(arch_atomic64_xchg_release)
+ return arch_atomic64_xchg_release(v, new);
+#elif defined(arch_atomic64_xchg_relaxed)
__atomic_release_fence();
- return arch_atomic64_xchg_relaxed(v, i);
+ return arch_atomic64_xchg_relaxed(v, new);
+#elif defined(arch_atomic64_xchg)
+ return arch_atomic64_xchg(v, new);
+#else
+ return raw_xchg_release(&v->counter, new);
+#endif
}
-#define arch_atomic64_xchg_release arch_atomic64_xchg_release
+
+/**
+ * raw_atomic64_xchg_relaxed() - atomic exchange with relaxed ordering
+ * @v: pointer to atomic64_t
+ * @new: s64 value to assign
+ *
+ * Atomically updates @v to @new with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_xchg_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_xchg_relaxed(atomic64_t *v, s64 new)
+{
+#if defined(arch_atomic64_xchg_relaxed)
+ return arch_atomic64_xchg_relaxed(v, new);
+#elif defined(arch_atomic64_xchg)
+ return arch_atomic64_xchg(v, new);
+#else
+ return raw_xchg_relaxed(&v->counter, new);
#endif
+}
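
/*
 * Sketch of a release/acquire hand-off built on the xchg flavours above: the
 * publisher's prior stores are ordered before the release exchange, and the
 * consumer's subsequent loads are ordered after the acquire exchange, so a
 * non-zero token taken below implies the data it guards is visible. @slot is
 * a hypothetical shared atomic64_t.
 */
static __always_inline void publish_token(atomic64_t *slot, s64 token)
{
	raw_atomic64_xchg_release(slot, token);
}

static __always_inline s64 take_token(atomic64_t *slot)
{
	return raw_atomic64_xchg_acquire(slot, 0);
}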
-#ifndef arch_atomic64_xchg
+/**
+ * raw_atomic64_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic64_t
+ * @old: s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_cmpxchg() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
-arch_atomic64_xchg(atomic64_t *v, s64 i)
+raw_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
+#if defined(arch_atomic64_cmpxchg)
+ return arch_atomic64_cmpxchg(v, old, new);
+#elif defined(arch_atomic64_cmpxchg_relaxed)
s64 ret;
__atomic_pre_full_fence();
- ret = arch_atomic64_xchg_relaxed(v, i);
+ ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
__atomic_post_full_fence();
return ret;
-}
-#define arch_atomic64_xchg arch_atomic64_xchg
+#else
+ return raw_cmpxchg(&v->counter, old, new);
#endif
+}
-#endif /* arch_atomic64_xchg_relaxed */
-
-#ifndef arch_atomic64_cmpxchg_relaxed
-#define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg
-#define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg
-#define arch_atomic64_cmpxchg_relaxed arch_atomic64_cmpxchg
-#else /* arch_atomic64_cmpxchg_relaxed */
-
-#ifndef arch_atomic64_cmpxchg_acquire
+/**
+ * raw_atomic64_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic64_t
+ * @old: s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_cmpxchg_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
-arch_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
+raw_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
{
+#if defined(arch_atomic64_cmpxchg_acquire)
+ return arch_atomic64_cmpxchg_acquire(v, old, new);
+#elif defined(arch_atomic64_cmpxchg_relaxed)
s64 ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
__atomic_acquire_fence();
return ret;
-}
-#define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg_acquire
+#elif defined(arch_atomic64_cmpxchg)
+ return arch_atomic64_cmpxchg(v, old, new);
+#else
+ return raw_cmpxchg_acquire(&v->counter, old, new);
#endif
+}
-#ifndef arch_atomic64_cmpxchg_release
+/**
+ * raw_atomic64_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic64_t
+ * @old: s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_cmpxchg_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
-arch_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
+raw_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
{
+#if defined(arch_atomic64_cmpxchg_release)
+ return arch_atomic64_cmpxchg_release(v, old, new);
+#elif defined(arch_atomic64_cmpxchg_relaxed)
__atomic_release_fence();
return arch_atomic64_cmpxchg_relaxed(v, old, new);
-}
-#define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg_release
+#elif defined(arch_atomic64_cmpxchg)
+ return arch_atomic64_cmpxchg(v, old, new);
+#else
+ return raw_cmpxchg_release(&v->counter, old, new);
#endif
+}
-#ifndef arch_atomic64_cmpxchg
+/**
+ * raw_atomic64_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic64_t
+ * @old: s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_cmpxchg_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
-arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+raw_atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
{
- s64 ret;
- __atomic_pre_full_fence();
- ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
+#if defined(arch_atomic64_cmpxchg_relaxed)
+ return arch_atomic64_cmpxchg_relaxed(v, old, new);
+#elif defined(arch_atomic64_cmpxchg)
+ return arch_atomic64_cmpxchg(v, old, new);
+#else
+ return raw_cmpxchg_relaxed(&v->counter, old, new);
#endif
+}
-#endif /* arch_atomic64_cmpxchg_relaxed */
-
-#ifndef arch_atomic64_try_cmpxchg_relaxed
-#ifdef arch_atomic64_try_cmpxchg
-#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg
-#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg
-#define arch_atomic64_try_cmpxchg_relaxed arch_atomic64_try_cmpxchg
-#endif /* arch_atomic64_try_cmpxchg */
-
-#ifndef arch_atomic64_try_cmpxchg
+/**
+ * raw_atomic64_try_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic64_t
+ * @old: pointer to s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, updates @old to the current value of @v.
+ *
+ * Safe to use in noinstr code; prefer atomic64_try_cmpxchg() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
static __always_inline bool
-arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+raw_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
+#if defined(arch_atomic64_try_cmpxchg)
+ return arch_atomic64_try_cmpxchg(v, old, new);
+#elif defined(arch_atomic64_try_cmpxchg_relaxed)
+ bool ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+ __atomic_post_full_fence();
+ return ret;
+#else
s64 r, o = *old;
- r = arch_atomic64_cmpxchg(v, o, new);
+ r = raw_atomic64_cmpxchg(v, o, new);
if (unlikely(r != o))
*old = r;
return likely(r == o);
-}
-#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
#endif
+}
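
/*
 * Sketch of the idiomatic try_cmpxchg() loop shape: on failure @old is
 * updated with the current value of @v, so the loop never needs an explicit
 * re-read. saturating_inc() is a hypothetical helper that caps the counter
 * at @max.
 */
static __always_inline s64 saturating_inc(atomic64_t *v, s64 max)
{
	s64 old = raw_atomic64_read(v);

	do {
		if (old >= max)
			return old;
	} while (!raw_atomic64_try_cmpxchg(v, &old, old + 1));

	return old + 1;
}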
-#ifndef arch_atomic64_try_cmpxchg_acquire
+/**
+ * raw_atomic64_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic64_t
+ * @old: pointer to s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, updates @old to the current value of @v.
+ *
+ * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_acquire() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
static __always_inline bool
-arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
+raw_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
{
+#if defined(arch_atomic64_try_cmpxchg_acquire)
+ return arch_atomic64_try_cmpxchg_acquire(v, old, new);
+#elif defined(arch_atomic64_try_cmpxchg_relaxed)
+ bool ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_try_cmpxchg)
+ return arch_atomic64_try_cmpxchg(v, old, new);
+#else
s64 r, o = *old;
- r = arch_atomic64_cmpxchg_acquire(v, o, new);
+ r = raw_atomic64_cmpxchg_acquire(v, o, new);
if (unlikely(r != o))
*old = r;
return likely(r == o);
-}
-#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg_acquire
#endif
+}
-#ifndef arch_atomic64_try_cmpxchg_release
+/**
+ * raw_atomic64_try_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic64_t
+ * @old: pointer to s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, updates @old to the current value of @v.
+ *
+ * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_release() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
static __always_inline bool
-arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
+raw_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
{
+#if defined(arch_atomic64_try_cmpxchg_release)
+ return arch_atomic64_try_cmpxchg_release(v, old, new);
+#elif defined(arch_atomic64_try_cmpxchg_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+#elif defined(arch_atomic64_try_cmpxchg)
+ return arch_atomic64_try_cmpxchg(v, old, new);
+#else
s64 r, o = *old;
- r = arch_atomic64_cmpxchg_release(v, o, new);
+ r = raw_atomic64_cmpxchg_release(v, o, new);
if (unlikely(r != o))
*old = r;
return likely(r == o);
-}
-#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg_release
#endif
+}
-#ifndef arch_atomic64_try_cmpxchg_relaxed
+/**
+ * raw_atomic64_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic64_t
+ * @old: pointer to s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, updates @old to the current value of @v.
+ *
+ * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_relaxed() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
static __always_inline bool
-arch_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
+raw_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
{
+#if defined(arch_atomic64_try_cmpxchg_relaxed)
+ return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+#elif defined(arch_atomic64_try_cmpxchg)
+ return arch_atomic64_try_cmpxchg(v, old, new);
+#else
s64 r, o = *old;
- r = arch_atomic64_cmpxchg_relaxed(v, o, new);
+ r = raw_atomic64_cmpxchg_relaxed(v, o, new);
if (unlikely(r != o))
*old = r;
return likely(r == o);
-}
-#define arch_atomic64_try_cmpxchg_relaxed arch_atomic64_try_cmpxchg_relaxed
-#endif
-
-#else /* arch_atomic64_try_cmpxchg_relaxed */
-
-#ifndef arch_atomic64_try_cmpxchg_acquire
-static __always_inline bool
-arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
-{
- bool ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg_acquire
-#endif
-
-#ifndef arch_atomic64_try_cmpxchg_release
-static __always_inline bool
-arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
-{
- __atomic_release_fence();
- return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
-}
-#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg_release
#endif
-
-#ifndef arch_atomic64_try_cmpxchg
-static __always_inline bool
-arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
-{
- bool ret;
- __atomic_pre_full_fence();
- ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
- __atomic_post_full_fence();
- return ret;
}
-#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
-#endif
-#endif /* arch_atomic64_try_cmpxchg_relaxed */
-
-#ifndef arch_atomic64_sub_and_test
/**
- * arch_atomic64_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic64_t
+ * raw_atomic64_sub_and_test() - atomic subtract and test if zero with full ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_sub_and_test() elsewhere.
*
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
*/
static __always_inline bool
-arch_atomic64_sub_and_test(s64 i, atomic64_t *v)
+raw_atomic64_sub_and_test(s64 i, atomic64_t *v)
{
- return arch_atomic64_sub_return(i, v) == 0;
-}
-#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
+#if defined(arch_atomic64_sub_and_test)
+ return arch_atomic64_sub_and_test(i, v);
+#else
+ return raw_atomic64_sub_return(i, v) == 0;
#endif
+}
-#ifndef arch_atomic64_dec_and_test
/**
- * arch_atomic64_dec_and_test - decrement and test
- * @v: pointer of type atomic64_t
+ * raw_atomic64_dec_and_test() - atomic decrement and test if zero with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_dec_and_test() elsewhere.
*
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
*/
static __always_inline bool
-arch_atomic64_dec_and_test(atomic64_t *v)
+raw_atomic64_dec_and_test(atomic64_t *v)
{
- return arch_atomic64_dec_return(v) == 0;
-}
-#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
+#if defined(arch_atomic64_dec_and_test)
+ return arch_atomic64_dec_and_test(v);
+#else
+ return raw_atomic64_dec_return(v) == 0;
#endif
+}
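
/*
 * Sketch of the classic last-reference pattern built on dec_and_test(); the
 * full ordering guarantees that all prior uses of the object happen before
 * the free. struct obj and obj_free() are hypothetical, for illustration
 * only.
 */
struct obj {
	atomic64_t refs;
};

/* hypothetical destructor for the sketch */
static inline void obj_free(struct obj *o) { }

static __always_inline void obj_put(struct obj *o)
{
	if (raw_atomic64_dec_and_test(&o->refs))
		obj_free(o);
}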
-#ifndef arch_atomic64_inc_and_test
/**
- * arch_atomic64_inc_and_test - increment and test
- * @v: pointer of type atomic64_t
+ * raw_atomic64_inc_and_test() - atomic increment and test if zero with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
*
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
+ * Safe to use in noinstr code; prefer atomic64_inc_and_test() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
*/
static __always_inline bool
-arch_atomic64_inc_and_test(atomic64_t *v)
+raw_atomic64_inc_and_test(atomic64_t *v)
{
- return arch_atomic64_inc_return(v) == 0;
-}
-#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
+#if defined(arch_atomic64_inc_and_test)
+ return arch_atomic64_inc_and_test(v);
+#else
+ return raw_atomic64_inc_return(v) == 0;
#endif
+}
-#ifndef arch_atomic64_add_negative_relaxed
-#ifdef arch_atomic64_add_negative
-#define arch_atomic64_add_negative_acquire arch_atomic64_add_negative
-#define arch_atomic64_add_negative_release arch_atomic64_add_negative
-#define arch_atomic64_add_negative_relaxed arch_atomic64_add_negative
-#endif /* arch_atomic64_add_negative */
-
-#ifndef arch_atomic64_add_negative
/**
- * arch_atomic64_add_negative - Add and test if negative
- * @i: integer value to add
- * @v: pointer of type atomic64_t
+ * raw_atomic64_add_negative() - atomic add and test if negative with full ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_add_negative() elsewhere.
*
- * Atomically adds @i to @v and returns true if the result is negative,
- * or false when the result is greater than or equal to zero.
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
*/
static __always_inline bool
-arch_atomic64_add_negative(s64 i, atomic64_t *v)
+raw_atomic64_add_negative(s64 i, atomic64_t *v)
{
- return arch_atomic64_add_return(i, v) < 0;
-}
-#define arch_atomic64_add_negative arch_atomic64_add_negative
+#if defined(arch_atomic64_add_negative)
+ return arch_atomic64_add_negative(i, v);
+#elif defined(arch_atomic64_add_negative_relaxed)
+ bool ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_add_negative_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic64_add_return(i, v) < 0;
#endif
+}
-#ifndef arch_atomic64_add_negative_acquire
/**
- * arch_atomic64_add_negative_acquire - Add and test if negative
- * @i: integer value to add
- * @v: pointer of type atomic64_t
+ * raw_atomic64_add_negative_acquire() - atomic add and test if negative with acquire ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
*
- * Atomically adds @i to @v and returns true if the result is negative,
- * or false when the result is greater than or equal to zero.
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_add_negative_acquire() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
*/
static __always_inline bool
-arch_atomic64_add_negative_acquire(s64 i, atomic64_t *v)
+raw_atomic64_add_negative_acquire(s64 i, atomic64_t *v)
{
- return arch_atomic64_add_return_acquire(i, v) < 0;
-}
-#define arch_atomic64_add_negative_acquire arch_atomic64_add_negative_acquire
+#if defined(arch_atomic64_add_negative_acquire)
+ return arch_atomic64_add_negative_acquire(i, v);
+#elif defined(arch_atomic64_add_negative_relaxed)
+ bool ret = arch_atomic64_add_negative_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_add_negative)
+ return arch_atomic64_add_negative(i, v);
+#else
+ return raw_atomic64_add_return_acquire(i, v) < 0;
#endif
+}
-#ifndef arch_atomic64_add_negative_release
/**
- * arch_atomic64_add_negative_release - Add and test if negative
- * @i: integer value to add
- * @v: pointer of type atomic64_t
+ * raw_atomic64_add_negative_release() - atomic add and test if negative with release ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
*
- * Atomically adds @i to @v and returns true if the result is negative,
- * or false when the result is greater than or equal to zero.
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_add_negative_release() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
*/
static __always_inline bool
-arch_atomic64_add_negative_release(s64 i, atomic64_t *v)
+raw_atomic64_add_negative_release(s64 i, atomic64_t *v)
{
- return arch_atomic64_add_return_release(i, v) < 0;
-}
-#define arch_atomic64_add_negative_release arch_atomic64_add_negative_release
+#if defined(arch_atomic64_add_negative_release)
+ return arch_atomic64_add_negative_release(i, v);
+#elif defined(arch_atomic64_add_negative_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_add_negative_relaxed(i, v);
+#elif defined(arch_atomic64_add_negative)
+ return arch_atomic64_add_negative(i, v);
+#else
+ return raw_atomic64_add_return_release(i, v) < 0;
#endif
+}
-#ifndef arch_atomic64_add_negative_relaxed
/**
- * arch_atomic64_add_negative_relaxed - Add and test if negative
- * @i: integer value to add
- * @v: pointer of type atomic64_t
+ * raw_atomic64_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
*
- * Atomically adds @i to @v and returns true if the result is negative,
- * or false when the result is greater than or equal to zero.
+ * Safe to use in noinstr code; prefer atomic64_add_negative_relaxed() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
*/
static __always_inline bool
-arch_atomic64_add_negative_relaxed(s64 i, atomic64_t *v)
-{
- return arch_atomic64_add_return_relaxed(i, v) < 0;
-}
-#define arch_atomic64_add_negative_relaxed arch_atomic64_add_negative_relaxed
-#endif
-
-#else /* arch_atomic64_add_negative_relaxed */
-
-#ifndef arch_atomic64_add_negative_acquire
-static __always_inline bool
-arch_atomic64_add_negative_acquire(s64 i, atomic64_t *v)
-{
- bool ret = arch_atomic64_add_negative_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic64_add_negative_acquire arch_atomic64_add_negative_acquire
-#endif
-
-#ifndef arch_atomic64_add_negative_release
-static __always_inline bool
-arch_atomic64_add_negative_release(s64 i, atomic64_t *v)
+raw_atomic64_add_negative_relaxed(s64 i, atomic64_t *v)
{
- __atomic_release_fence();
+#if defined(arch_atomic64_add_negative_relaxed)
return arch_atomic64_add_negative_relaxed(i, v);
-}
-#define arch_atomic64_add_negative_release arch_atomic64_add_negative_release
+#elif defined(arch_atomic64_add_negative)
+ return arch_atomic64_add_negative(i, v);
+#else
+ return raw_atomic64_add_return_relaxed(i, v) < 0;
#endif
-
-#ifndef arch_atomic64_add_negative
-static __always_inline bool
-arch_atomic64_add_negative(s64 i, atomic64_t *v)
-{
- bool ret;
- __atomic_pre_full_fence();
- ret = arch_atomic64_add_negative_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
}
-#define arch_atomic64_add_negative arch_atomic64_add_negative
-#endif
-
-#endif /* arch_atomic64_add_negative_relaxed */
-#ifndef arch_atomic64_fetch_add_unless
/**
- * arch_atomic64_fetch_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
+ * raw_atomic64_fetch_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic64_t
+ * @a: s64 value to add
+ * @u: s64 value to compare with
*
- * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns original value of @v
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_add_unless() elsewhere.
+ *
+ * Return: The original value of @v.
*/
static __always_inline s64
-arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+raw_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
- s64 c = arch_atomic64_read(v);
+#if defined(arch_atomic64_fetch_add_unless)
+ return arch_atomic64_fetch_add_unless(v, a, u);
+#else
+ s64 c = raw_atomic64_read(v);
do {
if (unlikely(c == u))
break;
- } while (!arch_atomic64_try_cmpxchg(v, &c, c + a));
+ } while (!raw_atomic64_try_cmpxchg(v, &c, c + a));
return c;
-}
-#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
#endif
+}
-#ifndef arch_atomic64_add_unless
/**
- * arch_atomic64_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
+ * raw_atomic64_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic64_t
+ * @a: s64 value to add
+ * @u: s64 value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_add_unless() elsewhere.
*
- * Atomically adds @a to @v, if @v was not already @u.
- * Returns true if the addition was done.
+ * Return: @true if @v was updated, @false otherwise.
*/
static __always_inline bool
-arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
+raw_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{
- return arch_atomic64_fetch_add_unless(v, a, u) != u;
-}
-#define arch_atomic64_add_unless arch_atomic64_add_unless
+#if defined(arch_atomic64_add_unless)
+ return arch_atomic64_add_unless(v, a, u);
+#else
+ return raw_atomic64_fetch_add_unless(v, a, u) != u;
#endif
+}
-#ifndef arch_atomic64_inc_not_zero
/**
- * arch_atomic64_inc_not_zero - increment unless the number is zero
- * @v: pointer of type atomic64_t
+ * raw_atomic64_inc_not_zero() - atomic increment unless zero with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_inc_not_zero() elsewhere.
*
- * Atomically increments @v by 1, if @v is non-zero.
- * Returns true if the increment was done.
+ * Return: @true if @v was updated, @false otherwise.
*/
static __always_inline bool
-arch_atomic64_inc_not_zero(atomic64_t *v)
+raw_atomic64_inc_not_zero(atomic64_t *v)
{
- return arch_atomic64_add_unless(v, 1, 0);
-}
-#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
+#if defined(arch_atomic64_inc_not_zero)
+ return arch_atomic64_inc_not_zero(v);
+#else
+ return raw_atomic64_add_unless(v, 1, 0);
#endif
+}
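
/*
 * Sketch of the matching conditional get for a put() pattern like the one
 * above: a new reference may only be taken while at least one is still held,
 * which is exactly what inc_not_zero() tests and performs atomically.
 */
static __always_inline bool ref_tryget(atomic64_t *refs)
{
	return raw_atomic64_inc_not_zero(refs);
}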
-#ifndef arch_atomic64_inc_unless_negative
+/**
+ * raw_atomic64_inc_unless_negative() - atomic increment unless negative with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_inc_unless_negative() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
static __always_inline bool
-arch_atomic64_inc_unless_negative(atomic64_t *v)
+raw_atomic64_inc_unless_negative(atomic64_t *v)
{
- s64 c = arch_atomic64_read(v);
+#if defined(arch_atomic64_inc_unless_negative)
+ return arch_atomic64_inc_unless_negative(v);
+#else
+ s64 c = raw_atomic64_read(v);
do {
if (unlikely(c < 0))
return false;
- } while (!arch_atomic64_try_cmpxchg(v, &c, c + 1));
+ } while (!raw_atomic64_try_cmpxchg(v, &c, c + 1));
return true;
-}
-#define arch_atomic64_inc_unless_negative arch_atomic64_inc_unless_negative
#endif
+}
-#ifndef arch_atomic64_dec_unless_positive
+/**
+ * raw_atomic64_dec_unless_positive() - atomic decrement unless positive with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_dec_unless_positive() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
static __always_inline bool
-arch_atomic64_dec_unless_positive(atomic64_t *v)
+raw_atomic64_dec_unless_positive(atomic64_t *v)
{
- s64 c = arch_atomic64_read(v);
+#if defined(arch_atomic64_dec_unless_positive)
+ return arch_atomic64_dec_unless_positive(v);
+#else
+ s64 c = raw_atomic64_read(v);
do {
if (unlikely(c > 0))
return false;
- } while (!arch_atomic64_try_cmpxchg(v, &c, c - 1));
+ } while (!raw_atomic64_try_cmpxchg(v, &c, c - 1));
return true;
-}
-#define arch_atomic64_dec_unless_positive arch_atomic64_dec_unless_positive
#endif
+}
-#ifndef arch_atomic64_dec_if_positive
+/**
+ * raw_atomic64_dec_if_positive() - atomic decrement if positive with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_dec_if_positive() elsewhere.
+ *
+ * Return: The original value of @v minus one, regardless of whether @v was updated.
+ */
static __always_inline s64
-arch_atomic64_dec_if_positive(atomic64_t *v)
+raw_atomic64_dec_if_positive(atomic64_t *v)
{
- s64 dec, c = arch_atomic64_read(v);
+#if defined(arch_atomic64_dec_if_positive)
+ return arch_atomic64_dec_if_positive(v);
+#else
+ s64 dec, c = raw_atomic64_read(v);
do {
dec = c - 1;
if (unlikely(dec < 0))
break;
- } while (!arch_atomic64_try_cmpxchg(v, &c, dec));
+ } while (!raw_atomic64_try_cmpxchg(v, &c, dec));
return dec;
-}
-#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
#endif
+}
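
/*
 * Illustrative sketch (not part of this patch): consuming one token from a
 * 64-bit budget. raw_atomic64_dec_if_positive() returns the old value minus
 * one, so a negative result means no token was available and @v was left
 * untouched. The helper name is hypothetical.
 */
static inline bool example_take_token(atomic64_t *budget)
{
	return raw_atomic64_dec_if_positive(budget) >= 0;
}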
#endif /* _LINUX_ATOMIC_FALLBACK_H */
-// ad2e2b4d168dbc60a73922616047a9bfa446af36
+// eec048affea735b8464f58e6d96992101f8f85f1
diff --git a/include/linux/atomic/atomic-instrumented.h b/include/linux/atomic/atomic-instrumented.h
index 03a232a1fa57..54d7bbe0aeaa 100644
--- a/include/linux/atomic/atomic-instrumented.h
+++ b/include/linux/atomic/atomic-instrumented.h
@@ -4,15 +4,10 @@
// DO NOT MODIFY THIS FILE DIRECTLY
/*
- * This file provides wrappers with KASAN instrumentation for atomic operations.
- * To use this functionality an arch's atomic.h file needs to define all
- * atomic operations with arch_ prefix (e.g. arch_atomic_read()) and include
- * this file at the end. This file provides atomic_read() that forwards to
- * arch_atomic_read() for actual atomic operation.
- * Note: if an arch atomic operation is implemented by means of other atomic
- * operations (e.g. atomic_read()/atomic_cmpxchg() loop), then it needs to use
- * arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid
- * double instrumentation.
+ * This file provides atomic operations with explicit instrumentation (e.g.
+ * KASAN, KCSAN), which should be used unless it is necessary to avoid
+ * instrumentation. Where it is necessary to avoid instrumentation, the
+ * raw_atomic*() operations should be used.
*/
#ifndef _LINUX_ATOMIC_INSTRUMENTED_H
#define _LINUX_ATOMIC_INSTRUMENTED_H
@@ -21,1927 +16,4696 @@
#include <linux/compiler.h>
#include <linux/instrumented.h>
+/**
+ * atomic_read() - atomic load with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically loads the value of @v with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_read() there.
+ *
+ * Return: The value loaded from @v.
+ */
static __always_inline int
atomic_read(const atomic_t *v)
{
instrument_atomic_read(v, sizeof(*v));
- return arch_atomic_read(v);
-}
-
+ return raw_atomic_read(v);
+}
+
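/*
 * Illustrative sketch (not part of this patch): the instrumented accessors
 * are the default choice; only noinstr code (e.g. early entry paths) should
 * reach for the raw_ forms. The helper name is hypothetical.
 */
static inline int example_peek_counter(const atomic_t *ctr)
{
	return atomic_read(ctr);	/* KASAN/KCSAN-visible load */
}
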
+/**
+ * atomic_read_acquire() - atomic load with acquire ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically loads the value of @v with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_read_acquire() there.
+ *
+ * Return: The value loaded from @v.
+ */
static __always_inline int
atomic_read_acquire(const atomic_t *v)
{
instrument_atomic_read(v, sizeof(*v));
- return arch_atomic_read_acquire(v);
-}
-
+ return raw_atomic_read_acquire(v);
+}
+
+/**
+ * atomic_set() - atomic set with relaxed ordering
+ * @v: pointer to atomic_t
+ * @i: int value to assign
+ *
+ * Atomically sets @v to @i with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_set() there.
+ *
+ * Return: Nothing.
+ */
static __always_inline void
atomic_set(atomic_t *v, int i)
{
instrument_atomic_write(v, sizeof(*v));
- arch_atomic_set(v, i);
-}
-
+ raw_atomic_set(v, i);
+}
+
+/**
+ * atomic_set_release() - atomic set with release ordering
+ * @v: pointer to atomic_t
+ * @i: int value to assign
+ *
+ * Atomically sets @v to @i with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_set_release() there.
+ *
+ * Return: Nothing.
+ */
static __always_inline void
atomic_set_release(atomic_t *v, int i)
{
kcsan_release();
instrument_atomic_write(v, sizeof(*v));
- arch_atomic_set_release(v, i);
-}
-
+ raw_atomic_set_release(v, i);
+}
+
+/**
+ * atomic_add() - atomic add with relaxed ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_add() there.
+ *
+ * Return: Nothing.
+ */
static __always_inline void
atomic_add(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- arch_atomic_add(i, v);
+ raw_atomic_add(i, v);
}
+/**
+ * atomic_add_return() - atomic add with full ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_add_return() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline int
atomic_add_return(int i, atomic_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_add_return(i, v);
+ return raw_atomic_add_return(i, v);
}
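
/*
 * Illustrative sketch (not part of this patch): atomic_add_return() is the
 * natural fit when the caller needs the post-add total, e.g. to test the
 * new usage against a limit. Names and the limit check are hypothetical.
 */
static inline bool example_charge(atomic_t *usage, int amount, int limit)
{
	return atomic_add_return(amount, usage) > limit;
}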
+/**
+ * atomic_add_return_acquire() - atomic add with acquire ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_add_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline int
atomic_add_return_acquire(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_add_return_acquire(i, v);
+ return raw_atomic_add_return_acquire(i, v);
}
+/**
+ * atomic_add_return_release() - atomic add with release ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_add_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline int
atomic_add_return_release(int i, atomic_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_add_return_release(i, v);
+ return raw_atomic_add_return_release(i, v);
}
+/**
+ * atomic_add_return_relaxed() - atomic add with relaxed ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_add_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline int
atomic_add_return_relaxed(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_add_return_relaxed(i, v);
+ return raw_atomic_add_return_relaxed(i, v);
}
+/**
+ * atomic_fetch_add() - atomic add with full ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_add() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
atomic_fetch_add(int i, atomic_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_add(i, v);
+ return raw_atomic_fetch_add(i, v);
}
+/**
+ * atomic_fetch_add_acquire() - atomic add with acquire ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_add_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
atomic_fetch_add_acquire(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_add_acquire(i, v);
+ return raw_atomic_fetch_add_acquire(i, v);
}
+/**
+ * atomic_fetch_add_release() - atomic add with release ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_add_release() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
atomic_fetch_add_release(int i, atomic_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_add_release(i, v);
+ return raw_atomic_fetch_add_release(i, v);
}
+/**
+ * atomic_fetch_add_relaxed() - atomic add with relaxed ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_add_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
atomic_fetch_add_relaxed(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_add_relaxed(i, v);
+ return raw_atomic_fetch_add_relaxed(i, v);
}
+/**
+ * atomic_sub() - atomic subtract with relaxed ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_sub() there.
+ *
+ * Return: Nothing.
+ */
static __always_inline void
atomic_sub(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- arch_atomic_sub(i, v);
+ raw_atomic_sub(i, v);
}
+/**
+ * atomic_sub_return() - atomic subtract with full ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_sub_return() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline int
atomic_sub_return(int i, atomic_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_sub_return(i, v);
+ return raw_atomic_sub_return(i, v);
}
+/**
+ * atomic_sub_return_acquire() - atomic subtract with acquire ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_sub_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline int
atomic_sub_return_acquire(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_sub_return_acquire(i, v);
+ return raw_atomic_sub_return_acquire(i, v);
}
+/**
+ * atomic_sub_return_release() - atomic subtract with release ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_sub_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline int
atomic_sub_return_release(int i, atomic_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_sub_return_release(i, v);
+ return raw_atomic_sub_return_release(i, v);
}
+/**
+ * atomic_sub_return_relaxed() - atomic subtract with relaxed ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_sub_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline int
atomic_sub_return_relaxed(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_sub_return_relaxed(i, v);
+ return raw_atomic_sub_return_relaxed(i, v);
}
+/**
+ * atomic_fetch_sub() - atomic subtract with full ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_sub() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
atomic_fetch_sub(int i, atomic_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_sub(i, v);
+ return raw_atomic_fetch_sub(i, v);
}
+/**
+ * atomic_fetch_sub_acquire() - atomic subtract with acquire ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_sub_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
atomic_fetch_sub_acquire(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_sub_acquire(i, v);
+ return raw_atomic_fetch_sub_acquire(i, v);
}
+/**
+ * atomic_fetch_sub_release() - atomic subtract with release ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_sub_release() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
atomic_fetch_sub_release(int i, atomic_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_sub_release(i, v);
+ return raw_atomic_fetch_sub_release(i, v);
}
+/**
+ * atomic_fetch_sub_relaxed() - atomic subtract with relaxed ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_sub_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
atomic_fetch_sub_relaxed(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_sub_relaxed(i, v);
+ return raw_atomic_fetch_sub_relaxed(i, v);
}
+/**
+ * atomic_inc() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_inc() there.
+ *
+ * Return: Nothing.
+ */
static __always_inline void
atomic_inc(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- arch_atomic_inc(v);
+ raw_atomic_inc(v);
}
+/**
+ * atomic_inc_return() - atomic increment with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_inc_return() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline int
atomic_inc_return(atomic_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_inc_return(v);
+ return raw_atomic_inc_return(v);
}
+/**
+ * atomic_inc_return_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_inc_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline int
atomic_inc_return_acquire(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_inc_return_acquire(v);
+ return raw_atomic_inc_return_acquire(v);
}
+/**
+ * atomic_inc_return_release() - atomic increment with release ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_inc_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline int
atomic_inc_return_release(atomic_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_inc_return_release(v);
+ return raw_atomic_inc_return_release(v);
}
+/**
+ * atomic_inc_return_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_inc_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline int
atomic_inc_return_relaxed(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_inc_return_relaxed(v);
+ return raw_atomic_inc_return_relaxed(v);
}
+/**
+ * atomic_fetch_inc() - atomic increment with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_inc() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
atomic_fetch_inc(atomic_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_inc(v);
+ return raw_atomic_fetch_inc(v);
}
+/**
+ * atomic_fetch_inc_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_inc_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
atomic_fetch_inc_acquire(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_inc_acquire(v);
+ return raw_atomic_fetch_inc_acquire(v);
}
+/**
+ * atomic_fetch_inc_release() - atomic increment with release ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_inc_release() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
atomic_fetch_inc_release(atomic_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_inc_release(v);
+ return raw_atomic_fetch_inc_release(v);
}
+/**
+ * atomic_fetch_inc_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_inc_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
atomic_fetch_inc_relaxed(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_inc_relaxed(v);
+ return raw_atomic_fetch_inc_relaxed(v);
}
+/**
+ * atomic_dec() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_dec() there.
+ *
+ * Return: Nothing.
+ */
static __always_inline void
atomic_dec(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- arch_atomic_dec(v);
+ raw_atomic_dec(v);
}
+/**
+ * atomic_dec_return() - atomic decrement with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_dec_return() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline int
atomic_dec_return(atomic_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_dec_return(v);
+ return raw_atomic_dec_return(v);
}
+/**
+ * atomic_dec_return_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_dec_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline int
atomic_dec_return_acquire(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_dec_return_acquire(v);
+ return raw_atomic_dec_return_acquire(v);
}
+/**
+ * atomic_dec_return_release() - atomic decrement with release ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_dec_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline int
atomic_dec_return_release(atomic_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_dec_return_release(v);
+ return raw_atomic_dec_return_release(v);
}
+/**
+ * atomic_dec_return_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_dec_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline int
atomic_dec_return_relaxed(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_dec_return_relaxed(v);
+ return raw_atomic_dec_return_relaxed(v);
}
+/**
+ * atomic_fetch_dec() - atomic decrement with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_dec() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
atomic_fetch_dec(atomic_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_dec(v);
+ return raw_atomic_fetch_dec(v);
}
+/**
+ * atomic_fetch_dec_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_dec_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
atomic_fetch_dec_acquire(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_dec_acquire(v);
+ return raw_atomic_fetch_dec_acquire(v);
}
+/**
+ * atomic_fetch_dec_release() - atomic decrement with release ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_dec_release() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
atomic_fetch_dec_release(atomic_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_dec_release(v);
+ return raw_atomic_fetch_dec_release(v);
}
+/**
+ * atomic_fetch_dec_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_dec_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
atomic_fetch_dec_relaxed(atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_dec_relaxed(v);
+ return raw_atomic_fetch_dec_relaxed(v);
}
+/**
+ * atomic_and() - atomic bitwise AND with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_and() there.
+ *
+ * Return: Nothing.
+ */
static __always_inline void
atomic_and(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- arch_atomic_and(i, v);
+ raw_atomic_and(i, v);
}
+/**
+ * atomic_fetch_and() - atomic bitwise AND with full ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_and() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
atomic_fetch_and(int i, atomic_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_and(i, v);
+ return raw_atomic_fetch_and(i, v);
}
+/**
+ * atomic_fetch_and_acquire() - atomic bitwise AND with acquire ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_and_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
atomic_fetch_and_acquire(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_and_acquire(i, v);
+ return raw_atomic_fetch_and_acquire(i, v);
}
+/**
+ * atomic_fetch_and_release() - atomic bitwise AND with release ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_and_release() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
atomic_fetch_and_release(int i, atomic_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_and_release(i, v);
+ return raw_atomic_fetch_and_release(i, v);
}
+/**
+ * atomic_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_and_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
atomic_fetch_and_relaxed(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_and_relaxed(i, v);
+ return raw_atomic_fetch_and_relaxed(i, v);
}
+/**
+ * atomic_andnot() - atomic bitwise AND NOT with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_andnot() there.
+ *
+ * Return: Nothing.
+ */
static __always_inline void
atomic_andnot(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- arch_atomic_andnot(i, v);
+ raw_atomic_andnot(i, v);
}
+/**
+ * atomic_fetch_andnot() - atomic bitwise AND NOT with full ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & ~@i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_andnot() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
atomic_fetch_andnot(int i, atomic_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_andnot(i, v);
+ return raw_atomic_fetch_andnot(i, v);
}
+/**
+ * atomic_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & ~@i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_andnot_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
atomic_fetch_andnot_acquire(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_andnot_acquire(i, v);
+ return raw_atomic_fetch_andnot_acquire(i, v);
}
+/**
+ * atomic_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & ~@i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_andnot_release() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
atomic_fetch_andnot_release(int i, atomic_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_andnot_release(i, v);
+ return raw_atomic_fetch_andnot_release(i, v);
}
+/**
+ * atomic_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_andnot_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
atomic_fetch_andnot_relaxed(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_andnot_relaxed(i, v);
+ return raw_atomic_fetch_andnot_relaxed(i, v);
}
+/**
+ * atomic_or() - atomic bitwise OR with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_or() there.
+ *
+ * Return: Nothing.
+ */
static __always_inline void
atomic_or(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- arch_atomic_or(i, v);
+ raw_atomic_or(i, v);
}
+/**
+ * atomic_fetch_or() - atomic bitwise OR with full ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v | @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_or() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
atomic_fetch_or(int i, atomic_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_or(i, v);
+ return raw_atomic_fetch_or(i, v);
}
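
/*
 * Illustrative sketch (not part of this patch): atomic_fetch_or() makes a
 * "set a flag and learn whether we were first" test race-free. The flag
 * value and helper name are hypothetical.
 */
#define EXAMPLE_F_DIRTY	0x1

static inline bool example_mark_dirty(atomic_t *flags)
{
	/* True only for the caller that actually transitioned the bit. */
	return !(atomic_fetch_or(EXAMPLE_F_DIRTY, flags) & EXAMPLE_F_DIRTY);
}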
+/**
+ * atomic_fetch_or_acquire() - atomic bitwise OR with acquire ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v | @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_or_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
atomic_fetch_or_acquire(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_or_acquire(i, v);
+ return raw_atomic_fetch_or_acquire(i, v);
}
+/**
+ * atomic_fetch_or_release() - atomic bitwise OR with release ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v | @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_or_release() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
atomic_fetch_or_release(int i, atomic_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_or_release(i, v);
+ return raw_atomic_fetch_or_release(i, v);
}
+/**
+ * atomic_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_or_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
atomic_fetch_or_relaxed(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_or_relaxed(i, v);
+ return raw_atomic_fetch_or_relaxed(i, v);
}
+/**
+ * atomic_xor() - atomic bitwise XOR with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_xor() there.
+ *
+ * Return: Nothing.
+ */
static __always_inline void
atomic_xor(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- arch_atomic_xor(i, v);
+ raw_atomic_xor(i, v);
}
+/**
+ * atomic_fetch_xor() - atomic bitwise XOR with full ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v ^ @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_xor() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
atomic_fetch_xor(int i, atomic_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_xor(i, v);
+ return raw_atomic_fetch_xor(i, v);
}
+/**
+ * atomic_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v ^ @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_xor_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
atomic_fetch_xor_acquire(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_xor_acquire(i, v);
+ return raw_atomic_fetch_xor_acquire(i, v);
}
+/**
+ * atomic_fetch_xor_release() - atomic bitwise XOR with release ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v ^ @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_xor_release() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
atomic_fetch_xor_release(int i, atomic_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_xor_release(i, v);
+ return raw_atomic_fetch_xor_release(i, v);
}
+/**
+ * atomic_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_xor_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
atomic_fetch_xor_relaxed(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_xor_relaxed(i, v);
+ return raw_atomic_fetch_xor_relaxed(i, v);
}
+/**
+ * atomic_xchg() - atomic exchange with full ordering
+ * @v: pointer to atomic_t
+ * @new: int value to assign
+ *
+ * Atomically updates @v to @new with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_xchg() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
-atomic_xchg(atomic_t *v, int i)
+atomic_xchg(atomic_t *v, int new)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_xchg(v, i);
+ return raw_atomic_xchg(v, new);
}
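
/*
 * Illustrative sketch (not part of this patch): atomic_xchg() can claim all
 * pending work in a single fully ordered step, leaving zero behind for the
 * next producer. The helper name is hypothetical.
 */
static inline int example_drain_pending(atomic_t *pending)
{
	return atomic_xchg(pending, 0);
}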
+/**
+ * atomic_xchg_acquire() - atomic exchange with acquire ordering
+ * @v: pointer to atomic_t
+ * @new: int value to assign
+ *
+ * Atomically updates @v to @new with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_xchg_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
-atomic_xchg_acquire(atomic_t *v, int i)
+atomic_xchg_acquire(atomic_t *v, int new)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_xchg_acquire(v, i);
+ return raw_atomic_xchg_acquire(v, new);
}
+/**
+ * atomic_xchg_release() - atomic exchange with release ordering
+ * @v: pointer to atomic_t
+ * @new: int value to assign
+ *
+ * Atomically updates @v to @new with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_xchg_release() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
-atomic_xchg_release(atomic_t *v, int i)
+atomic_xchg_release(atomic_t *v, int new)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_xchg_release(v, i);
+ return raw_atomic_xchg_release(v, new);
}
+/**
+ * atomic_xchg_relaxed() - atomic exchange with relaxed ordering
+ * @v: pointer to atomic_t
+ * @new: int value to assign
+ *
+ * Atomically updates @v to @new with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_xchg_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
-atomic_xchg_relaxed(atomic_t *v, int i)
+atomic_xchg_relaxed(atomic_t *v, int new)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_xchg_relaxed(v, i);
+ return raw_atomic_xchg_relaxed(v, new);
}
+/**
+ * atomic_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic_t
+ * @old: int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_cmpxchg() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
atomic_cmpxchg(atomic_t *v, int old, int new)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_cmpxchg(v, old, new);
+ return raw_atomic_cmpxchg(v, old, new);
}
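
/*
 * Illustrative sketch (not part of this patch): a one-shot state transition
 * with atomic_cmpxchg(); only the caller that observes the old state wins
 * the race. The state values and helper name are hypothetical.
 */
enum { EXAMPLE_IDLE, EXAMPLE_RUNNING };

static inline bool example_try_start(atomic_t *state)
{
	return atomic_cmpxchg(state, EXAMPLE_IDLE, EXAMPLE_RUNNING) == EXAMPLE_IDLE;
}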
+/**
+ * atomic_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic_t
+ * @old: int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_cmpxchg_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_cmpxchg_acquire(v, old, new);
+ return raw_atomic_cmpxchg_acquire(v, old, new);
}
+/**
+ * atomic_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic_t
+ * @old: int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_cmpxchg_release() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
atomic_cmpxchg_release(atomic_t *v, int old, int new)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_cmpxchg_release(v, old, new);
+ return raw_atomic_cmpxchg_release(v, old, new);
}
+/**
+ * atomic_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic_t
+ * @old: int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_cmpxchg_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_cmpxchg_relaxed(v, old, new);
+ return raw_atomic_cmpxchg_relaxed(v, old, new);
}
+/**
+ * atomic_try_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic_t
+ * @old: pointer to int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, updates @old to the current value of @v.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
static __always_inline bool
atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
instrument_atomic_read_write(old, sizeof(*old));
- return arch_atomic_try_cmpxchg(v, old, new);
-}
-
+ return raw_atomic_try_cmpxchg(v, old, new);
+}
+
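/*
 * Illustrative sketch (not part of this patch): the standard try_cmpxchg()
 * loop. On failure @old is refreshed with the current value of @v, so the
 * loop needs no explicit re-read. Doubling the value is an arbitrary
 * example operation.
 */
static inline void example_double(atomic_t *v)
{
	int old = atomic_read(v);

	while (!atomic_try_cmpxchg(v, &old, old * 2))
		;	/* @old was updated; retry with the fresh value */
}
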
+/**
+ * atomic_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic_t
+ * @old: pointer to int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, updates @old to the current value of @v.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg_acquire() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
static __always_inline bool
atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
{
instrument_atomic_read_write(v, sizeof(*v));
instrument_atomic_read_write(old, sizeof(*old));
- return arch_atomic_try_cmpxchg_acquire(v, old, new);
-}
-
+ return raw_atomic_try_cmpxchg_acquire(v, old, new);
+}
+
+/**
+ * atomic_try_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic_t
+ * @old: pointer to int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, updates @old to the current value of @v.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg_release() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
static __always_inline bool
atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
instrument_atomic_read_write(old, sizeof(*old));
- return arch_atomic_try_cmpxchg_release(v, old, new);
-}
-
+ return raw_atomic_try_cmpxchg_release(v, old, new);
+}
+
+/**
+ * atomic_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic_t
+ * @old: pointer to int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, updates @old to the current value of @v.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg_relaxed() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
static __always_inline bool
atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
{
instrument_atomic_read_write(v, sizeof(*v));
instrument_atomic_read_write(old, sizeof(*old));
- return arch_atomic_try_cmpxchg_relaxed(v, old, new);
-}
-
+ return raw_atomic_try_cmpxchg_relaxed(v, old, new);
+}
+
+/**
+ * atomic_sub_and_test() - atomic subtract and test if zero with full ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_sub_and_test() there.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
static __always_inline bool
atomic_sub_and_test(int i, atomic_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_sub_and_test(i, v);
+ return raw_atomic_sub_and_test(i, v);
}
+/**
+ * atomic_dec_and_test() - atomic decrement and test if zero with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_dec_and_test() there.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
static __always_inline bool
atomic_dec_and_test(atomic_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_dec_and_test(v);
+ return raw_atomic_dec_and_test(v);
}
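
/*
 * Illustrative sketch (not part of this patch): the canonical last-put
 * pattern. atomic_dec_and_test()'s full ordering ensures all prior accesses
 * to the object happen before it is freed. The structure and helper name
 * are hypothetical.
 */
struct example_ref {
	atomic_t refs;
};

static inline void example_put(struct example_ref *obj)
{
	if (atomic_dec_and_test(&obj->refs))
		kfree(obj);
}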
+/**
+ * atomic_inc_and_test() - atomic increment and test if zero with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_inc_and_test() there.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
static __always_inline bool
atomic_inc_and_test(atomic_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_inc_and_test(v);
+ return raw_atomic_inc_and_test(v);
}
+/**
+ * atomic_add_negative() - atomic add and test if negative with full ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_add_negative() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
static __always_inline bool
atomic_add_negative(int i, atomic_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_add_negative(i, v);
+ return raw_atomic_add_negative(i, v);
}
+/**
+ * atomic_add_negative_acquire() - atomic add and test if negative with acquire ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_add_negative_acquire() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
static __always_inline bool
atomic_add_negative_acquire(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_add_negative_acquire(i, v);
+ return raw_atomic_add_negative_acquire(i, v);
}
+/**
+ * atomic_add_negative_release() - atomic add and test if negative with release ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_add_negative_release() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
static __always_inline bool
atomic_add_negative_release(int i, atomic_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_add_negative_release(i, v);
+ return raw_atomic_add_negative_release(i, v);
}
+/**
+ * atomic_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_add_negative_relaxed() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
static __always_inline bool
atomic_add_negative_relaxed(int i, atomic_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_add_negative_relaxed(i, v);
+ return raw_atomic_add_negative_relaxed(i, v);
}
+/**
+ * atomic_fetch_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic_t
+ * @a: int value to add
+ * @u: int value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_add_unless() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline int
atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_add_unless(v, a, u);
+ return raw_atomic_fetch_add_unless(v, a, u);
}
+/**
+ * atomic_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic_t
+ * @a: int value to add
+ * @u: int value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_add_unless() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
static __always_inline bool
atomic_add_unless(atomic_t *v, int a, int u)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_add_unless(v, a, u);
+ return raw_atomic_add_unless(v, a, u);
}
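
/*
 * Illustrative sketch (not part of this patch): atomic_add_unless() as a
 * guarded increment, here skipping a hypothetical "locked" sentinel value
 * of -1. The helper name and sentinel are assumptions for illustration.
 */
static inline bool example_get_unless_locked(atomic_t *count)
{
	return atomic_add_unless(count, 1, -1);
}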
+/**
+ * atomic_inc_not_zero() - atomic increment unless zero with full ordering
+ * @v: pointer to atomic_t
+ *
+ * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_inc_not_zero() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
static __always_inline bool
atomic_inc_not_zero(atomic_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_inc_not_zero(v);
+ return raw_atomic_inc_not_zero(v);
}
+/**
+ * atomic_inc_unless_negative() - atomic increment unless negative with full ordering
+ * @v: pointer to atomic_t
+ *
+ * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_inc_unless_negative() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
static __always_inline bool
atomic_inc_unless_negative(atomic_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_inc_unless_negative(v);
+ return raw_atomic_inc_unless_negative(v);
}
+/**
+ * atomic_dec_unless_positive() - atomic decrement unless positive with full ordering
+ * @v: pointer to atomic_t
+ *
+ * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_dec_unless_positive() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
static __always_inline bool
atomic_dec_unless_positive(atomic_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_dec_unless_positive(v);
+ return raw_atomic_dec_unless_positive(v);
}
+/**
+ * atomic_dec_if_positive() - atomic decrement if positive with full ordering
+ * @v: pointer to atomic_t
+ *
+ * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_dec_if_positive() there.
+ *
+ * Return: The original value of @v minus one, regardless of whether @v was updated.
+ */
static __always_inline int
atomic_dec_if_positive(atomic_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_dec_if_positive(v);
+ return raw_atomic_dec_if_positive(v);
}
+/**
+ * atomic64_read() - atomic load with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically loads the value of @v with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_read() there.
+ *
+ * Return: The value loaded from @v.
+ */
static __always_inline s64
atomic64_read(const atomic64_t *v)
{
instrument_atomic_read(v, sizeof(*v));
- return arch_atomic64_read(v);
-}
-
+ return raw_atomic64_read(v);
+}
+
+/**
+ * atomic64_read_acquire() - atomic load with acquire ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically loads the value of @v with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_read_acquire() there.
+ *
+ * Return: The value loaded from @v.
+ */
static __always_inline s64
atomic64_read_acquire(const atomic64_t *v)
{
instrument_atomic_read(v, sizeof(*v));
- return arch_atomic64_read_acquire(v);
-}
-
+ return raw_atomic64_read_acquire(v);
+}
+
+/**
+ * atomic64_set() - atomic set with relaxed ordering
+ * @v: pointer to atomic64_t
+ * @i: s64 value to assign
+ *
+ * Atomically sets @v to @i with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_set() there.
+ *
+ * Return: Nothing.
+ */
static __always_inline void
atomic64_set(atomic64_t *v, s64 i)
{
instrument_atomic_write(v, sizeof(*v));
- arch_atomic64_set(v, i);
-}
-
+ raw_atomic64_set(v, i);
+}
+
+/**
+ * atomic64_set_release() - atomic set with release ordering
+ * @v: pointer to atomic64_t
+ * @i: s64 value to assign
+ *
+ * Atomically sets @v to @i with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_set_release() there.
+ *
+ * Return: Nothing.
+ */
static __always_inline void
atomic64_set_release(atomic64_t *v, s64 i)
{
kcsan_release();
instrument_atomic_write(v, sizeof(*v));
- arch_atomic64_set_release(v, i);
-}
-
+ raw_atomic64_set_release(v, i);
+}
+
+/**
+ * atomic64_add() - atomic add with relaxed ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_add() there.
+ *
+ * Return: Nothing.
+ */
static __always_inline void
atomic64_add(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- arch_atomic64_add(i, v);
+ raw_atomic64_add(i, v);
}
+/**
+ * atomic64_add_return() - atomic add with full ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_add_return() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline s64
atomic64_add_return(s64 i, atomic64_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_add_return(i, v);
+ return raw_atomic64_add_return(i, v);
}
+/**
+ * atomic64_add_return_acquire() - atomic add with acquire ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_add_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline s64
atomic64_add_return_acquire(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_add_return_acquire(i, v);
+ return raw_atomic64_add_return_acquire(i, v);
}
+/**
+ * atomic64_add_return_release() - atomic add with release ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_add_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline s64
atomic64_add_return_release(s64 i, atomic64_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_add_return_release(i, v);
+ return raw_atomic64_add_return_release(i, v);
}
+/**
+ * atomic64_add_return_relaxed() - atomic add with relaxed ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_add_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline s64
atomic64_add_return_relaxed(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_add_return_relaxed(i, v);
+ return raw_atomic64_add_return_relaxed(i, v);
}
+/**
+ * atomic64_fetch_add() - atomic add with full ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_add() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
atomic64_fetch_add(s64 i, atomic64_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_add(i, v);
+ return raw_atomic64_fetch_add(i, v);
}
+/**
+ * atomic64_fetch_add_acquire() - atomic add with acquire ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_add_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_add_acquire(i, v);
+ return raw_atomic64_fetch_add_acquire(i, v);
}
+/**
+ * atomic64_fetch_add_release() - atomic add with release ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_add_release() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
atomic64_fetch_add_release(s64 i, atomic64_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_add_release(i, v);
+ return raw_atomic64_fetch_add_release(i, v);
}
+/**
+ * atomic64_fetch_add_relaxed() - atomic add with relaxed ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_add_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_add_relaxed(i, v);
+ return raw_atomic64_fetch_add_relaxed(i, v);
}
+/**
+ * atomic64_sub() - atomic subtract with relaxed ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_sub() there.
+ *
+ * Return: Nothing.
+ */
static __always_inline void
atomic64_sub(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- arch_atomic64_sub(i, v);
+ raw_atomic64_sub(i, v);
}
+/**
+ * atomic64_sub_return() - atomic subtract with full ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_sub_return() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline s64
atomic64_sub_return(s64 i, atomic64_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_sub_return(i, v);
+ return raw_atomic64_sub_return(i, v);
}
+/**
+ * atomic64_sub_return_acquire() - atomic subtract with acquire ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_sub_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline s64
atomic64_sub_return_acquire(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_sub_return_acquire(i, v);
+ return raw_atomic64_sub_return_acquire(i, v);
}
+/**
+ * atomic64_sub_return_release() - atomic subtract with release ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_sub_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline s64
atomic64_sub_return_release(s64 i, atomic64_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_sub_return_release(i, v);
+ return raw_atomic64_sub_return_release(i, v);
}
+/**
+ * atomic64_sub_return_relaxed() - atomic subtract with relaxed ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_sub_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline s64
atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_sub_return_relaxed(i, v);
+ return raw_atomic64_sub_return_relaxed(i, v);
}
+/**
+ * atomic64_fetch_sub() - atomic subtract with full ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_sub() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
atomic64_fetch_sub(s64 i, atomic64_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_sub(i, v);
+ return raw_atomic64_fetch_sub(i, v);
}
+/**
+ * atomic64_fetch_sub_acquire() - atomic subtract with acquire ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_sub_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_sub_acquire(i, v);
+ return raw_atomic64_fetch_sub_acquire(i, v);
}
+/**
+ * atomic64_fetch_sub_release() - atomic subtract with release ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_sub_release() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
atomic64_fetch_sub_release(s64 i, atomic64_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_sub_release(i, v);
+ return raw_atomic64_fetch_sub_release(i, v);
}
+/**
+ * atomic64_fetch_sub_relaxed() - atomic subtract with relaxed ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_sub_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_sub_relaxed(i, v);
+ return raw_atomic64_fetch_sub_relaxed(i, v);
}
+/**
+ * atomic64_inc() - atomic increment with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_inc() there.
+ *
+ * Return: Nothing.
+ */
static __always_inline void
atomic64_inc(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- arch_atomic64_inc(v);
+ raw_atomic64_inc(v);
}
+/**
+ * atomic64_inc_return() - atomic increment with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_inc_return() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline s64
atomic64_inc_return(atomic64_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_inc_return(v);
+ return raw_atomic64_inc_return(v);
}
+/**
+ * atomic64_inc_return_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_inc_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline s64
atomic64_inc_return_acquire(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_inc_return_acquire(v);
+ return raw_atomic64_inc_return_acquire(v);
}
+/**
+ * atomic64_inc_return_release() - atomic increment with release ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_inc_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline s64
atomic64_inc_return_release(atomic64_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_inc_return_release(v);
+ return raw_atomic64_inc_return_release(v);
}
+/**
+ * atomic64_inc_return_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_inc_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline s64
atomic64_inc_return_relaxed(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_inc_return_relaxed(v);
+ return raw_atomic64_inc_return_relaxed(v);
}
+/**
+ * atomic64_fetch_inc() - atomic increment with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_inc() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
atomic64_fetch_inc(atomic64_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_inc(v);
+ return raw_atomic64_fetch_inc(v);
}
+/**
+ * atomic64_fetch_inc_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_inc_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
atomic64_fetch_inc_acquire(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_inc_acquire(v);
+ return raw_atomic64_fetch_inc_acquire(v);
}
+/**
+ * atomic64_fetch_inc_release() - atomic increment with release ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_inc_release() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
atomic64_fetch_inc_release(atomic64_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_inc_release(v);
+ return raw_atomic64_fetch_inc_release(v);
}
+/**
+ * atomic64_fetch_inc_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_inc_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
atomic64_fetch_inc_relaxed(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_inc_relaxed(v);
+ return raw_atomic64_fetch_inc_relaxed(v);
}
+/**
+ * atomic64_dec() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_dec() there.
+ *
+ * Return: Nothing.
+ */
static __always_inline void
atomic64_dec(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- arch_atomic64_dec(v);
+ raw_atomic64_dec(v);
}
+/**
+ * atomic64_dec_return() - atomic decrement with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_dec_return() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline s64
atomic64_dec_return(atomic64_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_dec_return(v);
+ return raw_atomic64_dec_return(v);
}
+/**
+ * atomic64_dec_return_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_dec_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline s64
atomic64_dec_return_acquire(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_dec_return_acquire(v);
+ return raw_atomic64_dec_return_acquire(v);
}
+/**
+ * atomic64_dec_return_release() - atomic decrement with release ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_dec_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline s64
atomic64_dec_return_release(atomic64_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_dec_return_release(v);
+ return raw_atomic64_dec_return_release(v);
}
+/**
+ * atomic64_dec_return_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_dec_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline s64
atomic64_dec_return_relaxed(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_dec_return_relaxed(v);
+ return raw_atomic64_dec_return_relaxed(v);
}
+/**
+ * atomic64_fetch_dec() - atomic decrement with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_dec() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
atomic64_fetch_dec(atomic64_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_dec(v);
+ return raw_atomic64_fetch_dec(v);
}
+/**
+ * atomic64_fetch_dec_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_dec_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
atomic64_fetch_dec_acquire(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_dec_acquire(v);
+ return raw_atomic64_fetch_dec_acquire(v);
}
+/**
+ * atomic64_fetch_dec_release() - atomic decrement with release ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_dec_release() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
atomic64_fetch_dec_release(atomic64_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_dec_release(v);
+ return raw_atomic64_fetch_dec_release(v);
}
+/**
+ * atomic64_fetch_dec_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_dec_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
atomic64_fetch_dec_relaxed(atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_dec_relaxed(v);
+ return raw_atomic64_fetch_dec_relaxed(v);
}
+/**
+ * atomic64_and() - atomic bitwise AND with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_and() there.
+ *
+ * Return: Nothing.
+ */
static __always_inline void
atomic64_and(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- arch_atomic64_and(i, v);
+ raw_atomic64_and(i, v);
}
+/**
+ * atomic64_fetch_and() - atomic bitwise AND with full ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_and() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
atomic64_fetch_and(s64 i, atomic64_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_and(i, v);
+ return raw_atomic64_fetch_and(i, v);
}
+/**
+ * atomic64_fetch_and_acquire() - atomic bitwise AND with acquire ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_and_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_and_acquire(i, v);
+ return raw_atomic64_fetch_and_acquire(i, v);
}
+/**
+ * atomic64_fetch_and_release() - atomic bitwise AND with release ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_and_release() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
atomic64_fetch_and_release(s64 i, atomic64_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_and_release(i, v);
+ return raw_atomic64_fetch_and_release(i, v);
}
+/**
+ * atomic64_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_and_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_and_relaxed(i, v);
+ return raw_atomic64_fetch_and_relaxed(i, v);
}
+/**
+ * atomic64_andnot() - atomic bitwise AND NOT with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_andnot() there.
+ *
+ * Return: Nothing.
+ */
static __always_inline void
atomic64_andnot(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- arch_atomic64_andnot(i, v);
+ raw_atomic64_andnot(i, v);
}
+/**
+ * atomic64_fetch_andnot() - atomic bitwise AND NOT with full ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & ~@i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_andnot() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
atomic64_fetch_andnot(s64 i, atomic64_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_andnot(i, v);
+ return raw_atomic64_fetch_andnot(i, v);
}
+/**
+ * atomic64_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & ~@i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_andnot_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_andnot_acquire(i, v);
+ return raw_atomic64_fetch_andnot_acquire(i, v);
}
+/**
+ * atomic64_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & ~@i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_andnot_release() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_andnot_release(i, v);
+ return raw_atomic64_fetch_andnot_release(i, v);
}
+/**
+ * atomic64_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_andnot_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_andnot_relaxed(i, v);
+ return raw_atomic64_fetch_andnot_relaxed(i, v);
}
+/**
+ * atomic64_or() - atomic bitwise OR with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_or() there.
+ *
+ * Return: Nothing.
+ */
static __always_inline void
atomic64_or(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- arch_atomic64_or(i, v);
+ raw_atomic64_or(i, v);
}
+/**
+ * atomic64_fetch_or() - atomic bitwise OR with full ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v | @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_or() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
atomic64_fetch_or(s64 i, atomic64_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_or(i, v);
+ return raw_atomic64_fetch_or(i, v);
}
+/**
+ * atomic64_fetch_or_acquire() - atomic bitwise OR with acquire ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v | @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_or_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_or_acquire(i, v);
+ return raw_atomic64_fetch_or_acquire(i, v);
}
+/**
+ * atomic64_fetch_or_release() - atomic bitwise OR with release ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v | @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_or_release() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
atomic64_fetch_or_release(s64 i, atomic64_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_or_release(i, v);
+ return raw_atomic64_fetch_or_release(i, v);
}
+/**
+ * atomic64_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_or_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_or_relaxed(i, v);
+ return raw_atomic64_fetch_or_relaxed(i, v);
}
+/**
+ * atomic64_xor() - atomic bitwise XOR with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_xor() there.
+ *
+ * Return: Nothing.
+ */
static __always_inline void
atomic64_xor(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- arch_atomic64_xor(i, v);
+ raw_atomic64_xor(i, v);
}
+/**
+ * atomic64_fetch_xor() - atomic bitwise XOR with full ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v ^ @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_xor() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
atomic64_fetch_xor(s64 i, atomic64_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_xor(i, v);
+ return raw_atomic64_fetch_xor(i, v);
}
+/**
+ * atomic64_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v ^ @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_xor_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_xor_acquire(i, v);
+ return raw_atomic64_fetch_xor_acquire(i, v);
}
+/**
+ * atomic64_fetch_xor_release() - atomic bitwise XOR with release ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v ^ @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_xor_release() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
atomic64_fetch_xor_release(s64 i, atomic64_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_xor_release(i, v);
+ return raw_atomic64_fetch_xor_release(i, v);
}
+/**
+ * atomic64_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_xor_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_xor_relaxed(i, v);
+ return raw_atomic64_fetch_xor_relaxed(i, v);
}
+/**
+ * atomic64_xchg() - atomic exchange with full ordering
+ * @v: pointer to atomic64_t
+ * @new: s64 value to assign
+ *
+ * Atomically updates @v to @new with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_xchg() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
-atomic64_xchg(atomic64_t *v, s64 i)
+atomic64_xchg(atomic64_t *v, s64 new)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_xchg(v, i);
+ return raw_atomic64_xchg(v, new);
}
+/**
+ * atomic64_xchg_acquire() - atomic exchange with acquire ordering
+ * @v: pointer to atomic64_t
+ * @new: s64 value to assign
+ *
+ * Atomically updates @v to @new with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_xchg_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
-atomic64_xchg_acquire(atomic64_t *v, s64 i)
+atomic64_xchg_acquire(atomic64_t *v, s64 new)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_xchg_acquire(v, i);
+ return raw_atomic64_xchg_acquire(v, new);
}
+/**
+ * atomic64_xchg_release() - atomic exchange with release ordering
+ * @v: pointer to atomic64_t
+ * @new: s64 value to assign
+ *
+ * Atomically updates @v to @new with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_xchg_release() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
-atomic64_xchg_release(atomic64_t *v, s64 i)
+atomic64_xchg_release(atomic64_t *v, s64 new)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_xchg_release(v, i);
+ return raw_atomic64_xchg_release(v, new);
}
+/**
+ * atomic64_xchg_relaxed() - atomic exchange with relaxed ordering
+ * @v: pointer to atomic64_t
+ * @new: s64 value to assign
+ *
+ * Atomically updates @v to @new with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_xchg_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
-atomic64_xchg_relaxed(atomic64_t *v, s64 i)
+atomic64_xchg_relaxed(atomic64_t *v, s64 new)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_xchg_relaxed(v, i);
+ return raw_atomic64_xchg_relaxed(v, new);
}
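/*
 * Illustrative sketch, not part of this patch: xchg() installs @new and
 * hands back whatever @v held, so a value can be drained atomically. A
 * hypothetical read-and-zero of a statistics counter:
 */
#include <linux/atomic.h>

static atomic64_t nr_events = ATOMIC64_INIT(0);

/* Collect and reset the counter in one atomic step; no events are lost. */
static inline s64 drain_events(void)
{
	return atomic64_xchg(&nr_events, 0);
}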
+/**
+ * atomic64_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic64_t
+ * @old: s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_cmpxchg() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_cmpxchg(v, old, new);
+ return raw_atomic64_cmpxchg(v, old, new);
}
+/**
+ * atomic64_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic64_t
+ * @old: s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_cmpxchg_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_cmpxchg_acquire(v, old, new);
+ return raw_atomic64_cmpxchg_acquire(v, old, new);
}
+/**
+ * atomic64_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic64_t
+ * @old: s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_cmpxchg_release() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_cmpxchg_release(v, old, new);
+ return raw_atomic64_cmpxchg_release(v, old, new);
}
+/**
+ * atomic64_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic64_t
+ * @old: s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_cmpxchg_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_cmpxchg_relaxed(v, old, new);
+ return raw_atomic64_cmpxchg_relaxed(v, old, new);
}
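/*
 * Illustrative sketch, not part of this patch: cmpxchg() returns the
 * original value of @v, so success is detected by comparing the return
 * value against @old. A hypothetical bounded increment:
 */
#include <linux/atomic.h>

static inline bool hyp_inc_below(atomic64_t *v, s64 limit)
{
	s64 old = atomic64_read(v);

	for (;;) {
		s64 seen;

		if (old >= limit)
			return false;
		seen = atomic64_cmpxchg(v, old, old + 1);
		if (seen == old)	/* unchanged: our update won */
			return true;
		old = seen;		/* lost the race: retry */
	}
}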
+/**
+ * atomic64_try_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic64_t
+ * @old: pointer to s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, updates @old to the current value of @v.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
static __always_inline bool
atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
instrument_atomic_read_write(old, sizeof(*old));
- return arch_atomic64_try_cmpxchg(v, old, new);
-}
-
+ return raw_atomic64_try_cmpxchg(v, old, new);
+}
+
+/**
+ * atomic64_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic64_t
+ * @old: pointer to s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, updates @old to the current value of @v.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg_acquire() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
static __always_inline bool
atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
{
instrument_atomic_read_write(v, sizeof(*v));
instrument_atomic_read_write(old, sizeof(*old));
- return arch_atomic64_try_cmpxchg_acquire(v, old, new);
-}
-
+ return raw_atomic64_try_cmpxchg_acquire(v, old, new);
+}
+
+/**
+ * atomic64_try_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic64_t
+ * @old: pointer to s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, updates @old to the current value of @v.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg_release() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
static __always_inline bool
atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
instrument_atomic_read_write(old, sizeof(*old));
- return arch_atomic64_try_cmpxchg_release(v, old, new);
-}
-
+ return raw_atomic64_try_cmpxchg_release(v, old, new);
+}
+
+/**
+ * atomic64_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic64_t
+ * @old: pointer to s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, updates @old to the current value of @v.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg_relaxed() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
static __always_inline bool
atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
{
instrument_atomic_read_write(v, sizeof(*v));
instrument_atomic_read_write(old, sizeof(*old));
- return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
-}
-
+ return raw_atomic64_try_cmpxchg_relaxed(v, old, new);
+}
+
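/*
 * Illustrative sketch, not part of this patch: on failure,
 * try_cmpxchg() writes the current value of @v back through @old, so a
 * retry loop does not need to re-read @v. The same bounded increment as
 * above, recast as a try_cmpxchg() loop:
 */
#include <linux/atomic.h>

static inline bool hyp_inc_below_try(atomic64_t *v, s64 limit)
{
	s64 old = atomic64_read(v);

	do {
		if (old >= limit)
			return false;
		/* on failure, @old is refreshed with @v's current value */
	} while (!atomic64_try_cmpxchg(v, &old, old + 1));

	return true;
}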
+/**
+ * atomic64_sub_and_test() - atomic subtract and test if zero with full ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_sub_and_test() there.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
static __always_inline bool
atomic64_sub_and_test(s64 i, atomic64_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_sub_and_test(i, v);
+ return raw_atomic64_sub_and_test(i, v);
}
+/**
+ * atomic64_dec_and_test() - atomic decrement and test if zero with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_dec_and_test() there.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
static __always_inline bool
atomic64_dec_and_test(atomic64_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_dec_and_test(v);
+ return raw_atomic64_dec_and_test(v);
}
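/*
 * Illustrative sketch, not part of this patch: dec_and_test() is the
 * classic reference-drop primitive; its full ordering orders the final
 * decrement against the teardown work. A hypothetical put helper, with
 * hyp_object and hyp_free() standing in for a real type and destructor:
 */
#include <linux/atomic.h>

struct hyp_object {
	atomic64_t refcount;
	/* ... payload ... */
};

static void hyp_free(struct hyp_object *obj)
{
	/* hypothetical destructor; real code would release @obj here */
}

static inline void hyp_put(struct hyp_object *obj)
{
	/* true only for the caller that drops the last reference */
	if (atomic64_dec_and_test(&obj->refcount))
		hyp_free(obj);
}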
+/**
+ * atomic64_inc_and_test() - atomic increment and test if zero with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_inc_and_test() there.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
static __always_inline bool
atomic64_inc_and_test(atomic64_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_inc_and_test(v);
+ return raw_atomic64_inc_and_test(v);
}
+/**
+ * atomic64_add_negative() - atomic add and test if negative with full ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_add_negative() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
static __always_inline bool
atomic64_add_negative(s64 i, atomic64_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_add_negative(i, v);
+ return raw_atomic64_add_negative(i, v);
}
+/**
+ * atomic64_add_negative_acquire() - atomic add and test if negative with acquire ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_add_negative_acquire() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
static __always_inline bool
atomic64_add_negative_acquire(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_add_negative_acquire(i, v);
+ return raw_atomic64_add_negative_acquire(i, v);
}
+/**
+ * atomic64_add_negative_release() - atomic add and test if negative with release ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_add_negative_release() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
static __always_inline bool
atomic64_add_negative_release(s64 i, atomic64_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_add_negative_release(i, v);
+ return raw_atomic64_add_negative_release(i, v);
}
+/**
+ * atomic64_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_add_negative_relaxed() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
static __always_inline bool
atomic64_add_negative_relaxed(s64 i, atomic64_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_add_negative_relaxed(i, v);
+ return raw_atomic64_add_negative_relaxed(i, v);
}
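/*
 * Illustrative sketch, not part of this patch: add_negative() folds the
 * addition and the sign test into one atomic step. A hypothetical
 * balance counter that reports overdraw:
 */
#include <linux/atomic.h>

static atomic64_t hyp_balance = ATOMIC64_INIT(0);

static inline bool hyp_charge(s64 delta)
{
	/* true if the balance went negative as a result of this charge */
	return atomic64_add_negative(-delta, &hyp_balance);
}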
+/**
+ * atomic64_fetch_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic64_t
+ * @a: s64 value to add
+ * @u: s64 value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_add_unless() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline s64
atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_add_unless(v, a, u);
+ return raw_atomic64_fetch_add_unless(v, a, u);
}
+/**
+ * atomic64_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic64_t
+ * @a: s64 value to add
+ * @u: s64 value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_add_unless() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
static __always_inline bool
atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_add_unless(v, a, u);
+ return raw_atomic64_add_unless(v, a, u);
}
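/*
 * Illustrative sketch, not part of this patch: add_unless() applies the
 * addition only when @v differs from @u, and reports whether it did. A
 * hypothetical "take a reference unless the count holds a dead marker",
 * with -1 as the made-up marker value:
 */
#include <linux/atomic.h>

static inline bool hyp_get_unless_dead(atomic64_t *ref)
{
	/* adds 1 unless the count is the hypothetical dead marker -1 */
	return atomic64_add_unless(ref, 1, -1);
}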
+/**
+ * atomic64_inc_not_zero() - atomic increment unless zero with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_inc_not_zero() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
static __always_inline bool
atomic64_inc_not_zero(atomic64_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_inc_not_zero(v);
+ return raw_atomic64_inc_not_zero(v);
}
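/*
 * Illustrative sketch, not part of this patch: inc_not_zero() is the
 * lookup-side companion of dec_and_test(); once the count has reached
 * zero the object is on its way to being freed, so a lookup must fail
 * rather than resurrect it:
 */
#include <linux/atomic.h>

static inline bool hyp_get(atomic64_t *refcount)
{
	/* fails (returns false) if the object is already dying */
	return atomic64_inc_not_zero(refcount);
}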
+/**
+ * atomic64_inc_unless_negative() - atomic increment unless negative with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_inc_unless_negative() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
static __always_inline bool
atomic64_inc_unless_negative(atomic64_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_inc_unless_negative(v);
+ return raw_atomic64_inc_unless_negative(v);
}
+/**
+ * atomic64_dec_unless_positive() - atomic decrement unless positive with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_dec_unless_positive() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
static __always_inline bool
atomic64_dec_unless_positive(atomic64_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_dec_unless_positive(v);
+ return raw_atomic64_dec_unless_positive(v);
}
+/**
+ * atomic64_dec_if_positive() - atomic decrement if positive with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_dec_if_positive() there.
+ *
+ * Return: The old value of (@v - 1), regardless of whether @v was updated.
+ */
static __always_inline s64
atomic64_dec_if_positive(atomic64_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_dec_if_positive(v);
+ return raw_atomic64_dec_if_positive(v);
}
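/*
 * Illustrative sketch, not part of this patch: dec_if_positive() always
 * returns (old - 1), so the caller distinguishes the two cases by sign;
 * a negative return means @v was left untouched. A hypothetical
 * semaphore-style trydown:
 */
#include <linux/atomic.h>

static inline bool hyp_trydown(atomic64_t *count)
{
	/* >= 0 iff the old value was > 0 and the decrement happened */
	return atomic64_dec_if_positive(count) >= 0;
}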
+/**
+ * atomic_long_read() - atomic load with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically loads the value of @v with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_read() there.
+ *
+ * Return: The value loaded from @v.
+ */
static __always_inline long
atomic_long_read(const atomic_long_t *v)
{
instrument_atomic_read(v, sizeof(*v));
- return arch_atomic_long_read(v);
-}
-
+ return raw_atomic_long_read(v);
+}
+
+/**
+ * atomic_long_read_acquire() - atomic load with acquire ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically loads the value of @v with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_read_acquire() there.
+ *
+ * Return: The value loaded from @v.
+ */
static __always_inline long
atomic_long_read_acquire(const atomic_long_t *v)
{
instrument_atomic_read(v, sizeof(*v));
- return arch_atomic_long_read_acquire(v);
-}
-
+ return raw_atomic_long_read_acquire(v);
+}
+
+/**
+ * atomic_long_set() - atomic set with relaxed ordering
+ * @v: pointer to atomic_long_t
+ * @i: long value to assign
+ *
+ * Atomically sets @v to @i with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_set() there.
+ *
+ * Return: Nothing.
+ */
static __always_inline void
atomic_long_set(atomic_long_t *v, long i)
{
instrument_atomic_write(v, sizeof(*v));
- arch_atomic_long_set(v, i);
-}
-
+ raw_atomic_long_set(v, i);
+}
+
+/**
+ * atomic_long_set_release() - atomic set with release ordering
+ * @v: pointer to atomic_long_t
+ * @i: long value to assign
+ *
+ * Atomically sets @v to @i with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_set_release() there.
+ *
+ * Return: Nothing.
+ */
static __always_inline void
atomic_long_set_release(atomic_long_t *v, long i)
{
kcsan_release();
instrument_atomic_write(v, sizeof(*v));
- arch_atomic_long_set_release(v, i);
-}
-
+ raw_atomic_long_set_release(v, i);
+}
+
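/*
 * Illustrative sketch, not part of this patch: set_release() paired
 * with read_acquire() forms a publish/consume protocol; an acquire load
 * that observes the released store also observes everything written
 * before it. Hypothetical producer and consumer, with hyp_payload
 * standing in for real data:
 */
#include <linux/atomic.h>

static long hyp_payload;
static atomic_long_t hyp_published = ATOMIC_LONG_INIT(0);

static inline void hyp_publish(long val)
{
	hyp_payload = val;				/* plain write... */
	atomic_long_set_release(&hyp_published, 1);	/* ...made visible here */
}

static inline bool hyp_consume(long *val)
{
	if (!atomic_long_read_acquire(&hyp_published))
		return false;
	*val = hyp_payload;		/* ordered after the acquire load */
	return true;
}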
+/**
+ * atomic_long_add() - atomic add with relaxed ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_add() there.
+ *
+ * Return: Nothing.
+ */
static __always_inline void
atomic_long_add(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- arch_atomic_long_add(i, v);
+ raw_atomic_long_add(i, v);
}
+/**
+ * atomic_long_add_return() - atomic add with full ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_add_return() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline long
atomic_long_add_return(long i, atomic_long_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_add_return(i, v);
+ return raw_atomic_long_add_return(i, v);
}
+/**
+ * atomic_long_add_return_acquire() - atomic add with acquire ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_add_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline long
atomic_long_add_return_acquire(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_add_return_acquire(i, v);
+ return raw_atomic_long_add_return_acquire(i, v);
}
+/**
+ * atomic_long_add_return_release() - atomic add with release ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_add_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline long
atomic_long_add_return_release(long i, atomic_long_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_add_return_release(i, v);
+ return raw_atomic_long_add_return_release(i, v);
}
+/**
+ * atomic_long_add_return_relaxed() - atomic add with relaxed ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_add_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline long
atomic_long_add_return_relaxed(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_add_return_relaxed(i, v);
+ return raw_atomic_long_add_return_relaxed(i, v);
}
+/**
+ * atomic_long_fetch_add() - atomic add with full ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_add() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
atomic_long_fetch_add(long i, atomic_long_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_fetch_add(i, v);
+ return raw_atomic_long_fetch_add(i, v);
}
+/**
+ * atomic_long_fetch_add_acquire() - atomic add with acquire ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_add_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_fetch_add_acquire(i, v);
+ return raw_atomic_long_fetch_add_acquire(i, v);
}
+/**
+ * atomic_long_fetch_add_release() - atomic add with release ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_add_release() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
atomic_long_fetch_add_release(long i, atomic_long_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_fetch_add_release(i, v);
+ return raw_atomic_long_fetch_add_release(i, v);
}
+/**
+ * atomic_long_fetch_add_relaxed() - atomic add with relaxed ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_add_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_fetch_add_relaxed(i, v);
+ return raw_atomic_long_fetch_add_relaxed(i, v);
}
+/**
+ * atomic_long_sub() - atomic subtract with relaxed ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_sub() there.
+ *
+ * Return: Nothing.
+ */
static __always_inline void
atomic_long_sub(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- arch_atomic_long_sub(i, v);
+ raw_atomic_long_sub(i, v);
}
+/**
+ * atomic_long_sub_return() - atomic subtract with full ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_sub_return() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline long
atomic_long_sub_return(long i, atomic_long_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_sub_return(i, v);
+ return raw_atomic_long_sub_return(i, v);
}
+/**
+ * atomic_long_sub_return_acquire() - atomic subtract with acquire ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_sub_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline long
atomic_long_sub_return_acquire(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_sub_return_acquire(i, v);
+ return raw_atomic_long_sub_return_acquire(i, v);
}
+/**
+ * atomic_long_sub_return_release() - atomic subtract with release ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_sub_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline long
atomic_long_sub_return_release(long i, atomic_long_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_sub_return_release(i, v);
+ return raw_atomic_long_sub_return_release(i, v);
}
+/**
+ * atomic_long_sub_return_relaxed() - atomic subtract with relaxed ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_sub_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline long
atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_sub_return_relaxed(i, v);
+ return raw_atomic_long_sub_return_relaxed(i, v);
}
+/**
+ * atomic_long_fetch_sub() - atomic subtract with full ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_sub() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
atomic_long_fetch_sub(long i, atomic_long_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_fetch_sub(i, v);
+ return raw_atomic_long_fetch_sub(i, v);
}
+/**
+ * atomic_long_fetch_sub_acquire() - atomic subtract with acquire ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_sub_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_fetch_sub_acquire(i, v);
+ return raw_atomic_long_fetch_sub_acquire(i, v);
}
+/**
+ * atomic_long_fetch_sub_release() - atomic subtract with release ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_sub_release() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
atomic_long_fetch_sub_release(long i, atomic_long_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_fetch_sub_release(i, v);
+ return raw_atomic_long_fetch_sub_release(i, v);
}
+/**
+ * atomic_long_fetch_sub_relaxed() - atomic subtract with relaxed ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_sub_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_fetch_sub_relaxed(i, v);
+ return raw_atomic_long_fetch_sub_relaxed(i, v);
}
+/**
+ * atomic_long_inc() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_inc() there.
+ *
+ * Return: Nothing.
+ */
static __always_inline void
atomic_long_inc(atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- arch_atomic_long_inc(v);
+ raw_atomic_long_inc(v);
}
+/**
+ * atomic_long_inc_return() - atomic increment with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_inc_return() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline long
atomic_long_inc_return(atomic_long_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_inc_return(v);
+ return raw_atomic_long_inc_return(v);
}
+/**
+ * atomic_long_inc_return_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_inc_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline long
atomic_long_inc_return_acquire(atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_inc_return_acquire(v);
+ return raw_atomic_long_inc_return_acquire(v);
}
+/**
+ * atomic_long_inc_return_release() - atomic increment with release ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_inc_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline long
atomic_long_inc_return_release(atomic_long_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_inc_return_release(v);
+ return raw_atomic_long_inc_return_release(v);
}
+/**
+ * atomic_long_inc_return_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_inc_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline long
atomic_long_inc_return_relaxed(atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_inc_return_relaxed(v);
+ return raw_atomic_long_inc_return_relaxed(v);
}
+/**
+ * atomic_long_fetch_inc() - atomic increment with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_inc() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
atomic_long_fetch_inc(atomic_long_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_fetch_inc(v);
+ return raw_atomic_long_fetch_inc(v);
}
+/**
+ * atomic_long_fetch_inc_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_inc_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
atomic_long_fetch_inc_acquire(atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_fetch_inc_acquire(v);
+ return raw_atomic_long_fetch_inc_acquire(v);
}
+/**
+ * atomic_long_fetch_inc_release() - atomic increment with release ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_inc_release() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
atomic_long_fetch_inc_release(atomic_long_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_fetch_inc_release(v);
+ return raw_atomic_long_fetch_inc_release(v);
}
+/**
+ * atomic_long_fetch_inc_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_inc_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
atomic_long_fetch_inc_relaxed(atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_fetch_inc_relaxed(v);
+ return raw_atomic_long_fetch_inc_relaxed(v);
}
+/**
+ * atomic_long_dec() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_dec() there.
+ *
+ * Return: Nothing.
+ */
static __always_inline void
atomic_long_dec(atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- arch_atomic_long_dec(v);
+ raw_atomic_long_dec(v);
}
+/**
+ * atomic_long_dec_return() - atomic decrement with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_dec_return() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline long
atomic_long_dec_return(atomic_long_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_dec_return(v);
+ return raw_atomic_long_dec_return(v);
}
+/**
+ * atomic_long_dec_return_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_dec_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline long
atomic_long_dec_return_acquire(atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_dec_return_acquire(v);
+ return raw_atomic_long_dec_return_acquire(v);
}
+/**
+ * atomic_long_dec_return_release() - atomic decrement with release ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_dec_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline long
atomic_long_dec_return_release(atomic_long_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_dec_return_release(v);
+ return raw_atomic_long_dec_return_release(v);
}
+/**
+ * atomic_long_dec_return_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_dec_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline long
atomic_long_dec_return_relaxed(atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_dec_return_relaxed(v);
+ return raw_atomic_long_dec_return_relaxed(v);
}
+/**
+ * atomic_long_fetch_dec() - atomic decrement with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_dec() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
atomic_long_fetch_dec(atomic_long_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_fetch_dec(v);
+ return raw_atomic_long_fetch_dec(v);
}
+/**
+ * atomic_long_fetch_dec_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_dec_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
atomic_long_fetch_dec_acquire(atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_fetch_dec_acquire(v);
+ return raw_atomic_long_fetch_dec_acquire(v);
}
+/**
+ * atomic_long_fetch_dec_release() - atomic decrement with release ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_dec_release() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
atomic_long_fetch_dec_release(atomic_long_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_fetch_dec_release(v);
+ return raw_atomic_long_fetch_dec_release(v);
}
+/**
+ * atomic_long_fetch_dec_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_dec_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
atomic_long_fetch_dec_relaxed(atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_fetch_dec_relaxed(v);
+ return raw_atomic_long_fetch_dec_relaxed(v);
}
+/**
+ * atomic_long_and() - atomic bitwise AND with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_and() there.
+ *
+ * Return: Nothing.
+ */
static __always_inline void
atomic_long_and(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- arch_atomic_long_and(i, v);
+ raw_atomic_long_and(i, v);
}
+/**
+ * atomic_long_fetch_and() - atomic bitwise AND with full ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_and() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
atomic_long_fetch_and(long i, atomic_long_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_fetch_and(i, v);
+ return raw_atomic_long_fetch_and(i, v);
}
+/**
+ * atomic_long_fetch_and_acquire() - atomic bitwise AND with acquire ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_and_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_fetch_and_acquire(i, v);
+ return raw_atomic_long_fetch_and_acquire(i, v);
}
+/**
+ * atomic_long_fetch_and_release() - atomic bitwise AND with release ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_and_release() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
atomic_long_fetch_and_release(long i, atomic_long_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_fetch_and_release(i, v);
+ return raw_atomic_long_fetch_and_release(i, v);
}
+/**
+ * atomic_long_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_and_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_fetch_and_relaxed(i, v);
+ return raw_atomic_long_fetch_and_relaxed(i, v);
}
+/**
+ * atomic_long_andnot() - atomic bitwise AND NOT with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_andnot() there.
+ *
+ * Return: Nothing.
+ */
static __always_inline void
atomic_long_andnot(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- arch_atomic_long_andnot(i, v);
+ raw_atomic_long_andnot(i, v);
}
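atomic_long_andnot() exists so callers can clear bits without negating by hand; a hedged sketch with made-up flag names:

	#define MY_FLAG_BUSY	0x1L	/* hypothetical flag bit, illustration only */

	static atomic_long_t my_flags;

	void clear_busy(void)
	{
		/* Atomically my_flags &= ~MY_FLAG_BUSY, relaxed ordering. */
		atomic_long_andnot(MY_FLAG_BUSY, &my_flags);
	}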
+/**
+ * atomic_long_fetch_andnot() - atomic bitwise AND NOT with full ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & ~@i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_andnot() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
atomic_long_fetch_andnot(long i, atomic_long_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_fetch_andnot(i, v);
+ return raw_atomic_long_fetch_andnot(i, v);
}
+/**
+ * atomic_long_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & ~@i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_andnot_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_fetch_andnot_acquire(i, v);
+ return raw_atomic_long_fetch_andnot_acquire(i, v);
}
+/**
+ * atomic_long_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & ~@i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_andnot_release() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_fetch_andnot_release(i, v);
+ return raw_atomic_long_fetch_andnot_release(i, v);
}
+/**
+ * atomic_long_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_andnot_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_fetch_andnot_relaxed(i, v);
+ return raw_atomic_long_fetch_andnot_relaxed(i, v);
}
+/**
+ * atomic_long_or() - atomic bitwise OR with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_or() there.
+ *
+ * Return: Nothing.
+ */
static __always_inline void
atomic_long_or(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- arch_atomic_long_or(i, v);
+ raw_atomic_long_or(i, v);
}
+/**
+ * atomic_long_fetch_or() - atomic bitwise OR with full ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v | @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_or() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
atomic_long_fetch_or(long i, atomic_long_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_fetch_or(i, v);
+ return raw_atomic_long_fetch_or(i, v);
}
+/**
+ * atomic_long_fetch_or_acquire() - atomic bitwise OR with acquire ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v | @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_or_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_fetch_or_acquire(i, v);
+ return raw_atomic_long_fetch_or_acquire(i, v);
}
+/**
+ * atomic_long_fetch_or_release() - atomic bitwise OR with release ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v | @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_or_release() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
atomic_long_fetch_or_release(long i, atomic_long_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_fetch_or_release(i, v);
+ return raw_atomic_long_fetch_or_release(i, v);
}
+/**
+ * atomic_long_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_or_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_fetch_or_relaxed(i, v);
+ return raw_atomic_long_fetch_or_relaxed(i, v);
}
+/**
+ * atomic_long_xor() - atomic bitwise XOR with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_xor() there.
+ *
+ * Return: Nothing.
+ */
static __always_inline void
atomic_long_xor(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- arch_atomic_long_xor(i, v);
+ raw_atomic_long_xor(i, v);
}
+/**
+ * atomic_long_fetch_xor() - atomic bitwise XOR with full ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v ^ @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_xor() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
atomic_long_fetch_xor(long i, atomic_long_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_fetch_xor(i, v);
+ return raw_atomic_long_fetch_xor(i, v);
}
+/**
+ * atomic_long_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v ^ @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_xor_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_fetch_xor_acquire(i, v);
+ return raw_atomic_long_fetch_xor_acquire(i, v);
}
+/**
+ * atomic_long_fetch_xor_release() - atomic bitwise XOR with release ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v ^ @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_xor_release() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
atomic_long_fetch_xor_release(long i, atomic_long_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_fetch_xor_release(i, v);
+ return raw_atomic_long_fetch_xor_release(i, v);
}
+/**
+ * atomic_long_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_xor_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_fetch_xor_relaxed(i, v);
+ return raw_atomic_long_fetch_xor_relaxed(i, v);
}
+/**
+ * atomic_long_xchg() - atomic exchange with full ordering
+ * @v: pointer to atomic_long_t
+ * @new: long value to assign
+ *
+ * Atomically updates @v to @new with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_xchg() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-atomic_long_xchg(atomic_long_t *v, long i)
+atomic_long_xchg(atomic_long_t *v, long new)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_xchg(v, i);
+ return raw_atomic_long_xchg(v, new);
}
+/**
+ * atomic_long_xchg_acquire() - atomic exchange with acquire ordering
+ * @v: pointer to atomic_long_t
+ * @new: long value to assign
+ *
+ * Atomically updates @v to @new with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_xchg_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-atomic_long_xchg_acquire(atomic_long_t *v, long i)
+atomic_long_xchg_acquire(atomic_long_t *v, long new)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_xchg_acquire(v, i);
+ return raw_atomic_long_xchg_acquire(v, new);
}
+/**
+ * atomic_long_xchg_release() - atomic exchange with release ordering
+ * @v: pointer to atomic_long_t
+ * @new: long value to assign
+ *
+ * Atomically updates @v to @new with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_xchg_release() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-atomic_long_xchg_release(atomic_long_t *v, long i)
+atomic_long_xchg_release(atomic_long_t *v, long new)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_xchg_release(v, i);
+ return raw_atomic_long_xchg_release(v, new);
}
+/**
+ * atomic_long_xchg_relaxed() - atomic exchange with relaxed ordering
+ * @v: pointer to atomic_long_t
+ * @new: long value to assign
+ *
+ * Atomically updates @v to @new with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_xchg_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-atomic_long_xchg_relaxed(atomic_long_t *v, long i)
+atomic_long_xchg_relaxed(atomic_long_t *v, long new)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_xchg_relaxed(v, i);
+ return raw_atomic_long_xchg_relaxed(v, new);
}
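The exchange form is typically used to read and replace in one step, e.g. draining a counter while leaving zero behind (names are illustrative):

	static atomic_long_t pending;

	long drain_pending(void)
	{
		/* Returns the old count and resets it to 0, fully ordered. */
		return atomic_long_xchg(&pending, 0);
	}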
+/**
+ * atomic_long_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic_long_t
+ * @old: long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_cmpxchg() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_cmpxchg(v, old, new);
+ return raw_atomic_long_cmpxchg(v, old, new);
}
+/**
+ * atomic_long_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic_long_t
+ * @old: long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_cmpxchg_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_cmpxchg_acquire(v, old, new);
+ return raw_atomic_long_cmpxchg_acquire(v, old, new);
}
+/**
+ * atomic_long_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic_long_t
+ * @old: long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_cmpxchg_release() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_cmpxchg_release(v, old, new);
+ return raw_atomic_long_cmpxchg_release(v, old, new);
}
+/**
+ * atomic_long_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic_long_t
+ * @old: long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_cmpxchg_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_cmpxchg_relaxed(v, old, new);
+ return raw_atomic_long_cmpxchg_relaxed(v, old, new);
}
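The value-returning cmpxchg supports the classic retry loop; a sketch of a saturating increment (illustrative, not from this patch):

	static void saturating_inc(atomic_long_t *v)
	{
		long old = atomic_long_read(v);

		for (;;) {
			long prev;

			if (old == LONG_MAX)
				return;			/* already saturated */
			prev = atomic_long_cmpxchg(v, old, old + 1);
			if (prev == old)
				return;			/* exchange succeeded */
			old = prev;			/* raced; retry */
		}
	}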
+/**
+ * atomic_long_try_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic_long_t
+ * @old: pointer to long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, updates @old to the current value of @v.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
static __always_inline bool
atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
instrument_atomic_read_write(old, sizeof(*old));
- return arch_atomic_long_try_cmpxchg(v, old, new);
-}
-
+ return raw_atomic_long_try_cmpxchg(v, old, new);
+}
+
+/**
+ * atomic_long_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic_long_t
+ * @old: pointer to long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, updates @old to the current value of @v.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg_acquire() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
static __always_inline bool
atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
{
instrument_atomic_read_write(v, sizeof(*v));
instrument_atomic_read_write(old, sizeof(*old));
- return arch_atomic_long_try_cmpxchg_acquire(v, old, new);
-}
-
+ return raw_atomic_long_try_cmpxchg_acquire(v, old, new);
+}
+
+/**
+ * atomic_long_try_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic_long_t
+ * @old: pointer to long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, updates @old to the current value of @v.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg_release() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
static __always_inline bool
atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
instrument_atomic_read_write(old, sizeof(*old));
- return arch_atomic_long_try_cmpxchg_release(v, old, new);
-}
-
+ return raw_atomic_long_try_cmpxchg_release(v, old, new);
+}
+
+/**
+ * atomic_long_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic_long_t
+ * @old: pointer to long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, updates @old to the current value of @v.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg_relaxed() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
static __always_inline bool
atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
{
instrument_atomic_read_write(v, sizeof(*v));
instrument_atomic_read_write(old, sizeof(*old));
- return arch_atomic_long_try_cmpxchg_relaxed(v, old, new);
-}
-
+ return raw_atomic_long_try_cmpxchg_relaxed(v, old, new);
+}
+
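Because try_cmpxchg() writes the observed value back through @old on failure, the retry loop above needs no explicit reload; the same hypothetical saturating counter becomes:

	static void saturating_inc(atomic_long_t *v)
	{
		long old = atomic_long_read(v);

		do {
			if (old == LONG_MAX)
				return;			/* already saturated */
		} while (!atomic_long_try_cmpxchg(v, &old, old + 1));
	}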
+/**
+ * atomic_long_sub_and_test() - atomic subtract and test if zero with full ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_sub_and_test() there.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
static __always_inline bool
atomic_long_sub_and_test(long i, atomic_long_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_sub_and_test(i, v);
+ return raw_atomic_long_sub_and_test(i, v);
}
+/**
+ * atomic_long_dec_and_test() - atomic decrement and test if zero with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_dec_and_test() there.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
static __always_inline bool
atomic_long_dec_and_test(atomic_long_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_dec_and_test(v);
+ return raw_atomic_long_dec_and_test(v);
}
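dec_and_test() is the canonical reference-count release; a hedged sketch (struct and helper names are made up, and the object is assumed to be kmalloc()-allocated):

	struct my_obj {
		atomic_long_t refcnt;
	};

	void my_obj_put(struct my_obj *obj)
	{
		/* Full ordering keeps the free from being hoisted above the dec. */
		if (atomic_long_dec_and_test(&obj->refcnt))
			kfree(obj);
	}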
+/**
+ * atomic_long_inc_and_test() - atomic increment and test if zero with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_inc_and_test() there.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
static __always_inline bool
atomic_long_inc_and_test(atomic_long_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_inc_and_test(v);
+ return raw_atomic_long_inc_and_test(v);
}
+/**
+ * atomic_long_add_negative() - atomic add and test if negative with full ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_add_negative() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
static __always_inline bool
atomic_long_add_negative(long i, atomic_long_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_add_negative(i, v);
+ return raw_atomic_long_add_negative(i, v);
}
+/**
+ * atomic_long_add_negative_acquire() - atomic add and test if negative with acquire ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_add_negative_acquire() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
static __always_inline bool
atomic_long_add_negative_acquire(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_add_negative_acquire(i, v);
+ return raw_atomic_long_add_negative_acquire(i, v);
}
+/**
+ * atomic_long_add_negative_release() - atomic add and test if negative with release ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_add_negative_release() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
static __always_inline bool
atomic_long_add_negative_release(long i, atomic_long_t *v)
{
kcsan_release();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_add_negative_release(i, v);
+ return raw_atomic_long_add_negative_release(i, v);
}
+/**
+ * atomic_long_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_add_negative_relaxed() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
static __always_inline bool
atomic_long_add_negative_relaxed(long i, atomic_long_t *v)
{
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_add_negative_relaxed(i, v);
+ return raw_atomic_long_add_negative_relaxed(i, v);
}
+/**
+ * atomic_long_fetch_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic_long_t
+ * @a: long value to add
+ * @u: long value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_add_unless() there.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_fetch_add_unless(v, a, u);
+ return raw_atomic_long_fetch_add_unless(v, a, u);
}
+/**
+ * atomic_long_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic_long_t
+ * @a: long value to add
+ * @u: long value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_add_unless() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
static __always_inline bool
atomic_long_add_unless(atomic_long_t *v, long a, long u)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_add_unless(v, a, u);
+ return raw_atomic_long_add_unless(v, a, u);
}
+/**
+ * atomic_long_inc_not_zero() - atomic increment unless zero with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_inc_not_zero() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
static __always_inline bool
atomic_long_inc_not_zero(atomic_long_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_inc_not_zero(v);
+ return raw_atomic_long_inc_not_zero(v);
}
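inc_not_zero() is the matching acquire side of that pattern: it refuses to resurrect an object whose count has already hit zero (same hypothetical struct as above):

	bool my_obj_get(struct my_obj *obj)
	{
		/* Fails once the last reference is gone, so a racing lookup
		 * cannot take a new reference to a dying object. */
		return atomic_long_inc_not_zero(&obj->refcnt);
	}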
+/**
+ * atomic_long_inc_unless_negative() - atomic increment unless negative with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_inc_unless_negative() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
static __always_inline bool
atomic_long_inc_unless_negative(atomic_long_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_inc_unless_negative(v);
+ return raw_atomic_long_inc_unless_negative(v);
}
+/**
+ * atomic_long_dec_unless_positive() - atomic decrement unless positive with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_dec_unless_positive() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
static __always_inline bool
atomic_long_dec_unless_positive(atomic_long_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_dec_unless_positive(v);
+ return raw_atomic_long_dec_unless_positive(v);
}
+/**
+ * atomic_long_dec_if_positive() - atomic decrement if positive with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_dec_if_positive() there.
+ *
+ * Return: The old value of @v minus one, regardless of whether @v was updated.
+ */
static __always_inline long
atomic_long_dec_if_positive(atomic_long_t *v)
{
kcsan_mb();
instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_long_dec_if_positive(v);
+ return raw_atomic_long_dec_if_positive(v);
}
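Note the return convention: dec_if_positive() hands back the decremented value whether or not it was stored, so the sign of the result tells the caller what happened; a sketch (function name hypothetical):

	bool try_take_token(atomic_long_t *tokens)
	{
		/* >= 0 means the decrement was stored and a token was taken;
		 * a negative result means none were available. */
		return atomic_long_dec_if_positive(tokens) >= 0;
	}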
#define xchg(ptr, ...) \
@@ -1949,14 +4713,14 @@ atomic_long_dec_if_positive(atomic_long_t *v)
typeof(ptr) __ai_ptr = (ptr); \
kcsan_mb(); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_xchg(__ai_ptr, __VA_ARGS__); \
+ raw_xchg(__ai_ptr, __VA_ARGS__); \
})
#define xchg_acquire(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_xchg_acquire(__ai_ptr, __VA_ARGS__); \
+ raw_xchg_acquire(__ai_ptr, __VA_ARGS__); \
})
#define xchg_release(ptr, ...) \
@@ -1964,14 +4728,14 @@ atomic_long_dec_if_positive(atomic_long_t *v)
typeof(ptr) __ai_ptr = (ptr); \
kcsan_release(); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_xchg_release(__ai_ptr, __VA_ARGS__); \
+ raw_xchg_release(__ai_ptr, __VA_ARGS__); \
})
#define xchg_relaxed(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_xchg_relaxed(__ai_ptr, __VA_ARGS__); \
+ raw_xchg_relaxed(__ai_ptr, __VA_ARGS__); \
})
#define cmpxchg(ptr, ...) \
@@ -1979,14 +4743,14 @@ atomic_long_dec_if_positive(atomic_long_t *v)
typeof(ptr) __ai_ptr = (ptr); \
kcsan_mb(); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg(__ai_ptr, __VA_ARGS__); \
+ raw_cmpxchg(__ai_ptr, __VA_ARGS__); \
})
#define cmpxchg_acquire(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \
+ raw_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \
})
#define cmpxchg_release(ptr, ...) \
@@ -1994,14 +4758,14 @@ atomic_long_dec_if_positive(atomic_long_t *v)
typeof(ptr) __ai_ptr = (ptr); \
kcsan_release(); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg_release(__ai_ptr, __VA_ARGS__); \
+ raw_cmpxchg_release(__ai_ptr, __VA_ARGS__); \
})
#define cmpxchg_relaxed(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \
+ raw_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \
})
#define cmpxchg64(ptr, ...) \
@@ -2009,14 +4773,14 @@ atomic_long_dec_if_positive(atomic_long_t *v)
typeof(ptr) __ai_ptr = (ptr); \
kcsan_mb(); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg64(__ai_ptr, __VA_ARGS__); \
+ raw_cmpxchg64(__ai_ptr, __VA_ARGS__); \
})
#define cmpxchg64_acquire(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \
+ raw_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \
})
#define cmpxchg64_release(ptr, ...) \
@@ -2024,14 +4788,44 @@ atomic_long_dec_if_positive(atomic_long_t *v)
typeof(ptr) __ai_ptr = (ptr); \
kcsan_release(); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \
+ raw_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \
})
#define cmpxchg64_relaxed(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \
+ raw_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg128(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_mb(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg128(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg128_acquire(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg128_acquire(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg128_release(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_release(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg128_release(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg128_relaxed(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg128_relaxed(__ai_ptr, __VA_ARGS__); \
})
#define try_cmpxchg(ptr, oldp, ...) \
@@ -2041,7 +4835,7 @@ atomic_long_dec_if_positive(atomic_long_t *v)
kcsan_mb(); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
- arch_try_cmpxchg(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+ raw_try_cmpxchg(__ai_ptr, __ai_oldp, __VA_ARGS__); \
})
#define try_cmpxchg_acquire(ptr, oldp, ...) \
@@ -2050,7 +4844,7 @@ atomic_long_dec_if_positive(atomic_long_t *v)
typeof(oldp) __ai_oldp = (oldp); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
- arch_try_cmpxchg_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+ raw_try_cmpxchg_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \
})
#define try_cmpxchg_release(ptr, oldp, ...) \
@@ -2060,7 +4854,7 @@ atomic_long_dec_if_positive(atomic_long_t *v)
kcsan_release(); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
- arch_try_cmpxchg_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+ raw_try_cmpxchg_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \
})
#define try_cmpxchg_relaxed(ptr, oldp, ...) \
@@ -2069,7 +4863,7 @@ atomic_long_dec_if_positive(atomic_long_t *v)
typeof(oldp) __ai_oldp = (oldp); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
- arch_try_cmpxchg_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+ raw_try_cmpxchg_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \
})
#define try_cmpxchg64(ptr, oldp, ...) \
@@ -2079,7 +4873,7 @@ atomic_long_dec_if_positive(atomic_long_t *v)
kcsan_mb(); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
- arch_try_cmpxchg64(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+ raw_try_cmpxchg64(__ai_ptr, __ai_oldp, __VA_ARGS__); \
})
#define try_cmpxchg64_acquire(ptr, oldp, ...) \
@@ -2088,7 +4882,7 @@ atomic_long_dec_if_positive(atomic_long_t *v)
typeof(oldp) __ai_oldp = (oldp); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
- arch_try_cmpxchg64_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+ raw_try_cmpxchg64_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \
})
#define try_cmpxchg64_release(ptr, oldp, ...) \
@@ -2098,7 +4892,7 @@ atomic_long_dec_if_positive(atomic_long_t *v)
kcsan_release(); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
- arch_try_cmpxchg64_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+ raw_try_cmpxchg64_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \
})
#define try_cmpxchg64_relaxed(ptr, oldp, ...) \
@@ -2107,21 +4901,66 @@ atomic_long_dec_if_positive(atomic_long_t *v)
typeof(oldp) __ai_oldp = (oldp); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
- arch_try_cmpxchg64_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+ raw_try_cmpxchg64_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg128(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ kcsan_mb(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg128(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg128_acquire(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg128_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg128_release(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ kcsan_release(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg128_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg128_relaxed(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg128_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \
})
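The new 128-bit variants mirror the 64-bit ones; a sketch, assuming the u128 type and the system_has_cmpxchg128() feature test introduced alongside these macros, since not every architecture provides a native 128-bit cmpxchg:

	static bool update_pair(u128 *p, u128 old, u128 new)
	{
		if (!system_has_cmpxchg128())
			return false;		/* no native support */
		return try_cmpxchg128(p, &old, new);
	}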
#define cmpxchg_local(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg_local(__ai_ptr, __VA_ARGS__); \
+ raw_cmpxchg_local(__ai_ptr, __VA_ARGS__); \
})
#define cmpxchg64_local(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg64_local(__ai_ptr, __VA_ARGS__); \
+ raw_cmpxchg64_local(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg128_local(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg128_local(__ai_ptr, __VA_ARGS__); \
})
#define sync_cmpxchg(ptr, ...) \
@@ -2129,7 +4968,7 @@ atomic_long_dec_if_positive(atomic_long_t *v)
typeof(ptr) __ai_ptr = (ptr); \
kcsan_mb(); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_sync_cmpxchg(__ai_ptr, __VA_ARGS__); \
+ raw_sync_cmpxchg(__ai_ptr, __VA_ARGS__); \
})
#define try_cmpxchg_local(ptr, oldp, ...) \
@@ -2138,7 +4977,7 @@ atomic_long_dec_if_positive(atomic_long_t *v)
typeof(oldp) __ai_oldp = (oldp); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
- arch_try_cmpxchg_local(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+ raw_try_cmpxchg_local(__ai_ptr, __ai_oldp, __VA_ARGS__); \
})
#define try_cmpxchg64_local(ptr, oldp, ...) \
@@ -2147,24 +4986,26 @@ atomic_long_dec_if_positive(atomic_long_t *v)
typeof(oldp) __ai_oldp = (oldp); \
instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
- arch_try_cmpxchg64_local(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+ raw_try_cmpxchg64_local(__ai_ptr, __ai_oldp, __VA_ARGS__); \
})
-#define cmpxchg_double(ptr, ...) \
+#define try_cmpxchg128_local(ptr, oldp, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- kcsan_mb(); \
- instrument_atomic_read_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
- arch_cmpxchg_double(__ai_ptr, __VA_ARGS__); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg128_local(__ai_ptr, __ai_oldp, __VA_ARGS__); \
})
-
-#define cmpxchg_double_local(ptr, ...) \
+#define sync_try_cmpxchg(ptr, ...) \
({ \
typeof(ptr) __ai_ptr = (ptr); \
- instrument_atomic_read_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
- arch_cmpxchg_double_local(__ai_ptr, __VA_ARGS__); \
+ kcsan_mb(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_sync_try_cmpxchg(__ai_ptr, __VA_ARGS__); \
})
+
#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
-// 6b513a42e1a1b5962532a019b7fc91eaa044ad5e
+// 2cc4bc990fef44d3836ec108f11b610f3f438184
diff --git a/include/linux/atomic/atomic-long.h b/include/linux/atomic/atomic-long.h
index 2fc51ba66beb..c82947170ddc 100644
--- a/include/linux/atomic/atomic-long.h
+++ b/include/linux/atomic/atomic-long.h
@@ -21,1030 +21,1778 @@ typedef atomic_t atomic_long_t;
#define atomic_long_cond_read_relaxed atomic_cond_read_relaxed
#endif
-#ifdef CONFIG_64BIT
-
-static __always_inline long
-arch_atomic_long_read(const atomic_long_t *v)
-{
- return arch_atomic64_read(v);
-}
-
-static __always_inline long
-arch_atomic_long_read_acquire(const atomic_long_t *v)
-{
- return arch_atomic64_read_acquire(v);
-}
-
-static __always_inline void
-arch_atomic_long_set(atomic_long_t *v, long i)
-{
- arch_atomic64_set(v, i);
-}
-
-static __always_inline void
-arch_atomic_long_set_release(atomic_long_t *v, long i)
-{
- arch_atomic64_set_release(v, i);
-}
-
-static __always_inline void
-arch_atomic_long_add(long i, atomic_long_t *v)
-{
- arch_atomic64_add(i, v);
-}
-
-static __always_inline long
-arch_atomic_long_add_return(long i, atomic_long_t *v)
-{
- return arch_atomic64_add_return(i, v);
-}
-
-static __always_inline long
-arch_atomic_long_add_return_acquire(long i, atomic_long_t *v)
-{
- return arch_atomic64_add_return_acquire(i, v);
-}
-
-static __always_inline long
-arch_atomic_long_add_return_release(long i, atomic_long_t *v)
-{
- return arch_atomic64_add_return_release(i, v);
-}
-
-static __always_inline long
-arch_atomic_long_add_return_relaxed(long i, atomic_long_t *v)
-{
- return arch_atomic64_add_return_relaxed(i, v);
-}
-
-static __always_inline long
-arch_atomic_long_fetch_add(long i, atomic_long_t *v)
-{
- return arch_atomic64_fetch_add(i, v);
-}
-
-static __always_inline long
-arch_atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
-{
- return arch_atomic64_fetch_add_acquire(i, v);
-}
-
-static __always_inline long
-arch_atomic_long_fetch_add_release(long i, atomic_long_t *v)
-{
- return arch_atomic64_fetch_add_release(i, v);
-}
-
-static __always_inline long
-arch_atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
-{
- return arch_atomic64_fetch_add_relaxed(i, v);
-}
-
-static __always_inline void
-arch_atomic_long_sub(long i, atomic_long_t *v)
-{
- arch_atomic64_sub(i, v);
-}
-
-static __always_inline long
-arch_atomic_long_sub_return(long i, atomic_long_t *v)
-{
- return arch_atomic64_sub_return(i, v);
-}
-
-static __always_inline long
-arch_atomic_long_sub_return_acquire(long i, atomic_long_t *v)
-{
- return arch_atomic64_sub_return_acquire(i, v);
-}
-
-static __always_inline long
-arch_atomic_long_sub_return_release(long i, atomic_long_t *v)
-{
- return arch_atomic64_sub_return_release(i, v);
-}
-
-static __always_inline long
-arch_atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
-{
- return arch_atomic64_sub_return_relaxed(i, v);
-}
-
-static __always_inline long
-arch_atomic_long_fetch_sub(long i, atomic_long_t *v)
-{
- return arch_atomic64_fetch_sub(i, v);
-}
-
-static __always_inline long
-arch_atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
-{
- return arch_atomic64_fetch_sub_acquire(i, v);
-}
-
-static __always_inline long
-arch_atomic_long_fetch_sub_release(long i, atomic_long_t *v)
-{
- return arch_atomic64_fetch_sub_release(i, v);
-}
-
-static __always_inline long
-arch_atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
-{
- return arch_atomic64_fetch_sub_relaxed(i, v);
-}
-
-static __always_inline void
-arch_atomic_long_inc(atomic_long_t *v)
-{
- arch_atomic64_inc(v);
-}
-
-static __always_inline long
-arch_atomic_long_inc_return(atomic_long_t *v)
-{
- return arch_atomic64_inc_return(v);
-}
-
-static __always_inline long
-arch_atomic_long_inc_return_acquire(atomic_long_t *v)
-{
- return arch_atomic64_inc_return_acquire(v);
-}
-
-static __always_inline long
-arch_atomic_long_inc_return_release(atomic_long_t *v)
-{
- return arch_atomic64_inc_return_release(v);
-}
-
-static __always_inline long
-arch_atomic_long_inc_return_relaxed(atomic_long_t *v)
-{
- return arch_atomic64_inc_return_relaxed(v);
-}
-
-static __always_inline long
-arch_atomic_long_fetch_inc(atomic_long_t *v)
-{
- return arch_atomic64_fetch_inc(v);
-}
-
-static __always_inline long
-arch_atomic_long_fetch_inc_acquire(atomic_long_t *v)
-{
- return arch_atomic64_fetch_inc_acquire(v);
-}
-
-static __always_inline long
-arch_atomic_long_fetch_inc_release(atomic_long_t *v)
-{
- return arch_atomic64_fetch_inc_release(v);
-}
-
-static __always_inline long
-arch_atomic_long_fetch_inc_relaxed(atomic_long_t *v)
-{
- return arch_atomic64_fetch_inc_relaxed(v);
-}
-
-static __always_inline void
-arch_atomic_long_dec(atomic_long_t *v)
-{
- arch_atomic64_dec(v);
-}
-
-static __always_inline long
-arch_atomic_long_dec_return(atomic_long_t *v)
-{
- return arch_atomic64_dec_return(v);
-}
-
-static __always_inline long
-arch_atomic_long_dec_return_acquire(atomic_long_t *v)
-{
- return arch_atomic64_dec_return_acquire(v);
-}
-
-static __always_inline long
-arch_atomic_long_dec_return_release(atomic_long_t *v)
-{
- return arch_atomic64_dec_return_release(v);
-}
-
-static __always_inline long
-arch_atomic_long_dec_return_relaxed(atomic_long_t *v)
-{
- return arch_atomic64_dec_return_relaxed(v);
-}
-
-static __always_inline long
-arch_atomic_long_fetch_dec(atomic_long_t *v)
-{
- return arch_atomic64_fetch_dec(v);
-}
-
-static __always_inline long
-arch_atomic_long_fetch_dec_acquire(atomic_long_t *v)
-{
- return arch_atomic64_fetch_dec_acquire(v);
-}
-
-static __always_inline long
-arch_atomic_long_fetch_dec_release(atomic_long_t *v)
-{
- return arch_atomic64_fetch_dec_release(v);
-}
-
-static __always_inline long
-arch_atomic_long_fetch_dec_relaxed(atomic_long_t *v)
-{
- return arch_atomic64_fetch_dec_relaxed(v);
-}
-
-static __always_inline void
-arch_atomic_long_and(long i, atomic_long_t *v)
-{
- arch_atomic64_and(i, v);
-}
-
-static __always_inline long
-arch_atomic_long_fetch_and(long i, atomic_long_t *v)
-{
- return arch_atomic64_fetch_and(i, v);
-}
-
-static __always_inline long
-arch_atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
-{
- return arch_atomic64_fetch_and_acquire(i, v);
-}
-
-static __always_inline long
-arch_atomic_long_fetch_and_release(long i, atomic_long_t *v)
-{
- return arch_atomic64_fetch_and_release(i, v);
-}
-
-static __always_inline long
-arch_atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
-{
- return arch_atomic64_fetch_and_relaxed(i, v);
-}
-
-static __always_inline void
-arch_atomic_long_andnot(long i, atomic_long_t *v)
-{
- arch_atomic64_andnot(i, v);
-}
-
-static __always_inline long
-arch_atomic_long_fetch_andnot(long i, atomic_long_t *v)
-{
- return arch_atomic64_fetch_andnot(i, v);
-}
-
-static __always_inline long
-arch_atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
-{
- return arch_atomic64_fetch_andnot_acquire(i, v);
-}
-
-static __always_inline long
-arch_atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
-{
- return arch_atomic64_fetch_andnot_release(i, v);
-}
-
-static __always_inline long
-arch_atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
-{
- return arch_atomic64_fetch_andnot_relaxed(i, v);
-}
-
-static __always_inline void
-arch_atomic_long_or(long i, atomic_long_t *v)
-{
- arch_atomic64_or(i, v);
-}
-
-static __always_inline long
-arch_atomic_long_fetch_or(long i, atomic_long_t *v)
-{
- return arch_atomic64_fetch_or(i, v);
-}
-
-static __always_inline long
-arch_atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
-{
- return arch_atomic64_fetch_or_acquire(i, v);
-}
-
-static __always_inline long
-arch_atomic_long_fetch_or_release(long i, atomic_long_t *v)
-{
- return arch_atomic64_fetch_or_release(i, v);
-}
-
-static __always_inline long
-arch_atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
-{
- return arch_atomic64_fetch_or_relaxed(i, v);
-}
-
-static __always_inline void
-arch_atomic_long_xor(long i, atomic_long_t *v)
-{
- arch_atomic64_xor(i, v);
-}
-
-static __always_inline long
-arch_atomic_long_fetch_xor(long i, atomic_long_t *v)
-{
- return arch_atomic64_fetch_xor(i, v);
-}
-
-static __always_inline long
-arch_atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
-{
- return arch_atomic64_fetch_xor_acquire(i, v);
-}
-
-static __always_inline long
-arch_atomic_long_fetch_xor_release(long i, atomic_long_t *v)
-{
- return arch_atomic64_fetch_xor_release(i, v);
-}
-
-static __always_inline long
-arch_atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
-{
- return arch_atomic64_fetch_xor_relaxed(i, v);
-}
-
-static __always_inline long
-arch_atomic_long_xchg(atomic_long_t *v, long i)
-{
- return arch_atomic64_xchg(v, i);
-}
-
-static __always_inline long
-arch_atomic_long_xchg_acquire(atomic_long_t *v, long i)
-{
- return arch_atomic64_xchg_acquire(v, i);
-}
-
-static __always_inline long
-arch_atomic_long_xchg_release(atomic_long_t *v, long i)
-{
- return arch_atomic64_xchg_release(v, i);
-}
-
-static __always_inline long
-arch_atomic_long_xchg_relaxed(atomic_long_t *v, long i)
-{
- return arch_atomic64_xchg_relaxed(v, i);
-}
-
-static __always_inline long
-arch_atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
-{
- return arch_atomic64_cmpxchg(v, old, new);
-}
-
-static __always_inline long
-arch_atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
+/**
+ * raw_atomic_long_read() - atomic load with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically loads the value of @v with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_read() elsewhere.
+ *
+ * Return: The value loaded from @v.
+ */
+static __always_inline long
+raw_atomic_long_read(const atomic_long_t *v)
{
- return arch_atomic64_cmpxchg_acquire(v, old, new);
-}
-
-static __always_inline long
-arch_atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
-{
- return arch_atomic64_cmpxchg_release(v, old, new);
-}
-
-static __always_inline long
-arch_atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
-{
- return arch_atomic64_cmpxchg_relaxed(v, old, new);
-}
-
-static __always_inline bool
-arch_atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
-{
- return arch_atomic64_try_cmpxchg(v, (s64 *)old, new);
-}
-
-static __always_inline bool
-arch_atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
-{
- return arch_atomic64_try_cmpxchg_acquire(v, (s64 *)old, new);
-}
-
-static __always_inline bool
-arch_atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
-{
- return arch_atomic64_try_cmpxchg_release(v, (s64 *)old, new);
-}
-
-static __always_inline bool
-arch_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
-{
- return arch_atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new);
-}
-
-static __always_inline bool
-arch_atomic_long_sub_and_test(long i, atomic_long_t *v)
-{
- return arch_atomic64_sub_and_test(i, v);
-}
-
-static __always_inline bool
-arch_atomic_long_dec_and_test(atomic_long_t *v)
-{
- return arch_atomic64_dec_and_test(v);
-}
-
-static __always_inline bool
-arch_atomic_long_inc_and_test(atomic_long_t *v)
-{
- return arch_atomic64_inc_and_test(v);
-}
-
-static __always_inline bool
-arch_atomic_long_add_negative(long i, atomic_long_t *v)
-{
- return arch_atomic64_add_negative(i, v);
-}
-
-static __always_inline bool
-arch_atomic_long_add_negative_acquire(long i, atomic_long_t *v)
-{
- return arch_atomic64_add_negative_acquire(i, v);
-}
-
-static __always_inline bool
-arch_atomic_long_add_negative_release(long i, atomic_long_t *v)
-{
- return arch_atomic64_add_negative_release(i, v);
-}
-
-static __always_inline bool
-arch_atomic_long_add_negative_relaxed(long i, atomic_long_t *v)
-{
- return arch_atomic64_add_negative_relaxed(i, v);
-}
-
-static __always_inline long
-arch_atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
-{
- return arch_atomic64_fetch_add_unless(v, a, u);
-}
-
-static __always_inline bool
-arch_atomic_long_add_unless(atomic_long_t *v, long a, long u)
-{
- return arch_atomic64_add_unless(v, a, u);
-}
-
-static __always_inline bool
-arch_atomic_long_inc_not_zero(atomic_long_t *v)
-{
- return arch_atomic64_inc_not_zero(v);
-}
-
-static __always_inline bool
-arch_atomic_long_inc_unless_negative(atomic_long_t *v)
-{
- return arch_atomic64_inc_unless_negative(v);
-}
-
-static __always_inline bool
-arch_atomic_long_dec_unless_positive(atomic_long_t *v)
-{
- return arch_atomic64_dec_unless_positive(v);
-}
-
-static __always_inline long
-arch_atomic_long_dec_if_positive(atomic_long_t *v)
-{
- return arch_atomic64_dec_if_positive(v);
-}
-
-#else /* CONFIG_64BIT */
-
-static __always_inline long
-arch_atomic_long_read(const atomic_long_t *v)
-{
- return arch_atomic_read(v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_read(v);
+#else
+ return raw_atomic_read(v);
+#endif
}
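The raw_ prefix marks the uninstrumented entry points; noinstr code must use them so that no KASAN/KCSAN calls are emitted, e.g. (function name hypothetical):

	static noinstr long peek_state(atomic_long_t *state)
	{
		/* No instrumentation: safe on the early entry/exit paths. */
		return raw_atomic_long_read(state);
	}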
+/**
+ * raw_atomic_long_read_acquire() - atomic load with acquire ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically loads the value of @v with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_read_acquire() elsewhere.
+ *
+ * Return: The value loaded from @v.
+ */
static __always_inline long
-arch_atomic_long_read_acquire(const atomic_long_t *v)
+raw_atomic_long_read_acquire(const atomic_long_t *v)
{
- return arch_atomic_read_acquire(v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_read_acquire(v);
+#else
+ return raw_atomic_read_acquire(v);
+#endif
}
+/**
+ * raw_atomic_long_set() - atomic set with relaxed ordering
+ * @v: pointer to atomic_long_t
+ * @i: long value to assign
+ *
+ * Atomically sets @v to @i with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_set() elsewhere.
+ *
+ * Return: Nothing.
+ */
static __always_inline void
-arch_atomic_long_set(atomic_long_t *v, long i)
+raw_atomic_long_set(atomic_long_t *v, long i)
{
- arch_atomic_set(v, i);
+#ifdef CONFIG_64BIT
+ raw_atomic64_set(v, i);
+#else
+ raw_atomic_set(v, i);
+#endif
}
+/**
+ * raw_atomic_long_set_release() - atomic set with release ordering
+ * @v: pointer to atomic_long_t
+ * @i: long value to assign
+ *
+ * Atomically sets @v to @i with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_set_release() elsewhere.
+ *
+ * Return: Nothing.
+ */
static __always_inline void
-arch_atomic_long_set_release(atomic_long_t *v, long i)
+raw_atomic_long_set_release(atomic_long_t *v, long i)
{
- arch_atomic_set_release(v, i);
+#ifdef CONFIG_64BIT
+ raw_atomic64_set_release(v, i);
+#else
+ raw_atomic_set_release(v, i);
+#endif
}
+/**
+ * raw_atomic_long_add() - atomic add with relaxed ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_add() elsewhere.
+ *
+ * Return: Nothing.
+ */
static __always_inline void
-arch_atomic_long_add(long i, atomic_long_t *v)
+raw_atomic_long_add(long i, atomic_long_t *v)
{
- arch_atomic_add(i, v);
+#ifdef CONFIG_64BIT
+ raw_atomic64_add(i, v);
+#else
+ raw_atomic_add(i, v);
+#endif
}
+/**
+ * raw_atomic_long_add_return() - atomic add with full ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_add_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline long
-arch_atomic_long_add_return(long i, atomic_long_t *v)
+raw_atomic_long_add_return(long i, atomic_long_t *v)
{
- return arch_atomic_add_return(i, v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_add_return(i, v);
+#else
+ return raw_atomic_add_return(i, v);
+#endif
}
+/**
+ * raw_atomic_long_add_return_acquire() - atomic add with acquire ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_add_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline long
-arch_atomic_long_add_return_acquire(long i, atomic_long_t *v)
+raw_atomic_long_add_return_acquire(long i, atomic_long_t *v)
{
- return arch_atomic_add_return_acquire(i, v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_add_return_acquire(i, v);
+#else
+ return raw_atomic_add_return_acquire(i, v);
+#endif
}
+/**
+ * raw_atomic_long_add_return_release() - atomic add with release ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_add_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline long
-arch_atomic_long_add_return_release(long i, atomic_long_t *v)
+raw_atomic_long_add_return_release(long i, atomic_long_t *v)
{
- return arch_atomic_add_return_release(i, v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_add_return_release(i, v);
+#else
+ return raw_atomic_add_return_release(i, v);
+#endif
}
+/**
+ * raw_atomic_long_add_return_relaxed() - atomic add with relaxed ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_add_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline long
-arch_atomic_long_add_return_relaxed(long i, atomic_long_t *v)
+raw_atomic_long_add_return_relaxed(long i, atomic_long_t *v)
{
- return arch_atomic_add_return_relaxed(i, v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_add_return_relaxed(i, v);
+#else
+ return raw_atomic_add_return_relaxed(i, v);
+#endif
}
+/**
+ * raw_atomic_long_fetch_add() - atomic add with full ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_add() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-arch_atomic_long_fetch_add(long i, atomic_long_t *v)
+raw_atomic_long_fetch_add(long i, atomic_long_t *v)
{
- return arch_atomic_fetch_add(i, v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_add(i, v);
+#else
+ return raw_atomic_fetch_add(i, v);
+#endif
}
+/**
+ * raw_atomic_long_fetch_add_acquire() - atomic add with acquire ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_add_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-arch_atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
+raw_atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
{
- return arch_atomic_fetch_add_acquire(i, v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_add_acquire(i, v);
+#else
+ return raw_atomic_fetch_add_acquire(i, v);
+#endif
}
+/**
+ * raw_atomic_long_fetch_add_release() - atomic add with release ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_add_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-arch_atomic_long_fetch_add_release(long i, atomic_long_t *v)
+raw_atomic_long_fetch_add_release(long i, atomic_long_t *v)
{
- return arch_atomic_fetch_add_release(i, v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_add_release(i, v);
+#else
+ return raw_atomic_fetch_add_release(i, v);
+#endif
}
+/**
+ * raw_atomic_long_fetch_add_relaxed() - atomic add with relaxed ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_add_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-arch_atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
+raw_atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
{
- return arch_atomic_fetch_add_relaxed(i, v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_add_relaxed(i, v);
+#else
+ return raw_atomic_fetch_add_relaxed(i, v);
+#endif
}
+/**
+ * raw_atomic_long_sub() - atomic subtract with relaxed ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_sub() elsewhere.
+ *
+ * Return: Nothing.
+ */
static __always_inline void
-arch_atomic_long_sub(long i, atomic_long_t *v)
+raw_atomic_long_sub(long i, atomic_long_t *v)
{
- arch_atomic_sub(i, v);
+#ifdef CONFIG_64BIT
+ raw_atomic64_sub(i, v);
+#else
+ raw_atomic_sub(i, v);
+#endif
}
+/**
+ * raw_atomic_long_sub_return() - atomic subtract with full ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_sub_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline long
-arch_atomic_long_sub_return(long i, atomic_long_t *v)
+raw_atomic_long_sub_return(long i, atomic_long_t *v)
{
- return arch_atomic_sub_return(i, v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_sub_return(i, v);
+#else
+ return raw_atomic_sub_return(i, v);
+#endif
}
+/**
+ * raw_atomic_long_sub_return_acquire() - atomic subtract with acquire ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_sub_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline long
-arch_atomic_long_sub_return_acquire(long i, atomic_long_t *v)
+raw_atomic_long_sub_return_acquire(long i, atomic_long_t *v)
{
- return arch_atomic_sub_return_acquire(i, v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_sub_return_acquire(i, v);
+#else
+ return raw_atomic_sub_return_acquire(i, v);
+#endif
}
+/**
+ * raw_atomic_long_sub_return_release() - atomic subtract with release ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_sub_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline long
-arch_atomic_long_sub_return_release(long i, atomic_long_t *v)
+raw_atomic_long_sub_return_release(long i, atomic_long_t *v)
{
- return arch_atomic_sub_return_release(i, v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_sub_return_release(i, v);
+#else
+ return raw_atomic_sub_return_release(i, v);
+#endif
}
+/**
+ * raw_atomic_long_sub_return_relaxed() - atomic subtract with relaxed ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_sub_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline long
-arch_atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
+raw_atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
{
- return arch_atomic_sub_return_relaxed(i, v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_sub_return_relaxed(i, v);
+#else
+ return raw_atomic_sub_return_relaxed(i, v);
+#endif
}
+/**
+ * raw_atomic_long_fetch_sub() - atomic subtract with full ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_sub() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-arch_atomic_long_fetch_sub(long i, atomic_long_t *v)
+raw_atomic_long_fetch_sub(long i, atomic_long_t *v)
{
- return arch_atomic_fetch_sub(i, v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_sub(i, v);
+#else
+ return raw_atomic_fetch_sub(i, v);
+#endif
}
+/**
+ * raw_atomic_long_fetch_sub_acquire() - atomic subtract with acquire ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_sub_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-arch_atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
+raw_atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
{
- return arch_atomic_fetch_sub_acquire(i, v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_sub_acquire(i, v);
+#else
+ return raw_atomic_fetch_sub_acquire(i, v);
+#endif
}
+/**
+ * raw_atomic_long_fetch_sub_release() - atomic subtract with release ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_sub_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-arch_atomic_long_fetch_sub_release(long i, atomic_long_t *v)
+raw_atomic_long_fetch_sub_release(long i, atomic_long_t *v)
{
- return arch_atomic_fetch_sub_release(i, v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_sub_release(i, v);
+#else
+ return raw_atomic_fetch_sub_release(i, v);
+#endif
}
+/**
+ * raw_atomic_long_fetch_sub_relaxed() - atomic subtract with relaxed ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_sub_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-arch_atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
+raw_atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
{
- return arch_atomic_fetch_sub_relaxed(i, v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_sub_relaxed(i, v);
+#else
+ return raw_atomic_fetch_sub_relaxed(i, v);
+#endif
}
+/**
+ * raw_atomic_long_inc() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_inc() elsewhere.
+ *
+ * Return: Nothing.
+ */
static __always_inline void
-arch_atomic_long_inc(atomic_long_t *v)
+raw_atomic_long_inc(atomic_long_t *v)
{
- arch_atomic_inc(v);
+#ifdef CONFIG_64BIT
+ raw_atomic64_inc(v);
+#else
+ raw_atomic_inc(v);
+#endif
}
+/**
+ * raw_atomic_long_inc_return() - atomic increment with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_inc_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline long
-arch_atomic_long_inc_return(atomic_long_t *v)
+raw_atomic_long_inc_return(atomic_long_t *v)
{
- return arch_atomic_inc_return(v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_inc_return(v);
+#else
+ return raw_atomic_inc_return(v);
+#endif
}
+/**
+ * raw_atomic_long_inc_return_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_inc_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline long
-arch_atomic_long_inc_return_acquire(atomic_long_t *v)
+raw_atomic_long_inc_return_acquire(atomic_long_t *v)
{
- return arch_atomic_inc_return_acquire(v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_inc_return_acquire(v);
+#else
+ return raw_atomic_inc_return_acquire(v);
+#endif
}
+/**
+ * raw_atomic_long_inc_return_release() - atomic increment with release ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_inc_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline long
-arch_atomic_long_inc_return_release(atomic_long_t *v)
+raw_atomic_long_inc_return_release(atomic_long_t *v)
{
- return arch_atomic_inc_return_release(v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_inc_return_release(v);
+#else
+ return raw_atomic_inc_return_release(v);
+#endif
}
+/**
+ * raw_atomic_long_inc_return_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_inc_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline long
-arch_atomic_long_inc_return_relaxed(atomic_long_t *v)
+raw_atomic_long_inc_return_relaxed(atomic_long_t *v)
{
- return arch_atomic_inc_return_relaxed(v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_inc_return_relaxed(v);
+#else
+ return raw_atomic_inc_return_relaxed(v);
+#endif
}
+/**
+ * raw_atomic_long_fetch_inc() - atomic increment with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_inc() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-arch_atomic_long_fetch_inc(atomic_long_t *v)
+raw_atomic_long_fetch_inc(atomic_long_t *v)
{
- return arch_atomic_fetch_inc(v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_inc(v);
+#else
+ return raw_atomic_fetch_inc(v);
+#endif
}
+/**
+ * raw_atomic_long_fetch_inc_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_inc_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-arch_atomic_long_fetch_inc_acquire(atomic_long_t *v)
+raw_atomic_long_fetch_inc_acquire(atomic_long_t *v)
{
- return arch_atomic_fetch_inc_acquire(v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_inc_acquire(v);
+#else
+ return raw_atomic_fetch_inc_acquire(v);
+#endif
}
+/**
+ * raw_atomic_long_fetch_inc_release() - atomic increment with release ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_inc_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-arch_atomic_long_fetch_inc_release(atomic_long_t *v)
+raw_atomic_long_fetch_inc_release(atomic_long_t *v)
{
- return arch_atomic_fetch_inc_release(v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_inc_release(v);
+#else
+ return raw_atomic_fetch_inc_release(v);
+#endif
}
+/**
+ * raw_atomic_long_fetch_inc_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_inc_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-arch_atomic_long_fetch_inc_relaxed(atomic_long_t *v)
+raw_atomic_long_fetch_inc_relaxed(atomic_long_t *v)
{
- return arch_atomic_fetch_inc_relaxed(v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_inc_relaxed(v);
+#else
+ return raw_atomic_fetch_inc_relaxed(v);
+#endif
}
+/**
+ * raw_atomic_long_dec() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_dec() elsewhere.
+ *
+ * Return: Nothing.
+ */
static __always_inline void
-arch_atomic_long_dec(atomic_long_t *v)
+raw_atomic_long_dec(atomic_long_t *v)
{
- arch_atomic_dec(v);
+#ifdef CONFIG_64BIT
+ raw_atomic64_dec(v);
+#else
+ raw_atomic_dec(v);
+#endif
}
+/**
+ * raw_atomic_long_dec_return() - atomic decrement with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_dec_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline long
-arch_atomic_long_dec_return(atomic_long_t *v)
+raw_atomic_long_dec_return(atomic_long_t *v)
{
- return arch_atomic_dec_return(v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_dec_return(v);
+#else
+ return raw_atomic_dec_return(v);
+#endif
}
+/**
+ * raw_atomic_long_dec_return_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_dec_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline long
-arch_atomic_long_dec_return_acquire(atomic_long_t *v)
+raw_atomic_long_dec_return_acquire(atomic_long_t *v)
{
- return arch_atomic_dec_return_acquire(v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_dec_return_acquire(v);
+#else
+ return raw_atomic_dec_return_acquire(v);
+#endif
}
+/**
+ * raw_atomic_long_dec_return_release() - atomic decrement with release ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_dec_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline long
-arch_atomic_long_dec_return_release(atomic_long_t *v)
+raw_atomic_long_dec_return_release(atomic_long_t *v)
{
- return arch_atomic_dec_return_release(v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_dec_return_release(v);
+#else
+ return raw_atomic_dec_return_release(v);
+#endif
}
+/**
+ * raw_atomic_long_dec_return_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_dec_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
static __always_inline long
-arch_atomic_long_dec_return_relaxed(atomic_long_t *v)
+raw_atomic_long_dec_return_relaxed(atomic_long_t *v)
{
- return arch_atomic_dec_return_relaxed(v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_dec_return_relaxed(v);
+#else
+ return raw_atomic_dec_return_relaxed(v);
+#endif
}
+/**
+ * raw_atomic_long_fetch_dec() - atomic decrement with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_dec() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-arch_atomic_long_fetch_dec(atomic_long_t *v)
+raw_atomic_long_fetch_dec(atomic_long_t *v)
{
- return arch_atomic_fetch_dec(v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_dec(v);
+#else
+ return raw_atomic_fetch_dec(v);
+#endif
}
+/**
+ * raw_atomic_long_fetch_dec_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_dec_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-arch_atomic_long_fetch_dec_acquire(atomic_long_t *v)
+raw_atomic_long_fetch_dec_acquire(atomic_long_t *v)
{
- return arch_atomic_fetch_dec_acquire(v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_dec_acquire(v);
+#else
+ return raw_atomic_fetch_dec_acquire(v);
+#endif
}
+/**
+ * raw_atomic_long_fetch_dec_release() - atomic decrement with release ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_dec_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-arch_atomic_long_fetch_dec_release(atomic_long_t *v)
+raw_atomic_long_fetch_dec_release(atomic_long_t *v)
{
- return arch_atomic_fetch_dec_release(v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_dec_release(v);
+#else
+ return raw_atomic_fetch_dec_release(v);
+#endif
}
+/**
+ * raw_atomic_long_fetch_dec_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_dec_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-arch_atomic_long_fetch_dec_relaxed(atomic_long_t *v)
+raw_atomic_long_fetch_dec_relaxed(atomic_long_t *v)
{
- return arch_atomic_fetch_dec_relaxed(v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_dec_relaxed(v);
+#else
+ return raw_atomic_fetch_dec_relaxed(v);
+#endif
}
+/**
+ * raw_atomic_long_and() - atomic bitwise AND with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_and() elsewhere.
+ *
+ * Return: Nothing.
+ */
static __always_inline void
-arch_atomic_long_and(long i, atomic_long_t *v)
+raw_atomic_long_and(long i, atomic_long_t *v)
{
- arch_atomic_and(i, v);
+#ifdef CONFIG_64BIT
+ raw_atomic64_and(i, v);
+#else
+ raw_atomic_and(i, v);
+#endif
}
+/**
+ * raw_atomic_long_fetch_and() - atomic bitwise AND with full ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_and() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-arch_atomic_long_fetch_and(long i, atomic_long_t *v)
+raw_atomic_long_fetch_and(long i, atomic_long_t *v)
{
- return arch_atomic_fetch_and(i, v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_and(i, v);
+#else
+ return raw_atomic_fetch_and(i, v);
+#endif
}
+/**
+ * raw_atomic_long_fetch_and_acquire() - atomic bitwise AND with acquire ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_and_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-arch_atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
+raw_atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
{
- return arch_atomic_fetch_and_acquire(i, v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_and_acquire(i, v);
+#else
+ return raw_atomic_fetch_and_acquire(i, v);
+#endif
}
+/**
+ * raw_atomic_long_fetch_and_release() - atomic bitwise AND with release ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_and_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-arch_atomic_long_fetch_and_release(long i, atomic_long_t *v)
+raw_atomic_long_fetch_and_release(long i, atomic_long_t *v)
{
- return arch_atomic_fetch_and_release(i, v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_and_release(i, v);
+#else
+ return raw_atomic_fetch_and_release(i, v);
+#endif
}
+/**
+ * raw_atomic_long_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_and_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-arch_atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
+raw_atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
{
- return arch_atomic_fetch_and_relaxed(i, v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_and_relaxed(i, v);
+#else
+ return raw_atomic_fetch_and_relaxed(i, v);
+#endif
}
+/**
+ * raw_atomic_long_andnot() - atomic bitwise AND NOT with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_andnot() elsewhere.
+ *
+ * Return: Nothing.
+ */
static __always_inline void
-arch_atomic_long_andnot(long i, atomic_long_t *v)
+raw_atomic_long_andnot(long i, atomic_long_t *v)
{
- arch_atomic_andnot(i, v);
+#ifdef CONFIG_64BIT
+ raw_atomic64_andnot(i, v);
+#else
+ raw_atomic_andnot(i, v);
+#endif
}
+/**
+ * raw_atomic_long_fetch_andnot() - atomic bitwise AND NOT with full ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & ~@i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_andnot() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-arch_atomic_long_fetch_andnot(long i, atomic_long_t *v)
+raw_atomic_long_fetch_andnot(long i, atomic_long_t *v)
{
- return arch_atomic_fetch_andnot(i, v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_andnot(i, v);
+#else
+ return raw_atomic_fetch_andnot(i, v);
+#endif
}
+/**
+ * raw_atomic_long_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & ~@i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_andnot_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-arch_atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
+raw_atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
{
- return arch_atomic_fetch_andnot_acquire(i, v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_andnot_acquire(i, v);
+#else
+ return raw_atomic_fetch_andnot_acquire(i, v);
+#endif
}
+/**
+ * raw_atomic_long_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & ~@i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_andnot_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-arch_atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
+raw_atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
{
- return arch_atomic_fetch_andnot_release(i, v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_andnot_release(i, v);
+#else
+ return raw_atomic_fetch_andnot_release(i, v);
+#endif
}
+/**
+ * raw_atomic_long_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_andnot_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-arch_atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
+raw_atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
{
- return arch_atomic_fetch_andnot_relaxed(i, v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_andnot_relaxed(i, v);
+#else
+ return raw_atomic_fetch_andnot_relaxed(i, v);
+#endif
}
+/**
+ * raw_atomic_long_or() - atomic bitwise OR with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_or() elsewhere.
+ *
+ * Return: Nothing.
+ */
static __always_inline void
-arch_atomic_long_or(long i, atomic_long_t *v)
+raw_atomic_long_or(long i, atomic_long_t *v)
{
- arch_atomic_or(i, v);
+#ifdef CONFIG_64BIT
+ raw_atomic64_or(i, v);
+#else
+ raw_atomic_or(i, v);
+#endif
}
+/**
+ * raw_atomic_long_fetch_or() - atomic bitwise OR with full ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v | @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_or() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-arch_atomic_long_fetch_or(long i, atomic_long_t *v)
+raw_atomic_long_fetch_or(long i, atomic_long_t *v)
{
- return arch_atomic_fetch_or(i, v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_or(i, v);
+#else
+ return raw_atomic_fetch_or(i, v);
+#endif
}
+/**
+ * raw_atomic_long_fetch_or_acquire() - atomic bitwise OR with acquire ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v | @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_or_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-arch_atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
+raw_atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
{
- return arch_atomic_fetch_or_acquire(i, v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_or_acquire(i, v);
+#else
+ return raw_atomic_fetch_or_acquire(i, v);
+#endif
}
+/**
+ * raw_atomic_long_fetch_or_release() - atomic bitwise OR with release ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v | @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_or_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-arch_atomic_long_fetch_or_release(long i, atomic_long_t *v)
+raw_atomic_long_fetch_or_release(long i, atomic_long_t *v)
{
- return arch_atomic_fetch_or_release(i, v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_or_release(i, v);
+#else
+ return raw_atomic_fetch_or_release(i, v);
+#endif
}
+/**
+ * raw_atomic_long_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_or_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-arch_atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
+raw_atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
{
- return arch_atomic_fetch_or_relaxed(i, v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_or_relaxed(i, v);
+#else
+ return raw_atomic_fetch_or_relaxed(i, v);
+#endif
}
+/**
+ * raw_atomic_long_xor() - atomic bitwise XOR with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_xor() elsewhere.
+ *
+ * Return: Nothing.
+ */
static __always_inline void
-arch_atomic_long_xor(long i, atomic_long_t *v)
+raw_atomic_long_xor(long i, atomic_long_t *v)
{
- arch_atomic_xor(i, v);
+#ifdef CONFIG_64BIT
+ raw_atomic64_xor(i, v);
+#else
+ raw_atomic_xor(i, v);
+#endif
}
+/**
+ * raw_atomic_long_fetch_xor() - atomic bitwise XOR with full ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v ^ @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_xor() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-arch_atomic_long_fetch_xor(long i, atomic_long_t *v)
+raw_atomic_long_fetch_xor(long i, atomic_long_t *v)
{
- return arch_atomic_fetch_xor(i, v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_xor(i, v);
+#else
+ return raw_atomic_fetch_xor(i, v);
+#endif
}
+/**
+ * raw_atomic_long_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v ^ @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_xor_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-arch_atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
+raw_atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
{
- return arch_atomic_fetch_xor_acquire(i, v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_xor_acquire(i, v);
+#else
+ return raw_atomic_fetch_xor_acquire(i, v);
+#endif
}
+/**
+ * raw_atomic_long_fetch_xor_release() - atomic bitwise XOR with release ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v ^ @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_xor_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-arch_atomic_long_fetch_xor_release(long i, atomic_long_t *v)
+raw_atomic_long_fetch_xor_release(long i, atomic_long_t *v)
{
- return arch_atomic_fetch_xor_release(i, v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_xor_release(i, v);
+#else
+ return raw_atomic_fetch_xor_release(i, v);
+#endif
}
+/**
+ * raw_atomic_long_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_xor_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-arch_atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
+raw_atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
{
- return arch_atomic_fetch_xor_relaxed(i, v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_xor_relaxed(i, v);
+#else
+ return raw_atomic_fetch_xor_relaxed(i, v);
+#endif
}
+/**
+ * raw_atomic_long_xchg() - atomic exchange with full ordering
+ * @v: pointer to atomic_long_t
+ * @new: long value to assign
+ *
+ * Atomically updates @v to @new with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_xchg() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-arch_atomic_long_xchg(atomic_long_t *v, long i)
+raw_atomic_long_xchg(atomic_long_t *v, long new)
{
- return arch_atomic_xchg(v, i);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_xchg(v, new);
+#else
+ return raw_atomic_xchg(v, new);
+#endif
}
+/**
+ * raw_atomic_long_xchg_acquire() - atomic exchange with acquire ordering
+ * @v: pointer to atomic_long_t
+ * @new: long value to assign
+ *
+ * Atomically updates @v to @new with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_xchg_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-arch_atomic_long_xchg_acquire(atomic_long_t *v, long i)
+raw_atomic_long_xchg_acquire(atomic_long_t *v, long new)
{
- return arch_atomic_xchg_acquire(v, i);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_xchg_acquire(v, new);
+#else
+ return raw_atomic_xchg_acquire(v, new);
+#endif
}
+/**
+ * raw_atomic_long_xchg_release() - atomic exchange with release ordering
+ * @v: pointer to atomic_long_t
+ * @new: long value to assign
+ *
+ * Atomically updates @v to @new with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_xchg_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-arch_atomic_long_xchg_release(atomic_long_t *v, long i)
+raw_atomic_long_xchg_release(atomic_long_t *v, long new)
{
- return arch_atomic_xchg_release(v, i);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_xchg_release(v, new);
+#else
+ return raw_atomic_xchg_release(v, new);
+#endif
}
+/**
+ * raw_atomic_long_xchg_relaxed() - atomic exchange with relaxed ordering
+ * @v: pointer to atomic_long_t
+ * @new: long value to assign
+ *
+ * Atomically updates @v to @new with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_xchg_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-arch_atomic_long_xchg_relaxed(atomic_long_t *v, long i)
+raw_atomic_long_xchg_relaxed(atomic_long_t *v, long new)
{
- return arch_atomic_xchg_relaxed(v, i);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_xchg_relaxed(v, new);
+#else
+ return raw_atomic_xchg_relaxed(v, new);
+#endif
}
+/**
+ * raw_atomic_long_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic_long_t
+ * @old: long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_cmpxchg() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-arch_atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
+raw_atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
{
- return arch_atomic_cmpxchg(v, old, new);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_cmpxchg(v, old, new);
+#else
+ return raw_atomic_cmpxchg(v, old, new);
+#endif
}
+/**
+ * raw_atomic_long_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic_long_t
+ * @old: long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_cmpxchg_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-arch_atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
+raw_atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
{
- return arch_atomic_cmpxchg_acquire(v, old, new);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_cmpxchg_acquire(v, old, new);
+#else
+ return raw_atomic_cmpxchg_acquire(v, old, new);
+#endif
}
+/**
+ * raw_atomic_long_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic_long_t
+ * @old: long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_cmpxchg_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-arch_atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
+raw_atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
{
- return arch_atomic_cmpxchg_release(v, old, new);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_cmpxchg_release(v, old, new);
+#else
+ return raw_atomic_cmpxchg_release(v, old, new);
+#endif
}
+/**
+ * raw_atomic_long_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic_long_t
+ * @old: long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_cmpxchg_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-arch_atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
+raw_atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
{
- return arch_atomic_cmpxchg_relaxed(v, old, new);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_cmpxchg_relaxed(v, old, new);
+#else
+ return raw_atomic_cmpxchg_relaxed(v, old, new);
+#endif
}
+/**
+ * raw_atomic_long_try_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic_long_t
+ * @old: pointer to long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, updates @old to the current value of @v.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_try_cmpxchg() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
static __always_inline bool
-arch_atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
+raw_atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
{
- return arch_atomic_try_cmpxchg(v, (int *)old, new);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_try_cmpxchg(v, (s64 *)old, new);
+#else
+ return raw_atomic_try_cmpxchg(v, (int *)old, new);
+#endif
}
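
Because a failed raw_atomic_long_try_cmpxchg() refreshes @old, retry loops need no explicit re-read. A hedged sketch of the usual pattern, here a lock-free "store the maximum" helper (the name is illustrative):

/* Update *v to new if new is larger; loop until we win or lose the race. */
static inline void long_set_max(atomic_long_t *v, long new)
{
	long old = raw_atomic_long_read(v);

	while (old < new) {
		if (raw_atomic_long_try_cmpxchg(v, &old, new))
			break;	/* we installed new */
		/* old now holds the current value; re-test and retry */
	}
}
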
+/**
+ * raw_atomic_long_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic_long_t
+ * @old: pointer to long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, updates @old to the current value of @v.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_try_cmpxchg_acquire() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
static __always_inline bool
-arch_atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
+raw_atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
{
- return arch_atomic_try_cmpxchg_acquire(v, (int *)old, new);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_try_cmpxchg_acquire(v, (s64 *)old, new);
+#else
+ return raw_atomic_try_cmpxchg_acquire(v, (int *)old, new);
+#endif
}
+/**
+ * raw_atomic_long_try_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic_long_t
+ * @old: pointer to long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, updates @old to the current value of @v.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_try_cmpxchg_release() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
static __always_inline bool
-arch_atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
+raw_atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
{
- return arch_atomic_try_cmpxchg_release(v, (int *)old, new);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_try_cmpxchg_release(v, (s64 *)old, new);
+#else
+ return raw_atomic_try_cmpxchg_release(v, (int *)old, new);
+#endif
}
+/**
+ * raw_atomic_long_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic_long_t
+ * @old: pointer to long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, updates @old to the current value of @v.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_try_cmpxchg_relaxed() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
static __always_inline bool
-arch_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
+raw_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
{
- return arch_atomic_try_cmpxchg_relaxed(v, (int *)old, new);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new);
+#else
+ return raw_atomic_try_cmpxchg_relaxed(v, (int *)old, new);
+#endif
}
+/**
+ * raw_atomic_long_sub_and_test() - atomic subtract and test if zero with full ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_sub_and_test() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
static __always_inline bool
-arch_atomic_long_sub_and_test(long i, atomic_long_t *v)
+raw_atomic_long_sub_and_test(long i, atomic_long_t *v)
{
- return arch_atomic_sub_and_test(i, v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_sub_and_test(i, v);
+#else
+ return raw_atomic_sub_and_test(i, v);
+#endif
}
+/**
+ * raw_atomic_long_dec_and_test() - atomic decrement and test if zero with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_dec_and_test() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
static __always_inline bool
-arch_atomic_long_dec_and_test(atomic_long_t *v)
+raw_atomic_long_dec_and_test(atomic_long_t *v)
{
- return arch_atomic_dec_and_test(v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_dec_and_test(v);
+#else
+ return raw_atomic_dec_and_test(v);
+#endif
}
+/**
+ * raw_atomic_long_inc_and_test() - atomic increment and test if zero with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_inc_and_test() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
static __always_inline bool
-arch_atomic_long_inc_and_test(atomic_long_t *v)
+raw_atomic_long_inc_and_test(atomic_long_t *v)
{
- return arch_atomic_inc_and_test(v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_inc_and_test(v);
+#else
+ return raw_atomic_inc_and_test(v);
+#endif
}
+/**
+ * raw_atomic_long_add_negative() - atomic add and test if negative with full ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_add_negative() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
static __always_inline bool
-arch_atomic_long_add_negative(long i, atomic_long_t *v)
+raw_atomic_long_add_negative(long i, atomic_long_t *v)
{
- return arch_atomic_add_negative(i, v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_add_negative(i, v);
+#else
+ return raw_atomic_add_negative(i, v);
+#endif
}
+/**
+ * raw_atomic_long_add_negative_acquire() - atomic add and test if negative with acquire ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_add_negative_acquire() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
static __always_inline bool
-arch_atomic_long_add_negative_acquire(long i, atomic_long_t *v)
+raw_atomic_long_add_negative_acquire(long i, atomic_long_t *v)
{
- return arch_atomic_add_negative_acquire(i, v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_add_negative_acquire(i, v);
+#else
+ return raw_atomic_add_negative_acquire(i, v);
+#endif
}
+/**
+ * raw_atomic_long_add_negative_release() - atomic add and test if negative with release ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_add_negative_release() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
static __always_inline bool
-arch_atomic_long_add_negative_release(long i, atomic_long_t *v)
+raw_atomic_long_add_negative_release(long i, atomic_long_t *v)
{
- return arch_atomic_add_negative_release(i, v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_add_negative_release(i, v);
+#else
+ return raw_atomic_add_negative_release(i, v);
+#endif
}
+/**
+ * raw_atomic_long_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_add_negative_relaxed() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
static __always_inline bool
-arch_atomic_long_add_negative_relaxed(long i, atomic_long_t *v)
+raw_atomic_long_add_negative_relaxed(long i, atomic_long_t *v)
{
- return arch_atomic_add_negative_relaxed(i, v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_add_negative_relaxed(i, v);
+#else
+ return raw_atomic_add_negative_relaxed(i, v);
+#endif
}
+/**
+ * raw_atomic_long_fetch_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic_long_t
+ * @a: long value to add
+ * @u: long value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_add_unless() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
static __always_inline long
-arch_atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
+raw_atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
{
- return arch_atomic_fetch_add_unless(v, a, u);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_add_unless(v, a, u);
+#else
+ return raw_atomic_fetch_add_unless(v, a, u);
+#endif
}
+/**
+ * raw_atomic_long_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic_long_t
+ * @a: long value to add
+ * @u: long value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_add_unless() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
static __always_inline bool
-arch_atomic_long_add_unless(atomic_long_t *v, long a, long u)
+raw_atomic_long_add_unless(atomic_long_t *v, long a, long u)
{
- return arch_atomic_add_unless(v, a, u);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_add_unless(v, a, u);
+#else
+ return raw_atomic_add_unless(v, a, u);
+#endif
}
+/**
+ * raw_atomic_long_inc_not_zero() - atomic increment unless zero with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_inc_not_zero() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
static __always_inline bool
-arch_atomic_long_inc_not_zero(atomic_long_t *v)
+raw_atomic_long_inc_not_zero(atomic_long_t *v)
{
- return arch_atomic_inc_not_zero(v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_inc_not_zero(v);
+#else
+ return raw_atomic_inc_not_zero(v);
+#endif
}
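
raw_atomic_long_inc_not_zero() is the building block for "take a reference only while the object is live" schemes, e.g. lookups of refcounted objects. A sketch under that assumption; struct my_obj and my_obj_tryget() are hypothetical names, not from this patch:

struct my_obj {
	atomic_long_t refs;	/* drops to 0 when teardown begins */
};

/* Returns false once the count has hit zero, so a dying object
 * can never be resurrected by a concurrent lookup. */
static inline bool my_obj_tryget(struct my_obj *obj)
{
	return raw_atomic_long_inc_not_zero(&obj->refs);
}
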
+/**
+ * raw_atomic_long_inc_unless_negative() - atomic increment unless negative with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_inc_unless_negative() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
static __always_inline bool
-arch_atomic_long_inc_unless_negative(atomic_long_t *v)
+raw_atomic_long_inc_unless_negative(atomic_long_t *v)
{
- return arch_atomic_inc_unless_negative(v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_inc_unless_negative(v);
+#else
+ return raw_atomic_inc_unless_negative(v);
+#endif
}
+/**
+ * raw_atomic_long_dec_unless_positive() - atomic decrement unless positive with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_dec_unless_positive() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
static __always_inline bool
-arch_atomic_long_dec_unless_positive(atomic_long_t *v)
+raw_atomic_long_dec_unless_positive(atomic_long_t *v)
{
- return arch_atomic_dec_unless_positive(v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_dec_unless_positive(v);
+#else
+ return raw_atomic_dec_unless_positive(v);
+#endif
}
+/**
+ * raw_atomic_long_dec_if_positive() - atomic decrement if positive with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_dec_if_positive() elsewhere.
+ *
+ * Return: The original value of @v minus one, regardless of whether @v was updated.
+ */
static __always_inline long
-arch_atomic_long_dec_if_positive(atomic_long_t *v)
+raw_atomic_long_dec_if_positive(atomic_long_t *v)
{
- return arch_atomic_dec_if_positive(v);
+#ifdef CONFIG_64BIT
+ return raw_atomic64_dec_if_positive(v);
+#else
+ return raw_atomic_dec_if_positive(v);
+#endif
}
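
The return convention above is worth spelling out: the decremented value comes back even when no store happens, so its sign tells the caller whether a unit was actually consumed. A minimal sketch (sem_count is a hypothetical name):

/* True only when *sem_count was > 0 and has been decremented;
 * a negative result means nothing was consumed. */
static inline bool consume_one(atomic_long_t *sem_count)
{
	return raw_atomic_long_dec_if_positive(sem_count) >= 0;
}
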
-#endif /* CONFIG_64BIT */
#endif /* _LINUX_ATOMIC_LONG_H */
-// a194c07d7d2f4b0e178d3c118c919775d5d65f50
+// 4ef23f98c73cff96d239896175fd26b10b88899e
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 31086a72e32a..0050ef288ab3 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -36,6 +36,7 @@ struct mqstat;
struct audit_watch;
struct audit_tree;
struct sk_buff;
+struct kern_ipc_perm;
struct audit_krule {
u32 pflags;
@@ -117,6 +118,8 @@ enum audit_nfcfgop {
AUDIT_NFT_OP_OBJ_RESET,
AUDIT_NFT_OP_FLOWTABLE_REGISTER,
AUDIT_NFT_OP_FLOWTABLE_UNREGISTER,
+ AUDIT_NFT_OP_SETELEM_RESET,
+ AUDIT_NFT_OP_RULE_RESET,
AUDIT_NFT_OP_INVALID,
};
@@ -130,8 +133,6 @@ extern unsigned compat_dir_class[];
extern unsigned compat_chattr_class[];
extern unsigned compat_signal_class[];
-extern int audit_classify_compat_syscall(int abi, unsigned syscall);
-
/* audit_names->type values */
#define AUDIT_TYPE_UNKNOWN 0 /* we don't know yet */
#define AUDIT_TYPE_NORMAL 1 /* a "normal" audit record */
diff --git a/include/linux/audit_arch.h b/include/linux/audit_arch.h
index 8fdb1afe251a..0e34d673ef17 100644
--- a/include/linux/audit_arch.h
+++ b/include/linux/audit_arch.h
@@ -21,4 +21,6 @@ enum auditsc_class_t {
AUDITSC_NVALS /* count */
};
+extern int audit_classify_compat_syscall(int abi, unsigned syscall);
+
#endif
diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h
index c15221dcb75e..8e177b67e82f 100644
--- a/include/linux/avf/virtchnl.h
+++ b/include/linux/avf/virtchnl.h
@@ -4,6 +4,11 @@
#ifndef _VIRTCHNL_H_
#define _VIRTCHNL_H_
+#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/overflow.h>
+#include <uapi/linux/if_ether.h>
+
/* Description:
* This header file describes the Virtual Function (VF) - Physical Function
* (PF) communication protocol used by the drivers for all devices starting
@@ -114,6 +119,7 @@ enum virtchnl_ops {
VIRTCHNL_OP_GET_STATS = 15,
VIRTCHNL_OP_RSVD = 16,
VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
+ VIRTCHNL_OP_CONFIG_RSS_HFUNC = 18,
/* opcode 19 is reserved */
VIRTCHNL_OP_IWARP = 20, /* advanced opcode */
VIRTCHNL_OP_RDMA = VIRTCHNL_OP_IWARP,
@@ -240,6 +246,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES BIT(6)
/* used to negotiate communicating link speeds in Mbps */
#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED BIT(7)
+#define VIRTCHNL_VF_OFFLOAD_CRC BIT(10)
#define VIRTCHNL_VF_OFFLOAD_VLAN_V2 BIT(15)
#define VIRTCHNL_VF_OFFLOAD_VLAN BIT(16)
#define VIRTCHNL_VF_OFFLOAD_RX_POLLING BIT(17)
@@ -268,10 +275,11 @@ struct virtchnl_vf_resource {
u32 rss_key_size;
u32 rss_lut_size;
- struct virtchnl_vsi_resource vsi_res[1];
+ struct virtchnl_vsi_resource vsi_res[];
};
-VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource);
+VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_vf_resource);
+#define virtchnl_vf_resource_LEGACY_SIZEOF 36
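
Converting the old one-element array to a C99 flexible array changes sizeof(struct virtchnl_vf_resource), which is why the VIRTCHNL_CHECK_STRUCT_LEN() value drops from 36 to 20 and the legacy wire size is kept in a separate define. Message sizes are then computed per element, e.g. with struct_size() from the newly included <linux/overflow.h>; the helper below is an illustrative sketch, not part of this patch:

/* Bytes needed for a reply carrying num_vsis VSIs; struct_size()
 * guards the multiply-and-add against overflow. */
static inline size_t vf_resource_msg_size(const struct virtchnl_vf_resource *res,
					  u16 num_vsis)
{
	return struct_size(res, vsi_res, num_vsis);
}
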
/* VIRTCHNL_OP_CONFIG_TX_QUEUE
* VF sends this message to set up parameters for one TX queue.
@@ -294,7 +302,13 @@ VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
/* VIRTCHNL_OP_CONFIG_RX_QUEUE
* VF sends this message to set up parameters for one RX queue.
* External data buffer contains one instance of virtchnl_rxq_info.
- * PF configures requested queue and returns a status code.
+ * PF configures requested queue and returns a status code. Setting
+ * the crc_disable flag to 1 disables CRC stripping on each queue in
+ * the VF where the flag is set. The VIRTCHNL_VF_OFFLOAD_CRC offload
+ * must have been negotiated prior to sending this info or the PF
+ * will ignore the request. The flag should be set identically for
+ * all of a VF's queues.
*/
/* Rx queue config info */
@@ -306,7 +320,7 @@ struct virtchnl_rxq_info {
u16 splithdr_enabled; /* deprecated with AVF 1.0 */
u32 databuffer_size;
u32 max_pkt_size;
- u8 pad0;
+ u8 crc_disable;
u8 rxdid;
u8 pad1[2];
u64 dma_ring_addr;
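
A hedged sketch of the VF side of the crc_disable contract described above: the flag only has effect when the CRC offload was negotiated, and should be set identically on every queue (helper and parameter names are illustrative):

static void vf_set_crc_strip(struct virtchnl_rxq_info *rxq,
			     u32 negotiated_caps, bool disable_crc_strip)
{
	/* The PF ignores crc_disable unless VIRTCHNL_VF_OFFLOAD_CRC was agreed. */
	if (negotiated_caps & VIRTCHNL_VF_OFFLOAD_CRC)
		rxq->crc_disable = disable_crc_strip ? 1 : 0;
}
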
@@ -340,10 +354,11 @@ struct virtchnl_vsi_queue_config_info {
u16 vsi_id;
u16 num_queue_pairs;
u32 pad;
- struct virtchnl_queue_pair_info qpair[1];
+ struct virtchnl_queue_pair_info qpair[];
};
-VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vsi_queue_config_info);
+#define virtchnl_vsi_queue_config_info_LEGACY_SIZEOF 72
/* VIRTCHNL_OP_REQUEST_QUEUES
* VF sends this message to request the PF to allocate additional queues to
@@ -385,10 +400,11 @@ VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);
struct virtchnl_irq_map_info {
u16 num_vectors;
- struct virtchnl_vector_map vecmap[1];
+ struct virtchnl_vector_map vecmap[];
};
-VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info);
+VIRTCHNL_CHECK_STRUCT_LEN(2, virtchnl_irq_map_info);
+#define virtchnl_irq_map_info_LEGACY_SIZEOF 14
/* VIRTCHNL_OP_ENABLE_QUEUES
* VIRTCHNL_OP_DISABLE_QUEUES
@@ -459,10 +475,11 @@ VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
struct virtchnl_ether_addr_list {
u16 vsi_id;
u16 num_elements;
- struct virtchnl_ether_addr list[1];
+ struct virtchnl_ether_addr list[];
};
-VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list);
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_ether_addr_list);
+#define virtchnl_ether_addr_list_LEGACY_SIZEOF 12
/* VIRTCHNL_OP_ADD_VLAN
* VF sends this message to add one or more VLAN tag filters for receives.
@@ -481,10 +498,11 @@ VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list);
struct virtchnl_vlan_filter_list {
u16 vsi_id;
u16 num_elements;
- u16 vlan_id[1];
+ u16 vlan_id[];
};
-VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list);
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_vlan_filter_list);
+#define virtchnl_vlan_filter_list_LEGACY_SIZEOF 6
/* This enum is used for all of the VIRTCHNL_VF_OFFLOAD_VLAN_V2_CAPS related
* structures and opcodes.
@@ -711,10 +729,11 @@ struct virtchnl_vlan_filter_list_v2 {
u16 vport_id;
u16 num_elements;
u8 pad[4];
- struct virtchnl_vlan_filter filters[1];
+ struct virtchnl_vlan_filter filters[];
};
-VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_vlan_filter_list_v2);
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vlan_filter_list_v2);
+#define virtchnl_vlan_filter_list_v2_LEGACY_SIZEOF 40
/* VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2
* VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2
@@ -866,18 +885,20 @@ VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);
struct virtchnl_rss_key {
u16 vsi_id;
u16 key_len;
- u8 key[1]; /* RSS hash key, packed bytes */
+ u8 key[]; /* RSS hash key, packed bytes */
};
-VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rss_key);
+#define virtchnl_rss_key_LEGACY_SIZEOF 6
struct virtchnl_rss_lut {
u16 vsi_id;
u16 lut_entries;
- u8 lut[1]; /* RSS lookup table */
+ u8 lut[]; /* RSS lookup table */
};
-VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rss_lut);
+#define virtchnl_rss_lut_LEGACY_SIZEOF 6
/* VIRTCHNL_OP_GET_RSS_HENA_CAPS
* VIRTCHNL_OP_SET_RSS_HENA
@@ -892,6 +913,29 @@ struct virtchnl_rss_hena {
VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
+/* Type of RSS algorithm */
+enum virtchnl_rss_algorithm {
+ VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC = 0,
+ VIRTCHNL_RSS_ALG_R_ASYMMETRIC = 1,
+ VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC = 2,
+ VIRTCHNL_RSS_ALG_XOR_SYMMETRIC = 3,
+};
+
+/* VIRTCHNL_OP_CONFIG_RSS_HFUNC
+ * VF sends this message to configure the RSS hash function. Only supported
+ * if both PF and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
+ * configuration negotiation.
+ * The hash function is initialized to VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC
+ * by the PF.
+ */
+struct virtchnl_rss_hfunc {
+ u16 vsi_id;
+ u16 rss_algorithm; /* enum virtchnl_rss_algorithm */
+ u32 reserved;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hfunc);
+
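/*
 * Editorial sketch, not part of the patch: the message a VF driver might
 * build for VIRTCHNL_OP_CONFIG_RSS_HFUNC. The VSI id value is hypothetical;
 * the msglen sent to the PF would be sizeof(hfunc), which the validator
 * later in this file checks against sizeof(struct virtchnl_rss_hfunc).
 */
struct virtchnl_rss_hfunc hfunc = {
	.vsi_id		= 1,	/* hypothetical VSI id */
	.rss_algorithm	= VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC,
	.reserved	= 0,
};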
/* VIRTCHNL_OP_ENABLE_CHANNELS
* VIRTCHNL_OP_DISABLE_CHANNELS
* VF sends these messages to enable or disable channels based on
@@ -911,10 +955,11 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_channel_info);
struct virtchnl_tc_info {
u32 num_tc;
u32 pad;
- struct virtchnl_channel_info list[1];
+ struct virtchnl_channel_info list[];
};
-VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_tc_info);
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_tc_info);
+#define virtchnl_tc_info_LEGACY_SIZEOF 24
/* VIRTCHNL_ADD_CLOUD_FILTER
* VIRTCHNL_DEL_CLOUD_FILTER
@@ -1052,10 +1097,11 @@ VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_rdma_qv_info);
struct virtchnl_rdma_qvlist_info {
u32 num_vectors;
- struct virtchnl_rdma_qv_info qv_info[1];
+ struct virtchnl_rdma_qv_info qv_info[];
};
-VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_rdma_qvlist_info);
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rdma_qvlist_info);
+#define virtchnl_rdma_qvlist_info_LEGACY_SIZEOF 16
/* VF reset states - these are written into the RSTAT register:
* VFGEN_RSTAT on the VF
@@ -1074,14 +1120,6 @@ enum virtchnl_vfr_states {
VIRTCHNL_VFR_VFACTIVE,
};
-/* Type of RSS algorithm */
-enum virtchnl_rss_algorithm {
- VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC = 0,
- VIRTCHNL_RSS_ALG_R_ASYMMETRIC = 1,
- VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC = 2,
- VIRTCHNL_RSS_ALG_XOR_SYMMETRIC = 3,
-};
-
#define VIRTCHNL_MAX_NUM_PROTO_HDRS 32
#define PROTO_HDR_SHIFT 5
#define PROTO_HDR_FIELD_START(proto_hdr_type) ((proto_hdr_type) << PROTO_HDR_SHIFT)
@@ -1367,6 +1405,31 @@ struct virtchnl_fdir_del {
VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);
+#define __vss_byone(p, member, count, old) \
+ (struct_size(p, member, count) + (old - 1 - struct_size(p, member, 0)))
+
+#define __vss_byelem(p, member, count, old) \
+ (struct_size(p, member, count - 1) + (old - struct_size(p, member, 0)))
+
+#define __vss_full(p, member, count, old) \
+ (struct_size(p, member, count) + (old - struct_size(p, member, 0)))
+
+#define __vss(type, func, p, member, count) \
+ struct type: func(p, member, count, type##_LEGACY_SIZEOF)
+
+#define virtchnl_struct_size(p, m, c) \
+ _Generic(*p, \
+ __vss(virtchnl_vf_resource, __vss_full, p, m, c), \
+ __vss(virtchnl_vsi_queue_config_info, __vss_full, p, m, c), \
+ __vss(virtchnl_irq_map_info, __vss_full, p, m, c), \
+ __vss(virtchnl_ether_addr_list, __vss_full, p, m, c), \
+ __vss(virtchnl_vlan_filter_list, __vss_full, p, m, c), \
+ __vss(virtchnl_vlan_filter_list_v2, __vss_byelem, p, m, c), \
+ __vss(virtchnl_tc_info, __vss_byelem, p, m, c), \
+ __vss(virtchnl_rdma_qvlist_info, __vss_byelem, p, m, c), \
+ __vss(virtchnl_rss_key, __vss_byone, p, m, c), \
+ __vss(virtchnl_rss_lut, __vss_byone, p, m, c))
+
/**
* virtchnl_vc_validate_vf_msg
* @ver: Virtchnl version info
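/*
 * Editorial sketch, not part of the patch: how virtchnl_struct_size()
 * reproduces the legacy wire sizes after the flexible-array conversions
 * above. For struct virtchnl_rss_key (legacy sizeof 6: a 4-byte header,
 * a 1-byte key[1], 1 byte of padding), __vss_byone computes
 * struct_size(vrk, key, n) + (6 - 1 - 4) = (4 + n) + 1 = 5 + n, matching
 * the old "sizeof(struct virtchnl_rss_key) + key_len - 1". A hypothetical
 * allocation in a VF driver (struct_size() never evaluates vrk):
 */
static struct virtchnl_rss_key *example_alloc_rss_key(u16 key_len)
{
	struct virtchnl_rss_key *vrk;

	vrk = kzalloc(virtchnl_struct_size(vrk, key, key_len), GFP_KERNEL);
	if (vrk)
		vrk->key_len = key_len;
	return vrk;	/* 5 + key_len bytes, same as the legacy layout */
}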
@@ -1401,24 +1464,23 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
valid_len = sizeof(struct virtchnl_rxq_info);
break;
case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
- valid_len = sizeof(struct virtchnl_vsi_queue_config_info);
+ valid_len = virtchnl_vsi_queue_config_info_LEGACY_SIZEOF;
if (msglen >= valid_len) {
struct virtchnl_vsi_queue_config_info *vqc =
(struct virtchnl_vsi_queue_config_info *)msg;
- valid_len += (vqc->num_queue_pairs *
- sizeof(struct
- virtchnl_queue_pair_info));
+ valid_len = virtchnl_struct_size(vqc, qpair,
+ vqc->num_queue_pairs);
if (vqc->num_queue_pairs == 0)
err_msg_format = true;
}
break;
case VIRTCHNL_OP_CONFIG_IRQ_MAP:
- valid_len = sizeof(struct virtchnl_irq_map_info);
+ valid_len = virtchnl_irq_map_info_LEGACY_SIZEOF;
if (msglen >= valid_len) {
struct virtchnl_irq_map_info *vimi =
(struct virtchnl_irq_map_info *)msg;
- valid_len += (vimi->num_vectors *
- sizeof(struct virtchnl_vector_map));
+ valid_len = virtchnl_struct_size(vimi, vecmap,
+ vimi->num_vectors);
if (vimi->num_vectors == 0)
err_msg_format = true;
}
@@ -1429,23 +1491,24 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
break;
case VIRTCHNL_OP_ADD_ETH_ADDR:
case VIRTCHNL_OP_DEL_ETH_ADDR:
- valid_len = sizeof(struct virtchnl_ether_addr_list);
+ valid_len = virtchnl_ether_addr_list_LEGACY_SIZEOF;
if (msglen >= valid_len) {
struct virtchnl_ether_addr_list *veal =
(struct virtchnl_ether_addr_list *)msg;
- valid_len += veal->num_elements *
- sizeof(struct virtchnl_ether_addr);
+ valid_len = virtchnl_struct_size(veal, list,
+ veal->num_elements);
if (veal->num_elements == 0)
err_msg_format = true;
}
break;
case VIRTCHNL_OP_ADD_VLAN:
case VIRTCHNL_OP_DEL_VLAN:
- valid_len = sizeof(struct virtchnl_vlan_filter_list);
+ valid_len = virtchnl_vlan_filter_list_LEGACY_SIZEOF;
if (msglen >= valid_len) {
struct virtchnl_vlan_filter_list *vfl =
(struct virtchnl_vlan_filter_list *)msg;
- valid_len += vfl->num_elements * sizeof(u16);
+ valid_len = virtchnl_struct_size(vfl, vlan_id,
+ vfl->num_elements);
if (vfl->num_elements == 0)
err_msg_format = true;
}
@@ -1469,31 +1532,36 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
case VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP:
break;
case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP:
- valid_len = sizeof(struct virtchnl_rdma_qvlist_info);
+ valid_len = virtchnl_rdma_qvlist_info_LEGACY_SIZEOF;
if (msglen >= valid_len) {
struct virtchnl_rdma_qvlist_info *qv =
(struct virtchnl_rdma_qvlist_info *)msg;
- valid_len += ((qv->num_vectors - 1) *
- sizeof(struct virtchnl_rdma_qv_info));
+ valid_len = virtchnl_struct_size(qv, qv_info,
+ qv->num_vectors);
}
break;
case VIRTCHNL_OP_CONFIG_RSS_KEY:
- valid_len = sizeof(struct virtchnl_rss_key);
+ valid_len = virtchnl_rss_key_LEGACY_SIZEOF;
if (msglen >= valid_len) {
struct virtchnl_rss_key *vrk =
(struct virtchnl_rss_key *)msg;
- valid_len += vrk->key_len - 1;
+ valid_len = virtchnl_struct_size(vrk, key,
+ vrk->key_len);
}
break;
case VIRTCHNL_OP_CONFIG_RSS_LUT:
- valid_len = sizeof(struct virtchnl_rss_lut);
+ valid_len = virtchnl_rss_lut_LEGACY_SIZEOF;
if (msglen >= valid_len) {
struct virtchnl_rss_lut *vrl =
(struct virtchnl_rss_lut *)msg;
- valid_len += vrl->lut_entries - 1;
+ valid_len = virtchnl_struct_size(vrl, lut,
+ vrl->lut_entries);
}
break;
+ case VIRTCHNL_OP_CONFIG_RSS_HFUNC:
+ valid_len = sizeof(struct virtchnl_rss_hfunc);
+ break;
case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
break;
case VIRTCHNL_OP_SET_RSS_HENA:
@@ -1506,12 +1574,12 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
valid_len = sizeof(struct virtchnl_vf_res_request);
break;
case VIRTCHNL_OP_ENABLE_CHANNELS:
- valid_len = sizeof(struct virtchnl_tc_info);
+ valid_len = virtchnl_tc_info_LEGACY_SIZEOF;
if (msglen >= valid_len) {
struct virtchnl_tc_info *vti =
(struct virtchnl_tc_info *)msg;
- valid_len += (vti->num_tc - 1) *
- sizeof(struct virtchnl_channel_info);
+ valid_len = virtchnl_struct_size(vti, list,
+ vti->num_tc);
if (vti->num_tc == 0)
err_msg_format = true;
}
@@ -1538,13 +1606,13 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
break;
case VIRTCHNL_OP_ADD_VLAN_V2:
case VIRTCHNL_OP_DEL_VLAN_V2:
- valid_len = sizeof(struct virtchnl_vlan_filter_list_v2);
+ valid_len = virtchnl_vlan_filter_list_v2_LEGACY_SIZEOF;
if (msglen >= valid_len) {
struct virtchnl_vlan_filter_list_v2 *vfl =
(struct virtchnl_vlan_filter_list_v2 *)msg;
- valid_len += (vfl->num_elements - 1) *
- sizeof(struct virtchnl_vlan_filter);
+ valid_len = virtchnl_struct_size(vfl, filters,
+ vfl->num_elements);
if (vfl->num_elements == 0) {
err_msg_format = true;
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index ae12696ec492..2ad261082bba 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -141,8 +141,6 @@ struct bdi_writeback {
struct delayed_work dwork; /* work item used for writeback */
struct delayed_work bw_dwork; /* work item used for bandwidth estimate */
- unsigned long dirty_sleep; /* last wait */
-
struct list_head bdi_node; /* anchored at bdi->wb_list */
#ifdef CONFIG_CGROUP_WRITEBACK
@@ -179,6 +177,11 @@ struct backing_dev_info {
* any dirty wbs, which is depended upon by bdi_has_dirty().
*/
atomic_long_t tot_write_bandwidth;
+ /*
+ * Jiffies when a process was last dirty-throttled on this bdi. Used by
+ * blk-wbt.
+ */
+ unsigned long last_bdp_sleep;
struct bdi_writeback wb; /* the root writeback info for this bdi */
struct list_head wb_list; /* list of all wbs */
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index fbad4fcd408e..1a97277f99b1 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -46,7 +46,6 @@ extern spinlock_t bdi_lock;
extern struct list_head bdi_list;
extern struct workqueue_struct *bdi_wq;
-extern struct workqueue_struct *bdi_async_bio_wq;
static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
diff --git a/include/linux/backing-file.h b/include/linux/backing-file.h
new file mode 100644
index 000000000000..3f1fe1774f1b
--- /dev/null
+++ b/include/linux/backing-file.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Common helpers for stackable filesystems and backing files.
+ *
+ * Copyright (C) 2023 CTERA Networks.
+ */
+
+#ifndef _LINUX_BACKING_FILE_H
+#define _LINUX_BACKING_FILE_H
+
+#include <linux/file.h>
+#include <linux/uio.h>
+#include <linux/fs.h>
+
+struct backing_file_ctx {
+ const struct cred *cred;
+ struct file *user_file;
+ void (*accessed)(struct file *);
+ void (*end_write)(struct file *);
+};
+
+struct file *backing_file_open(const struct path *user_path, int flags,
+ const struct path *real_path,
+ const struct cred *cred);
+ssize_t backing_file_read_iter(struct file *file, struct iov_iter *iter,
+ struct kiocb *iocb, int flags,
+ struct backing_file_ctx *ctx);
+ssize_t backing_file_write_iter(struct file *file, struct iov_iter *iter,
+ struct kiocb *iocb, int flags,
+ struct backing_file_ctx *ctx);
+ssize_t backing_file_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags,
+ struct backing_file_ctx *ctx);
+ssize_t backing_file_splice_write(struct pipe_inode_info *pipe,
+ struct file *out, loff_t *ppos, size_t len,
+ unsigned int flags,
+ struct backing_file_ctx *ctx);
+int backing_file_mmap(struct file *file, struct vm_area_struct *vma,
+ struct backing_file_ctx *ctx);
+
+#endif /* _LINUX_BACKING_FILE_H */
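/*
 * Editorial sketch, not part of the patch: how a stacking filesystem
 * might forward a read through these helpers. Reaching the backing file
 * via private_data and acting with f_cred are illustrative assumptions,
 * not the API's requirements.
 */
static ssize_t example_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct backing_file_ctx ctx = {
		.cred		= file->f_cred,		/* assumed credentials */
		.user_file	= file,
	};

	return backing_file_read_iter(file->private_data, iter, iocb,
				      iocb->ki_flags, &ctx);
}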
diff --git a/include/linux/badblocks.h b/include/linux/badblocks.h
index 2426276b9bd3..670f2dae692f 100644
--- a/include/linux/badblocks.h
+++ b/include/linux/badblocks.h
@@ -15,6 +15,7 @@
#define BB_OFFSET(x) (((x) & BB_OFFSET_MASK) >> 9)
#define BB_LEN(x) (((x) & BB_LEN_MASK) + 1)
#define BB_ACK(x) (!!((x) & BB_ACK_MASK))
+#define BB_END(x) (BB_OFFSET(x) + BB_LEN(x))
#define BB_MAKE(a, l, ack) (((a)<<9) | ((l)-1) | ((u64)(!!(ack)) << 63))
/* Bad block numbers are stored sorted in a single page.
@@ -41,6 +42,12 @@ struct badblocks {
sector_t size; /* in sectors */
};
+struct badblocks_context {
+ sector_t start;
+ sector_t len;
+ int ack;
+};
+
int badblocks_check(struct badblocks *bb, sector_t s, int sectors,
sector_t *first_bad, int *bad_sectors);
int badblocks_set(struct badblocks *bb, sector_t s, int sectors,
@@ -63,4 +70,27 @@ static inline void devm_exit_badblocks(struct device *dev, struct badblocks *bb)
}
badblocks_exit(bb);
}
+
+static inline int badblocks_full(struct badblocks *bb)
+{
+ return (bb->count >= MAX_BADBLOCKS);
+}
+
+static inline int badblocks_empty(struct badblocks *bb)
+{
+ return (bb->count == 0);
+}
+
+static inline void set_changed(struct badblocks *bb)
+{
+ if (bb->changed != 1)
+ bb->changed = 1;
+}
+
+static inline void clear_changed(struct badblocks *bb)
+{
+ if (bb->changed != 0)
+ bb->changed = 0;
+}
+
#endif
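/*
 * Editorial sketch, not part of the patch: how one u64 entry packs a
 * bad-block range, using the accessors above.
 */
static void example_bb_encoding(void)
{
	u64 bb = BB_MAKE(2048, 8, 1);	/* 8 bad sectors from sector 2048, acked */

	/* BB_OFFSET(bb) == 2048, BB_LEN(bb) == 8,
	 * BB_END(bb) == 2056, BB_ACK(bb) == 1
	 */
}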
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 8d51f69f9f5e..70f97f685bff 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -90,6 +90,16 @@ struct linux_binfmt {
#endif
} __randomize_layout;
+#if IS_ENABLED(CONFIG_BINFMT_MISC)
+struct binfmt_misc {
+ struct list_head entries;
+ rwlock_t entries_lock;
+ bool enabled;
+} __randomize_layout;
+
+extern struct binfmt_misc init_binfmt_misc;
+#endif
+
extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
/* Registration of default binfmt handlers */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index b3e7529ff55e..875d792bffff 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -229,7 +229,7 @@ static inline void bio_cnt_set(struct bio *bio, unsigned int count)
static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
- return (bio->bi_flags & (1U << bit)) != 0;
+ return bio->bi_flags & (1U << bit);
}
static inline void bio_set_flag(struct bio *bio, unsigned int bit)
@@ -253,6 +253,11 @@ static inline struct page *bio_first_page_all(struct bio *bio)
return bio_first_bvec_all(bio)->bv_page;
}
+static inline struct folio *bio_first_folio_all(struct bio *bio)
+{
+ return page_folio(bio_first_page_all(bio));
+}
+
static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
{
WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
@@ -281,6 +286,11 @@ static inline void bio_first_folio(struct folio_iter *fi, struct bio *bio,
{
struct bio_vec *bvec = bio_first_bvec_all(bio) + i;
+ if (unlikely(i >= bio->bi_vcnt)) {
+ fi->folio = NULL;
+ return;
+ }
+
fi->folio = page_folio(bvec->bv_page);
fi->offset = bvec->bv_offset +
PAGE_SIZE * (bvec->bv_page - &fi->folio->page);
@@ -298,10 +308,8 @@ static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio)
fi->offset = 0;
fi->length = min(folio_size(fi->folio), fi->_seg_count);
fi->_next = folio_next(fi->folio);
- } else if (fi->_i + 1 < bio->bi_vcnt) {
- bio_first_folio(fi, bio, fi->_i + 1);
} else {
- fi->folio = NULL;
+ bio_first_folio(fi, bio, fi->_i + 1);
}
}
@@ -319,6 +327,8 @@ enum bip_flags {
BIP_CTRL_NOCHECK = 1 << 2, /* disable HBA integrity checking */
BIP_DISK_NOCHECK = 1 << 3, /* disable disk integrity checking */
BIP_IP_CHECKSUM = 1 << 4, /* IP checksum */
+ BIP_INTEGRITY_USER = 1 << 5, /* Integrity payload is user address */
+ BIP_COPY_USER = 1 << 6, /* Kernel bounce buffer in use */
};
/*
@@ -465,14 +475,18 @@ extern void bio_uninit(struct bio *);
void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf);
void bio_chain(struct bio *, struct bio *);
-int bio_add_page(struct bio *, struct page *, unsigned len, unsigned off);
-bool bio_add_folio(struct bio *, struct folio *, size_t len, size_t off);
+int __must_check bio_add_page(struct bio *bio, struct page *page, unsigned len,
+ unsigned off);
+bool __must_check bio_add_folio(struct bio *bio, struct folio *folio,
+ size_t len, size_t off);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
unsigned int, unsigned int);
int bio_add_zone_append_page(struct bio *bio, struct page *page,
unsigned int len, unsigned int offset);
void __bio_add_page(struct bio *bio, struct page *page,
unsigned int len, unsigned int off);
+void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len,
+ size_t off);
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter);
void __bio_release_pages(struct bio *bio, bool mark_dirty);
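/*
 * Editorial sketch, not part of the patch: with the new __must_check
 * annotation, callers of bio_add_folio() handle the "bio full" case
 * explicitly instead of silently ignoring it.
 */
static bool example_add_folio(struct bio *bio, struct folio *folio,
			      size_t len, size_t off)
{
	if (!bio_add_folio(bio, folio, len, off))
		return false;	/* caller submits this bio and allocates a new one */
	return true;
}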
@@ -484,11 +498,16 @@ extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
extern void bio_copy_data(struct bio *dst, struct bio *src);
extern void bio_free_pages(struct bio *bio);
void guard_bio_eod(struct bio *bio);
-void zero_fill_bio(struct bio *bio);
+void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);
+
+static inline void zero_fill_bio(struct bio *bio)
+{
+ zero_fill_bio_iter(bio, bio->bi_iter);
+}
static inline void bio_release_pages(struct bio *bio, bool mark_dirty)
{
- if (!bio_flagged(bio, BIO_NO_PAGE_REF))
+ if (bio_flagged(bio, BIO_PAGE_PINNED))
__bio_release_pages(bio, mark_dirty);
}
@@ -704,6 +723,7 @@ static inline bool bioset_initialized(struct bio_set *bs)
for_each_bio(_bio) \
bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)
+int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t len, u32 seed);
extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern bool bio_integrity_prep(struct bio *);
@@ -775,6 +795,12 @@ static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
return 0;
}
+static inline int bio_integrity_map_user(struct bio *bio, void __user *ubuf,
+ ssize_t len, u32 seed)
+{
+ return -EINVAL;
+}
+
#endif /* CONFIG_BLK_DEV_INTEGRITY */
/*
@@ -787,7 +813,7 @@ static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
{
bio->bi_opf |= REQ_POLLED;
- if (!is_sync_kiocb(kiocb))
+ if (kiocb->ki_flags & IOCB_NOWAIT)
bio->bi_opf |= REQ_NOWAIT;
}
diff --git a/include/linux/bitmap-str.h b/include/linux/bitmap-str.h
new file mode 100644
index 000000000000..17caeca94cab
--- /dev/null
+++ b/include/linux/bitmap-str.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_BITMAP_STR_H
+#define __LINUX_BITMAP_STR_H
+
+int bitmap_parse_user(const char __user *ubuf, unsigned int ulen, unsigned long *dst, int nbits);
+int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp, int nmaskbits);
+extern int bitmap_print_bitmask_to_buf(char *buf, const unsigned long *maskp,
+ int nmaskbits, loff_t off, size_t count);
+extern int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp,
+ int nmaskbits, loff_t off, size_t count);
+int bitmap_parse(const char *buf, unsigned int buflen, unsigned long *dst, int nbits);
+int bitmap_parselist(const char *buf, unsigned long *maskp, int nmaskbits);
+int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen,
+ unsigned long *dst, int nbits);
+
+#endif /* __LINUX_BITMAP_STR_H */
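/*
 * Editorial sketch, not part of the patch: parsing a list-format string
 * with the relocated helpers.
 */
static int example_parse(void)
{
	DECLARE_BITMAP(mask, 16);

	/* sets bits 0-3 and 8; returns 0 on success or -errno */
	return bitmap_parselist("0-3,8", mask, 16);
}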
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 7d6d73b78147..99451431e4d6 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -6,10 +6,12 @@
#include <linux/align.h>
#include <linux/bitops.h>
+#include <linux/errno.h>
#include <linux/find.h>
#include <linux/limits.h>
#include <linux/string.h>
#include <linux/types.h>
+#include <linux/bitmap-str.h>
struct device;
@@ -200,14 +202,6 @@ bitmap_find_next_zero_area(unsigned long *map,
align_mask, 0);
}
-int bitmap_parse(const char *buf, unsigned int buflen,
- unsigned long *dst, int nbits);
-int bitmap_parse_user(const char __user *ubuf, unsigned int ulen,
- unsigned long *dst, int nbits);
-int bitmap_parselist(const char *buf, unsigned long *maskp,
- int nmaskbits);
-int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen,
- unsigned long *dst, int nbits);
void bitmap_remap(unsigned long *dst, const unsigned long *src,
const unsigned long *old, const unsigned long *new, unsigned int nbits);
int bitmap_bitremap(int oldbit,
@@ -216,23 +210,6 @@ void bitmap_onto(unsigned long *dst, const unsigned long *orig,
const unsigned long *relmap, unsigned int bits);
void bitmap_fold(unsigned long *dst, const unsigned long *orig,
unsigned int sz, unsigned int nbits);
-int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order);
-void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order);
-int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);
-
-#ifdef __BIG_ENDIAN
-void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits);
-#else
-#define bitmap_copy_le bitmap_copy
-#endif
-int bitmap_print_to_pagebuf(bool list, char *buf,
- const unsigned long *maskp, int nmaskbits);
-
-extern int bitmap_print_bitmask_to_buf(char *buf, const unsigned long *maskp,
- int nmaskbits, loff_t off, size_t count);
-
-extern int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp,
- int nmaskbits, loff_t off, size_t count);
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
@@ -302,12 +279,10 @@ void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap,
#endif
/*
- * On 64-bit systems bitmaps are represented as u64 arrays internally. On LE32
- * machines the order of hi and lo parts of numbers match the bitmap structure.
- * In both cases conversion is not needed when copying data from/to arrays of
- * u64.
+ * On 64-bit systems bitmaps are represented as u64 arrays internally. So,
+ * the conversion is not needed when copying data from/to arrays of u64.
*/
-#if (BITS_PER_LONG == 32) && defined(__BIG_ENDIAN)
+#if BITS_PER_LONG == 32
void bitmap_from_arr64(unsigned long *bitmap, const u64 *buf, unsigned int nbits);
void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits);
#else
@@ -521,6 +496,66 @@ static inline void bitmap_next_set_region(unsigned long *bitmap,
}
/**
+ * bitmap_release_region - release allocated bitmap region
+ * @bitmap: array of unsigned longs corresponding to the bitmap
+ * @pos: beginning of bit region to release
+ * @order: region size (log base 2 of number of bits) to release
+ *
+ * This is the complement to bitmap_find_free_region() and releases
+ * the found region (by clearing it in the bitmap).
+ */
+static inline void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order)
+{
+ bitmap_clear(bitmap, pos, BIT(order));
+}
+
+/**
+ * bitmap_allocate_region - allocate bitmap region
+ * @bitmap: array of unsigned longs corresponding to the bitmap
+ * @pos: beginning of bit region to allocate
+ * @order: region size (log base 2 of number of bits) to allocate
+ *
+ * Allocate (set bits in) a specified region of a bitmap.
+ *
+ * Returns: 0 on success, or %-EBUSY if specified region wasn't
+ * free (not all bits were zero).
+ */
+static inline int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order)
+{
+ unsigned int len = BIT(order);
+
+ if (find_next_bit(bitmap, pos + len, pos) < pos + len)
+ return -EBUSY;
+ bitmap_set(bitmap, pos, len);
+ return 0;
+}
+
+/**
+ * bitmap_find_free_region - find a contiguous aligned mem region
+ * @bitmap: array of unsigned longs corresponding to the bitmap
+ * @bits: number of bits in the bitmap
+ * @order: region size (log base 2 of number of bits) to find
+ *
+ * Find a region of free (zero) bits in a @bitmap of @bits bits and
+ * allocate them (set them to one). Only consider regions of length
+ * a power (@order) of two, aligned to that power of two, which
+ * makes the search algorithm much faster.
+ *
+ * Returns: the bit offset in bitmap of the allocated region,
+ * or -errno on failure.
+ */
+static inline int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order)
+{
+ unsigned int pos, end; /* scans bitmap by regions of size order */
+
+ for (pos = 0; (end = pos + BIT(order)) <= bits; pos = end) {
+ if (!bitmap_allocate_region(bitmap, pos, order))
+ return pos;
+ }
+ return -ENOMEM;
+}
+
+/**
* BITMAP_FROM_U64() - Represent u64 value in the format suitable for bitmap.
* @n: u64 value
*
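/*
 * Editorial sketch, not part of the patch: typical use of the region
 * helpers now that they are static inlines in this header.
 */
static int example_claim_region(unsigned long *map, unsigned int bits)
{
	/* find and set an aligned run of 8 bits (order 3) */
	int pos = bitmap_find_free_region(map, bits, 3);

	if (pos < 0)
		return pos;	/* -ENOMEM: no suitably aligned free region */

	/* ... use bits pos..pos+7, then return them: */
	bitmap_release_region(map, pos, 3);
	return 0;
}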
diff --git a/include/linux/blk-crypto-profile.h b/include/linux/blk-crypto-profile.h
index e6802b69cdd6..90ab33cb5d0e 100644
--- a/include/linux/blk-crypto-profile.h
+++ b/include/linux/blk-crypto-profile.h
@@ -111,6 +111,7 @@ struct blk_crypto_profile {
* keyslots while ensuring that they can't be changed concurrently.
*/
struct rw_semaphore lock;
+ struct lock_class_key lockdep_key;
/* List of idle slots, with least recently used slot at front */
wait_queue_head_t idle_slots_wait_queue;
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 06caacd77ed6..7a8150a5f051 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -28,22 +28,20 @@ typedef __u32 __bitwise req_flags_t;
/* drive already may have started this one */
#define RQF_STARTED ((__force req_flags_t)(1 << 1))
-/* may not be passed by ioscheduler */
-#define RQF_SOFTBARRIER ((__force req_flags_t)(1 << 3))
/* request for flush sequence */
#define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << 4))
/* merge of different types, fail separately */
#define RQF_MIXED_MERGE ((__force req_flags_t)(1 << 5))
-/* track inflight for MQ */
-#define RQF_MQ_INFLIGHT ((__force req_flags_t)(1 << 6))
/* don't call prep for this one */
#define RQF_DONTPREP ((__force req_flags_t)(1 << 7))
+/* use hctx->sched_tags */
+#define RQF_SCHED_TAGS ((__force req_flags_t)(1 << 8))
+/* use an I/O scheduler for this request */
+#define RQF_USE_SCHED ((__force req_flags_t)(1 << 9))
/* vaguely specified driver internal error. Ignored by the block layer */
#define RQF_FAILED ((__force req_flags_t)(1 << 10))
/* don't warn about errors */
#define RQF_QUIET ((__force req_flags_t)(1 << 11))
-/* elevator private data attached */
-#define RQF_ELVPRIV ((__force req_flags_t)(1 << 12))
/* account into disk and partition IO statistics */
#define RQF_IO_STAT ((__force req_flags_t)(1 << 13))
/* runtime pm request */
@@ -59,13 +57,11 @@ typedef __u32 __bitwise req_flags_t;
#define RQF_ZONE_WRITE_LOCKED ((__force req_flags_t)(1 << 19))
/* ->timeout has been called, don't expire again */
#define RQF_TIMED_OUT ((__force req_flags_t)(1 << 21))
-/* queue has elevator attached */
-#define RQF_ELV ((__force req_flags_t)(1 << 22))
-#define RQF_RESV ((__force req_flags_t)(1 << 23))
+#define RQF_RESV ((__force req_flags_t)(1 << 23))
/* flags that prevent us from merging requests: */
#define RQF_NOMERGE_FLAGS \
- (RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)
+ (RQF_STARTED | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)
enum mq_rq_state {
MQ_RQ_IDLE = 0,
@@ -160,39 +156,30 @@ struct request {
/*
* The rb_node is only used inside the io scheduler, requests
- * are pruned when moved to the dispatch queue. So let the
- * completion_data share space with the rb_node.
+ * are pruned when moved to the dispatch queue. special_vec must
+ * only be used if RQF_SPECIAL_PAYLOAD is set, and those cannot be
+ * inserted into an IO scheduler.
*/
union {
struct rb_node rb_node; /* sort/lookup */
struct bio_vec special_vec;
- void *completion_data;
};
-
/*
* Three pointers are available for the IO schedulers, if they need
- * more they have to dynamically allocate it. Flush requests are
- * never put on the IO scheduler. So let the flush fields share
- * space with the elevator data.
+ * more they have to dynamically allocate it.
*/
- union {
- struct {
- struct io_cq *icq;
- void *priv[2];
- } elv;
-
- struct {
- unsigned int seq;
- struct list_head list;
- rq_end_io_fn *saved_end_io;
- } flush;
- };
+ struct {
+ struct io_cq *icq;
+ void *priv[2];
+ } elv;
- union {
- struct __call_single_data csd;
- u64 fifo_time;
- };
+ struct {
+ unsigned int seq;
+ rq_end_io_fn *saved_end_io;
+ } flush;
+
+ u64 fifo_time;
/*
* completion callback.
@@ -208,7 +195,7 @@ static inline enum req_op req_op(const struct request *req)
static inline bool blk_rq_is_passthrough(struct request *rq)
{
- return blk_op_is_passthrough(req_op(rq));
+ return blk_op_is_passthrough(rq->cmd_flags);
}
static inline unsigned short req_get_ioprio(struct request *req)
@@ -404,11 +391,6 @@ struct blk_mq_hw_ctx {
*/
struct blk_mq_tags *sched_tags;
- /** @queued: Number of queued requests. */
- unsigned long queued;
- /** @run: Number of dispatched requests. */
- unsigned long run;
-
/** @numa_node: NUMA node the storage adapter has been connected to. */
unsigned int numa_node;
/** @queue_num: Index of this hardware queue. */
@@ -722,6 +704,8 @@ int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
void blk_mq_free_request(struct request *rq);
+int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
+ unsigned int poll_flags);
bool blk_mq_queue_inflight(struct request_queue *q);
@@ -746,8 +730,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
struct blk_mq_tags {
unsigned int nr_tags;
unsigned int nr_reserved_tags;
-
- atomic_t active_queues;
+ unsigned int active_queues;
struct sbitmap_queue bitmap_tags;
struct sbitmap_queue breserved_tags;
@@ -844,7 +827,13 @@ void blk_mq_end_request_batch(struct io_comp_batch *ib);
*/
static inline bool blk_mq_need_time_stamp(struct request *rq)
{
- return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_ELV));
+ /*
+ * passthrough I/O doesn't use iostat accounting, cgroup stats,
+ * or I/O scheduler functionality.
+ */
+ if (blk_rq_is_passthrough(rq))
+ return false;
+ return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_USE_SCHED));
}
static inline bool blk_mq_is_reserved_rq(struct request *rq)
@@ -860,7 +849,11 @@ static inline bool blk_mq_add_to_batch(struct request *req,
struct io_comp_batch *iob, int ioerror,
void (*complete)(struct io_comp_batch *))
{
- if (!iob || (req->rq_flags & RQF_ELV) || ioerror ||
+ /*
+ * blk_mq_end_request_batch() can't end requests allocated from
+ * sched tags.
+ */
+ if (!iob || (req->rq_flags & RQF_SCHED_TAGS) || ioerror ||
(req->end_io && !blk_rq_is_passthrough(req)))
return false;
@@ -1164,6 +1157,18 @@ static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
return disk_zone_is_seq(rq->q->disk, blk_rq_pos(rq));
}
+/**
+ * blk_rq_is_seq_zoned_write() - Check if @rq requires write serialization.
+ * @rq: Request to examine.
+ *
+ * Note: REQ_OP_ZONE_APPEND requests do not require serialization.
+ */
+static inline bool blk_rq_is_seq_zoned_write(struct request *rq)
+{
+ return op_needs_zoned_write_locking(req_op(rq)) &&
+ blk_rq_zone_is_seq(rq);
+}
+
bool blk_req_needs_zone_write_lock(struct request *rq);
bool blk_req_zone_write_trylock(struct request *rq);
void __blk_req_zone_write_lock(struct request *rq);
@@ -1194,6 +1199,11 @@ static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
return !blk_req_zone_is_write_locked(rq);
}
#else /* CONFIG_BLK_DEV_ZONED */
+static inline bool blk_rq_is_seq_zoned_write(struct request *rq)
+{
+ return false;
+}
+
static inline bool blk_req_needs_zone_write_lock(struct request *rq)
{
return false;
diff --git a/include/linux/blk-pm.h b/include/linux/blk-pm.h
index 2580e05a8ab6..004b38a538ff 100644
--- a/include/linux/blk-pm.h
+++ b/include/linux/blk-pm.h
@@ -15,7 +15,6 @@ extern int blk_pre_runtime_suspend(struct request_queue *q);
extern void blk_post_runtime_suspend(struct request_queue *q, int err);
extern void blk_pre_runtime_resume(struct request_queue *q);
extern void blk_post_runtime_resume(struct request_queue *q);
-extern void blk_set_runtime_active(struct request_queue *q);
#else
static inline void blk_pm_runtime_init(struct request_queue *q,
struct device *dev) {}
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 740afe80f297..f288c94374b3 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -49,25 +49,26 @@ struct block_device {
bool bd_write_holder;
bool bd_has_submit_bio;
dev_t bd_dev;
+ struct inode *bd_inode; /* will die */
+
atomic_t bd_openers;
spinlock_t bd_size_lock; /* for bd_inode->i_size updates */
- struct inode * bd_inode; /* will die */
- struct super_block * bd_super;
void * bd_claiming;
void * bd_holder;
- /* The counter of freeze processes */
- int bd_fsfreeze_count;
+ const struct blk_holder_ops *bd_holder_ops;
+ struct mutex bd_holder_lock;
int bd_holders;
struct kobject *bd_holder_dir;
- /* Mutex for freeze */
- struct mutex bd_fsfreeze_mutex;
- struct super_block *bd_fsfreeze_sb;
+ atomic_t bd_fsfreeze_count; /* number of freeze requests */
+ struct mutex bd_fsfreeze_mutex; /* serialize freeze/thaw */
struct partition_meta_info *bd_meta_info;
#ifdef CONFIG_FAIL_MAKE_REQUEST
bool bd_make_it_fail;
#endif
+ bool bd_ro_warned;
+ int bd_writers;
/*
* keep this out-of-line as it's both big and not needed in the fast
* path
@@ -101,7 +102,7 @@ typedef u16 blk_short_t;
#define BLK_STS_NOSPC ((__force blk_status_t)3)
#define BLK_STS_TRANSPORT ((__force blk_status_t)4)
#define BLK_STS_TARGET ((__force blk_status_t)5)
-#define BLK_STS_NEXUS ((__force blk_status_t)6)
+#define BLK_STS_RESV_CONFLICT ((__force blk_status_t)6)
#define BLK_STS_MEDIUM ((__force blk_status_t)7)
#define BLK_STS_PROTECTION ((__force blk_status_t)8)
#define BLK_STS_RESOURCE ((__force blk_status_t)9)
@@ -171,6 +172,12 @@ typedef u16 blk_short_t;
*/
#define BLK_STS_OFFLINE ((__force blk_status_t)17)
+/*
+ * BLK_STS_DURATION_LIMIT is returned from the driver when the target device
+ * aborted the command because it exceeded one of its Command Duration Limits.
+ */
+#define BLK_STS_DURATION_LIMIT ((__force blk_status_t)18)
+
/**
* blk_path_error - returns true if error may be path related
* @error: status the request was completed with
@@ -189,7 +196,7 @@ static inline bool blk_path_error(blk_status_t error)
case BLK_STS_NOTSUPP:
case BLK_STS_NOSPC:
case BLK_STS_TARGET:
- case BLK_STS_NEXUS:
+ case BLK_STS_RESV_CONFLICT:
case BLK_STS_MEDIUM:
case BLK_STS_PROTECTION:
return false;
@@ -323,7 +330,7 @@ struct bio {
* bio flags
*/
enum {
- BIO_NO_PAGE_REF, /* don't put release vec pages */
+ BIO_PAGE_PINNED, /* Unpin pages in bio_release_pages() */
BIO_CLONED, /* doesn't own data */
BIO_BOUNCED, /* bio is a bounce bio */
BIO_QUIET, /* Make BIO Quiet */
@@ -371,6 +378,8 @@ enum req_op {
REQ_OP_DISCARD = (__force blk_opf_t)3,
/* securely erase sectors */
REQ_OP_SECURE_ERASE = (__force blk_opf_t)5,
+ /* write data at the current zone write pointer */
+ REQ_OP_ZONE_APPEND = (__force blk_opf_t)7,
/* write the zero filled sector many times */
REQ_OP_WRITE_ZEROES = (__force blk_opf_t)9,
/* Open a zone */
@@ -379,12 +388,10 @@ enum req_op {
REQ_OP_ZONE_CLOSE = (__force blk_opf_t)11,
/* Transition a zone to full */
REQ_OP_ZONE_FINISH = (__force blk_opf_t)12,
- /* write data at the current zone write pointer */
- REQ_OP_ZONE_APPEND = (__force blk_opf_t)13,
/* reset a zone write pointer */
- REQ_OP_ZONE_RESET = (__force blk_opf_t)15,
+ REQ_OP_ZONE_RESET = (__force blk_opf_t)13,
/* reset all the zone present on the device */
- REQ_OP_ZONE_RESET_ALL = (__force blk_opf_t)17,
+ REQ_OP_ZONE_RESET_ALL = (__force blk_opf_t)15,
/* Driver private requests */
REQ_OP_DRV_IN = (__force blk_opf_t)34,
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index c0ffe203a602..99e4f5e72213 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -41,7 +41,7 @@ struct blk_stat_callback;
struct blk_crypto_profile;
extern const struct device_type disk_type;
-extern struct device_type part_type;
+extern const struct device_type part_type;
extern struct class block_class;
/*
@@ -112,6 +112,21 @@ struct blk_integrity {
unsigned char tag_size;
};
+typedef unsigned int __bitwise blk_mode_t;
+
+/* open for reading */
+#define BLK_OPEN_READ ((__force blk_mode_t)(1 << 0))
+/* open for writing */
+#define BLK_OPEN_WRITE ((__force blk_mode_t)(1 << 1))
+/* open exclusively (vs other exclusive openers) */
+#define BLK_OPEN_EXCL ((__force blk_mode_t)(1 << 2))
+/* opened with O_NDELAY */
+#define BLK_OPEN_NDELAY ((__force blk_mode_t)(1 << 3))
+/* open for "writes" only for ioctls (special hack for floppy.c) */
+#define BLK_OPEN_WRITE_IOCTL ((__force blk_mode_t)(1 << 4))
+/* open is exclusive wrt all other BLK_OPEN_WRITE opens to the device */
+#define BLK_OPEN_RESTRICT_WRITES ((__force blk_mode_t)(1 << 5))
+
struct gendisk {
/*
* major/first_minor/minors should not be set by any new driver, the
@@ -187,6 +202,7 @@ struct gendisk {
struct badblocks *bb;
struct lockdep_map lockdep_map;
u64 diskseq;
+ blk_mode_t open_mode;
/*
* Independent sector access ranges. This is always NULL for
@@ -250,18 +266,6 @@ static inline bool blk_op_is_passthrough(blk_opf_t op)
}
/*
- * Zoned block device models (zoned limit).
- *
- * Note: This needs to be ordered from the least to the most severe
- * restrictions for the inheritance in blk_stack_limits() to work.
- */
-enum blk_zoned_model {
- BLK_ZONED_NONE = 0, /* Regular block device */
- BLK_ZONED_HA, /* Host-aware zoned block device */
- BLK_ZONED_HM, /* Host-managed zoned block device */
-};
-
-/*
* BLK_BOUNCE_NONE: never bounce (default)
* BLK_BOUNCE_HIGH: bounce all highmem pages
*/
@@ -302,7 +306,7 @@ struct queue_limits {
unsigned char misaligned;
unsigned char discard_misaligned;
unsigned char raid_partial_stripes_expensive;
- enum blk_zoned_model zoned;
+ bool zoned;
/*
* Drivers that set dma_alignment to less than 511 must be prepared to
@@ -315,47 +319,15 @@ struct queue_limits {
typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
void *data);
-void disk_set_zoned(struct gendisk *disk, enum blk_zoned_model model);
-
-#ifdef CONFIG_BLK_DEV_ZONED
+void disk_set_zoned(struct gendisk *disk);
#define BLK_ALL_ZONES ((unsigned int)-1)
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
- unsigned int nr_zones, report_zones_cb cb, void *data);
-unsigned int bdev_nr_zones(struct block_device *bdev);
-extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
- sector_t sectors, sector_t nr_sectors,
- gfp_t gfp_mask);
+ unsigned int nr_zones, report_zones_cb cb, void *data);
+int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
+ sector_t sectors, sector_t nr_sectors, gfp_t gfp_mask);
int blk_revalidate_disk_zones(struct gendisk *disk,
- void (*update_driver_data)(struct gendisk *disk));
-
-extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg);
-extern int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg);
-
-#else /* CONFIG_BLK_DEV_ZONED */
-
-static inline unsigned int bdev_nr_zones(struct block_device *bdev)
-{
- return 0;
-}
-
-static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
- fmode_t mode, unsigned int cmd,
- unsigned long arg)
-{
- return -ENOTTY;
-}
-
-static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
- fmode_t mode, unsigned int cmd,
- unsigned long arg)
-{
- return -ENOTTY;
-}
-
-#endif /* CONFIG_BLK_DEV_ZONED */
+ void (*update_driver_data)(struct gendisk *disk));
/*
* Independent access ranges: struct blk_independent_access_range describes
@@ -385,58 +357,51 @@ struct blk_independent_access_ranges {
};
struct request_queue {
- struct request *last_merge;
- struct elevator_queue *elevator;
-
- struct percpu_ref q_usage_counter;
+ /*
+ * The queue owner gets to use this for whatever they like.
+ * ll_rw_blk doesn't touch it.
+ */
+ void *queuedata;
- struct blk_queue_stats *stats;
- struct rq_qos *rq_qos;
+ struct elevator_queue *elevator;
const struct blk_mq_ops *mq_ops;
/* sw queues */
struct blk_mq_ctx __percpu *queue_ctx;
+ /*
+ * various queue flags, see QUEUE_* below
+ */
+ unsigned long queue_flags;
+
+ unsigned int rq_timeout;
+
unsigned int queue_depth;
+ refcount_t refs;
+
/* hw dispatch queues */
- struct xarray hctx_table;
unsigned int nr_hw_queues;
+ struct xarray hctx_table;
- /*
- * The queue owner gets to use this for whatever they like.
- * ll_rw_blk doesn't touch it.
- */
- void *queuedata;
-
- /*
- * various queue flags, see QUEUE_* below
- */
- unsigned long queue_flags;
- /*
- * Number of contexts that have called blk_set_pm_only(). If this
- * counter is above zero then only RQF_PM requests are processed.
- */
- atomic_t pm_only;
+ struct percpu_ref q_usage_counter;
- /*
- * ida allocated id for this queue. Used to index queues from
- * ioctx.
- */
- int id;
+ struct request *last_merge;
spinlock_t queue_lock;
- struct gendisk *disk;
+ int quiesce_depth;
- refcount_t refs;
+ struct gendisk *disk;
/*
* mq queue kobject
*/
struct kobject *mq_kobj;
+ struct queue_limits limits;
+
#ifdef CONFIG_BLK_DEV_INTEGRITY
struct blk_integrity integrity;
#endif /* CONFIG_BLK_DEV_INTEGRITY */
@@ -447,24 +412,40 @@ struct request_queue {
#endif
/*
- * queue settings
+ * Number of contexts that have called blk_set_pm_only(). If this
+ * counter is above zero then only RQF_PM requests are processed.
*/
- unsigned long nr_requests; /* Max # of requests */
+ atomic_t pm_only;
+
+ struct blk_queue_stats *stats;
+ struct rq_qos *rq_qos;
+ struct mutex rq_qos_mutex;
+
+ /*
+ * ida allocated id for this queue. Used to index queues from
+ * ioctx.
+ */
+ int id;
unsigned int dma_pad_mask;
+ /*
+ * queue settings
+ */
+ unsigned long nr_requests; /* Max # of requests */
+
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
struct blk_crypto_profile *crypto_profile;
struct kobject *crypto_kobject;
#endif
- unsigned int rq_timeout;
-
struct timer_list timeout;
struct work_struct timeout_work;
atomic_t nr_active_requests_shared_tags;
+ unsigned int required_elevator_features;
+
struct blk_mq_tags *sched_shared_tags;
struct list_head icq_list;
@@ -475,11 +456,12 @@ struct request_queue {
struct mutex blkcg_mutex;
#endif
- struct queue_limits limits;
+ int node;
- unsigned int required_elevator_features;
+ spinlock_t requeue_lock;
+ struct list_head requeue_list;
+ struct delayed_work requeue_work;
- int node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
struct blk_trace __rcu *blk_trace;
#endif
@@ -487,10 +469,7 @@ struct request_queue {
* for flush operations
*/
struct blk_flush_queue *fq;
-
- struct list_head requeue_list;
- spinlock_t requeue_lock;
- struct delayed_work requeue_work;
+ struct list_head flush_list;
struct mutex sysfs_lock;
struct mutex sysfs_dir_lock;
@@ -516,8 +495,6 @@ struct request_queue {
*/
struct mutex mq_freeze_lock;
- int quiesce_depth;
-
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
@@ -545,6 +522,7 @@ struct request_queue {
#define QUEUE_FLAG_ADD_RANDOM 10 /* Contributes to random pool */
#define QUEUE_FLAG_SYNCHRONOUS 11 /* always completes in submit context */
#define QUEUE_FLAG_SAME_FORCE 12 /* force complete on same CPU */
+#define QUEUE_FLAG_HW_WC 13 /* Write back caching supported */
#define QUEUE_FLAG_INIT_DONE 14 /* queue is initialized */
#define QUEUE_FLAG_STABLE_WRITES 15 /* don't modify blks until WB is done */
#define QUEUE_FLAG_POLL 16 /* IO polling enabled if set */
@@ -629,26 +607,14 @@ static inline enum rpm_status queue_rpm_status(struct request_queue *q)
}
#endif
-static inline enum blk_zoned_model
-blk_queue_zoned_model(struct request_queue *q)
-{
- if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
- return q->limits.zoned;
- return BLK_ZONED_NONE;
-}
-
static inline bool blk_queue_is_zoned(struct request_queue *q)
{
- switch (blk_queue_zoned_model(q)) {
- case BLK_ZONED_HA:
- case BLK_ZONED_HM:
- return true;
- default:
- return false;
- }
+ return IS_ENABLED(CONFIG_BLK_DEV_ZONED) && q->limits.zoned;
}
#ifdef CONFIG_BLK_DEV_ZONED
+unsigned int bdev_nr_zones(struct block_device *bdev);
+
static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
return blk_queue_is_zoned(disk->queue) ? disk->nr_zones : 0;
@@ -693,6 +659,11 @@ static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
}
#else /* CONFIG_BLK_DEV_ZONED */
+static inline unsigned int bdev_nr_zones(struct block_device *bdev)
+{
+ return 0;
+}
+
static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
return 0;
@@ -757,7 +728,8 @@ static inline int bdev_read_only(struct block_device *bdev)
}
bool set_capacity_and_notify(struct gendisk *disk, sector_t size);
-bool disk_force_media_change(struct gendisk *disk, unsigned int events);
+void disk_force_media_change(struct gendisk *disk);
+void bdev_mark_dead(struct block_device *bdev, bool surprise);
void add_disk_randomness(struct gendisk *disk) __latent_entropy;
void rand_initialize_disk(struct gendisk *disk);
@@ -815,8 +787,7 @@ int __register_blkdev(unsigned int major, const char *name,
__register_blkdev(major, name, NULL)
void unregister_blkdev(unsigned int major, const char *name);
-bool bdev_check_media_change(struct block_device *bdev);
-int __invalidate_device(struct block_device *bdev, bool kill_dirty);
+bool disk_check_media_change(struct gendisk *disk);
void set_capacity(struct gendisk *disk, sector_t size);
#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
@@ -836,7 +807,6 @@ static inline void bd_unlink_disk_holder(struct block_device *bdev,
dev_t part_devt(struct gendisk *disk, u8 partno);
void inc_diskseq(struct gendisk *disk);
-dev_t blk_lookup_devt(const char *name, int partno);
void blk_request_module(dev_t devt);
extern int blk_register_queue(struct gendisk *disk);
@@ -854,6 +824,7 @@ extern const char *blk_op_str(enum req_op op);
int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);
+const char *blk_status_to_str(blk_status_t status);
/* only poll the hardware once, don't continue until a completion was found */
#define BLK_POLL_ONESHOT (1 << 0)
@@ -977,7 +948,6 @@ struct blk_plug {
bool multiple_queues;
bool has_elevator;
- bool nowait;
struct list_head cb_list; /* md requires an unplug callback */
};
@@ -1087,7 +1057,14 @@ enum blk_default_limits {
BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL,
};
-#define BLK_DEF_MAX_SECTORS 2560u
+/*
+ * Default upper limit for the software max_sectors limit used for
+ * regular file system I/O. This can be increased through sysfs.
+ *
+ * Not to be confused with the max_hw_sectors limit that is entirely
+ * controlled by the driver, usually based on hardware limits.
+ */
+#define BLK_DEF_MAX_SECTORS_CAP 2560u
static inline unsigned long queue_segment_boundary(const struct request_queue *q)
{
@@ -1266,11 +1243,6 @@ static inline bool bdev_nowait(struct block_device *bdev)
return test_bit(QUEUE_FLAG_NOWAIT, &bdev_get_queue(bdev)->queue_flags);
}
-static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
-{
- return blk_queue_zoned_model(bdev_get_queue(bdev));
-}
-
static inline bool bdev_is_zoned(struct block_device *bdev)
{
return blk_queue_is_zoned(bdev_get_queue(bdev));
@@ -1281,15 +1253,18 @@ static inline unsigned int bdev_zone_no(struct block_device *bdev, sector_t sec)
return disk_zone_no(bdev->bd_disk, sec);
}
-static inline bool bdev_op_is_zoned_write(struct block_device *bdev,
- blk_opf_t op)
+/* Whether write serialization is required for @op on zoned devices. */
+static inline bool op_needs_zoned_write_locking(enum req_op op)
{
- if (!bdev_is_zoned(bdev))
- return false;
-
return op == REQ_OP_WRITE || op == REQ_OP_WRITE_ZEROES;
}
+static inline bool bdev_op_is_zoned_write(struct block_device *bdev,
+ enum req_op op)
+{
+ return bdev_is_zoned(bdev) && op_needs_zoned_write_locking(op);
+}
+
static inline sector_t bdev_zone_sectors(struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);
@@ -1380,10 +1355,12 @@ struct block_device_operations {
void (*submit_bio)(struct bio *bio);
int (*poll_bio)(struct bio *bio, struct io_comp_batch *iob,
unsigned int flags);
- int (*open) (struct block_device *, fmode_t);
- void (*release) (struct gendisk *, fmode_t);
- int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
- int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
+ int (*open)(struct gendisk *disk, blk_mode_t mode);
+ void (*release)(struct gendisk *disk);
+ int (*ioctl)(struct block_device *bdev, blk_mode_t mode,
+ unsigned cmd, unsigned long arg);
+ int (*compat_ioctl)(struct block_device *bdev, blk_mode_t mode,
+ unsigned cmd, unsigned long arg);
unsigned int (*check_events) (struct gendisk *disk,
unsigned int clearing);
void (*unlock_native_capacity) (struct gendisk *);
@@ -1410,7 +1387,7 @@ struct block_device_operations {
};
#ifdef CONFIG_COMPAT
-extern int blkdev_compat_ptr_ioctl(struct block_device *, fmode_t,
+extern int blkdev_compat_ptr_ioctl(struct block_device *, blk_mode_t,
unsigned int, unsigned long);
#else
#define blkdev_compat_ptr_ioctl NULL
@@ -1463,22 +1440,60 @@ void blkdev_show(struct seq_file *seqf, off_t offset);
#define BLKDEV_MAJOR_MAX 0
#endif
-struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
- void *holder);
-struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder);
-int bd_prepare_to_claim(struct block_device *bdev, void *holder);
+struct blk_holder_ops {
+ void (*mark_dead)(struct block_device *bdev, bool surprise);
+
+ /*
+ * Sync the file system mounted on the block device.
+ */
+ void (*sync)(struct block_device *bdev);
+
+ /*
+ * Freeze the file system mounted on the block device.
+ */
+ int (*freeze)(struct block_device *bdev);
+
+ /*
+ * Thaw the file system mounted on the block device.
+ */
+ int (*thaw)(struct block_device *bdev);
+};
+
+/*
+ * For filesystems using @fs_holder_ops, the @holder argument passed to
+ * helpers used to open and claim block devices via
+ * bd_prepare_to_claim() must point to a superblock.
+ */
+extern const struct blk_holder_ops fs_holder_ops;
+
+/*
+ * Return the correct open flags for bdev_open_by_* for super block flags
+ * as stored in sb->s_flags.
+ */
+#define sb_open_mode(flags) \
+ (BLK_OPEN_READ | BLK_OPEN_RESTRICT_WRITES | \
+ (((flags) & SB_RDONLY) ? 0 : BLK_OPEN_WRITE))
+
+struct bdev_handle {
+ struct block_device *bdev;
+ void *holder;
+ blk_mode_t mode;
+};
+
+struct bdev_handle *bdev_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
+ const struct blk_holder_ops *hops);
+struct bdev_handle *bdev_open_by_path(const char *path, blk_mode_t mode,
+ void *holder, const struct blk_holder_ops *hops);
+int bd_prepare_to_claim(struct block_device *bdev, void *holder,
+ const struct blk_holder_ops *hops);
void bd_abort_claiming(struct block_device *bdev, void *holder);
-void blkdev_put(struct block_device *bdev, fmode_t mode);
+void bdev_release(struct bdev_handle *handle);
/* just for blk-cgroup, don't use elsewhere */
struct block_device *blkdev_get_no_open(dev_t dev);
void blkdev_put_no_open(struct block_device *bdev);
-struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
-void bdev_add(struct block_device *bdev, dev_t dev);
struct block_device *I_BDEV(struct inode *inode);
-int truncate_bdev_range(struct block_device *bdev, fmode_t mode, loff_t lstart,
- loff_t lend);
#ifdef CONFIG_BLOCK
void invalidate_bdev(struct block_device *bdev);
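/*
 * Editorial sketch, not part of the patch: a filesystem opening its
 * backing device through the new handle-based API. The function and its
 * error handling are illustrative only.
 */
static int example_open_bdev(struct super_block *sb, const char *path)
{
	struct bdev_handle *handle;

	handle = bdev_open_by_path(path, sb_open_mode(sb->s_flags),
				   sb, &fs_holder_ops);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* ... the handle is kept for the life of the mount; on unmount: */
	bdev_release(handle);
	return 0;
}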
@@ -1488,6 +1503,7 @@ int sync_blockdev_nowait(struct block_device *bdev);
void sync_bdevs(bool wait);
void bdev_statx_dioalign(struct inode *inode, struct kstat *stat);
void printk_all_partitions(void);
+int __init early_lookup_bdev(const char *pathname, dev_t *dev);
#else
static inline void invalidate_bdev(struct block_device *bdev)
{
@@ -1509,12 +1525,14 @@ static inline void bdev_statx_dioalign(struct inode *inode, struct kstat *stat)
static inline void printk_all_partitions(void)
{
}
+static inline int early_lookup_bdev(const char *pathname, dev_t *dev)
+{
+ return -EINVAL;
+}
#endif /* CONFIG_BLOCK */
-int fsync_bdev(struct block_device *bdev);
-
-int freeze_bdev(struct block_device *bdev);
-int thaw_bdev(struct block_device *bdev);
+int bdev_freeze(struct block_device *bdev);
+int bdev_thaw(struct block_device *bdev);
struct io_comp_batch {
struct request *req_list;
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index cfbda114348c..122c62e561fc 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -85,10 +85,14 @@ extern int blk_trace_remove(struct request_queue *q);
# define blk_add_driver_data(rq, data, len) do {} while (0)
# define blk_trace_setup(q, name, dev, bdev, arg) (-ENOTTY)
# define blk_trace_startstop(q, start) (-ENOTTY)
-# define blk_trace_remove(q) (-ENOTTY)
# define blk_add_trace_msg(q, fmt, ...) do { } while (0)
# define blk_add_cgroup_trace_msg(q, cg, fmt, ...) do { } while (0)
# define blk_trace_note_message_enabled(q) (false)
+
+static inline int blk_trace_remove(struct request_queue *q)
+{
+ return -ENOTTY;
+}
#endif /* CONFIG_BLK_DEV_IO_TRACE */
#ifdef CONFIG_COMPAT
diff --git a/include/linux/bootmem_info.h b/include/linux/bootmem_info.h
index cc35d010fa94..cffa38a73618 100644
--- a/include/linux/bootmem_info.h
+++ b/include/linux/bootmem_info.h
@@ -3,6 +3,7 @@
#define __LINUX_BOOTMEM_INFO_H
#include <linux/mm.h>
+#include <linux/kmemleak.h>
/*
* Types for free bootmem stored in page->lru.next. These have to be in
@@ -59,6 +60,7 @@ static inline void get_page_bootmem(unsigned long info, struct page *page,
static inline void free_bootmem_page(struct page *page)
{
+ kmemleak_free_part_phys(PFN_PHYS(page_to_pfn(page)), PAGE_SIZE);
free_reserved_page(page);
}
#endif
diff --git a/include/linux/bpf-cgroup-defs.h b/include/linux/bpf-cgroup-defs.h
index 7b121bd780eb..0985221d5478 100644
--- a/include/linux/bpf-cgroup-defs.h
+++ b/include/linux/bpf-cgroup-defs.h
@@ -28,19 +28,24 @@ enum cgroup_bpf_attach_type {
CGROUP_INET6_BIND,
CGROUP_INET4_CONNECT,
CGROUP_INET6_CONNECT,
+ CGROUP_UNIX_CONNECT,
CGROUP_INET4_POST_BIND,
CGROUP_INET6_POST_BIND,
CGROUP_UDP4_SENDMSG,
CGROUP_UDP6_SENDMSG,
+ CGROUP_UNIX_SENDMSG,
CGROUP_SYSCTL,
CGROUP_UDP4_RECVMSG,
CGROUP_UDP6_RECVMSG,
+ CGROUP_UNIX_RECVMSG,
CGROUP_GETSOCKOPT,
CGROUP_SETSOCKOPT,
CGROUP_INET4_GETPEERNAME,
CGROUP_INET6_GETPEERNAME,
+ CGROUP_UNIX_GETPEERNAME,
CGROUP_INET4_GETSOCKNAME,
CGROUP_INET6_GETSOCKNAME,
+ CGROUP_UNIX_GETSOCKNAME,
CGROUP_INET_SOCK_RELEASE,
CGROUP_LSM_START,
CGROUP_LSM_END = CGROUP_LSM_START + CGROUP_LSM_NUM - 1,
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 57e9e109257e..a789266feac3 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -48,19 +48,24 @@ to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type)
CGROUP_ATYPE(CGROUP_INET6_BIND);
CGROUP_ATYPE(CGROUP_INET4_CONNECT);
CGROUP_ATYPE(CGROUP_INET6_CONNECT);
+ CGROUP_ATYPE(CGROUP_UNIX_CONNECT);
CGROUP_ATYPE(CGROUP_INET4_POST_BIND);
CGROUP_ATYPE(CGROUP_INET6_POST_BIND);
CGROUP_ATYPE(CGROUP_UDP4_SENDMSG);
CGROUP_ATYPE(CGROUP_UDP6_SENDMSG);
+ CGROUP_ATYPE(CGROUP_UNIX_SENDMSG);
CGROUP_ATYPE(CGROUP_SYSCTL);
CGROUP_ATYPE(CGROUP_UDP4_RECVMSG);
CGROUP_ATYPE(CGROUP_UDP6_RECVMSG);
+ CGROUP_ATYPE(CGROUP_UNIX_RECVMSG);
CGROUP_ATYPE(CGROUP_GETSOCKOPT);
CGROUP_ATYPE(CGROUP_SETSOCKOPT);
CGROUP_ATYPE(CGROUP_INET4_GETPEERNAME);
CGROUP_ATYPE(CGROUP_INET6_GETPEERNAME);
+ CGROUP_ATYPE(CGROUP_UNIX_GETPEERNAME);
CGROUP_ATYPE(CGROUP_INET4_GETSOCKNAME);
CGROUP_ATYPE(CGROUP_INET6_GETSOCKNAME);
+ CGROUP_ATYPE(CGROUP_UNIX_GETSOCKNAME);
CGROUP_ATYPE(CGROUP_INET_SOCK_RELEASE);
default:
return CGROUP_BPF_ATTACH_TYPE_INVALID;
@@ -120,6 +125,7 @@ int __cgroup_bpf_run_filter_sk(struct sock *sk,
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
struct sockaddr *uaddr,
+ int *uaddrlen,
enum cgroup_bpf_attach_type atype,
void *t_ctx,
u32 *flags);
@@ -137,11 +143,12 @@ int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
enum cgroup_bpf_attach_type atype);
int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
- int *optname, char __user *optval,
+ int *optname, sockptr_t optval,
int *optlen, char **kernel_optval);
+
int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
- int optname, char __user *optval,
- int __user *optlen, int max_optlen,
+ int optname, sockptr_t optval,
+ sockptr_t optlen, int max_optlen,
int retval);
int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
@@ -199,9 +206,9 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk && sk == skb->sk) { \
+ if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk) { \
typeof(sk) __sk = sk_to_full_sk(sk); \
- if (sk_fullsock(__sk) && \
+ if (sk_fullsock(__sk) && __sk == skb_to_full_sk(skb) && \
cgroup_bpf_sock_enabled(__sk, CGROUP_INET_EGRESS)) \
__ret = __cgroup_bpf_run_filter_skb(__sk, skb, \
CGROUP_INET_EGRESS); \
@@ -230,22 +237,22 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) \
BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET6_POST_BIND)
-#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype) \
+#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, atype) \
({ \
int __ret = 0; \
if (cgroup_bpf_enabled(atype)) \
- __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
- NULL, NULL); \
+ __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, uaddrlen, \
+ atype, NULL, NULL); \
__ret; \
})
-#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) \
+#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, atype, t_ctx) \
({ \
int __ret = 0; \
if (cgroup_bpf_enabled(atype)) { \
lock_sock(sk); \
- __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
- t_ctx, NULL); \
+ __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, uaddrlen, \
+ atype, t_ctx, NULL); \
release_sock(sk); \
} \
__ret; \
@@ -256,14 +263,14 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
* (at bit position 0) is to indicate CAP_NET_BIND_SERVICE capability check
* should be bypassed (BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE).
*/
-#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, bind_flags) \
+#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, uaddrlen, atype, bind_flags) \
({ \
u32 __flags = 0; \
int __ret = 0; \
if (cgroup_bpf_enabled(atype)) { \
lock_sock(sk); \
- __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
- NULL, &__flags); \
+ __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, uaddrlen, \
+ atype, NULL, &__flags); \
release_sock(sk); \
if (__flags & BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE) \
*bind_flags |= BIND_NO_CAP_NET_BIND_SERVICE; \
@@ -276,29 +283,38 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
cgroup_bpf_enabled(CGROUP_INET6_CONNECT)) && \
(sk)->sk_prot->pre_connect)
-#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET4_CONNECT)
+#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr, uaddrlen) \
+ BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, CGROUP_INET4_CONNECT)
+
+#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr, uaddrlen) \
+ BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, CGROUP_INET6_CONNECT)
+
+#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr, uaddrlen) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_INET4_CONNECT, NULL)
+
+#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, uaddrlen) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_INET6_CONNECT, NULL)
-#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET6_CONNECT)
+#define BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, uaddr, uaddrlen) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UNIX_CONNECT, NULL)
-#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET4_CONNECT, NULL)
+#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP4_SENDMSG, t_ctx)
-#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET6_CONNECT, NULL)
+#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP6_SENDMSG, t_ctx)
-#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_SENDMSG, t_ctx)
+#define BPF_CGROUP_RUN_PROG_UNIX_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UNIX_SENDMSG, t_ctx)
-#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_SENDMSG, t_ctx)
+#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr, uaddrlen) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP4_RECVMSG, NULL)
-#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_RECVMSG, NULL)
+#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr, uaddrlen) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP6_RECVMSG, NULL)
-#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_RECVMSG, NULL)
+#define BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk, uaddr, uaddrlen) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UNIX_RECVMSG, NULL)
/* The SOCK_OPS"_SK" macro should be used when sock_ops->sk is not a
* fullsock and its parent fullsock cannot be traced by
@@ -377,7 +393,7 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
({ \
int __ret = 0; \
if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \
- get_user(__ret, optlen); \
+ copy_from_sockptr(&__ret, optlen, sizeof(int)); \
__ret; \
})
@@ -477,24 +493,27 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
}
#define cgroup_bpf_enabled(atype) (0)
-#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) ({ 0; })
-#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype) ({ 0; })
+#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, atype, t_ctx) ({ 0; })
+#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, atype) ({ 0; })
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, flags) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, uaddrlen, atype, flags) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr, uaddrlen) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr, uaddrlen) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr, uaddrlen) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, uaddrlen) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, uaddr, uaddrlen) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UNIX_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr, uaddrlen) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr, uaddrlen) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos) ({ 0; })
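All sock_addr hook wrappers now thread the address length (uaddrlen) next to the sockaddr so programs can read and rewrite variable-length addresses; the fixed-size sockaddr_in never needed this, but the new AF_UNIX hooks do, since a unix path length varies. A hypothetical call-site sketch (function name assumed) of the sendmsg flavor:

/* Hypothetical caller: how a unix sendmsg path might run the new
 * hook, letting the program rewrite both the address and its length.
 */
static int example_run_unix_sendmsg_hook(struct sock *sk, struct msghdr *msg)
{
	return BPF_CGROUP_RUN_PROG_UNIX_SENDMSG_LOCK(sk, msg->msg_name,
						     &msg->msg_namelen, NULL);
}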
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index e53ceee1df37..e30100597d0a 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -29,6 +29,7 @@
#include <linux/rcupdate_trace.h>
#include <linux/static_call.h>
#include <linux/memcontrol.h>
+#include <linux/cfi.h>
struct bpf_verifier_env;
struct bpf_verifier_log;
@@ -55,7 +56,7 @@ struct cgroup;
extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
extern struct kobject *btf_kobj;
-extern struct bpf_mem_alloc bpf_global_ma;
+extern struct bpf_mem_alloc bpf_global_ma, bpf_global_percpu_ma;
extern bool bpf_global_ma_set;
typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);
@@ -106,7 +107,11 @@ struct bpf_map_ops {
/* funcs called by prog_array and perf_event_array map */
void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
int fd);
- void (*map_fd_put_ptr)(void *ptr);
+ /* If need_defer is true, the implementation should guarantee that
+ * the to-be-put element is still alive before the bpf program, which
+ * may manipulate it, exits.
+ */
+ void (*map_fd_put_ptr)(struct bpf_map *map, void *ptr, bool need_defer);
int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
u32 (*map_fd_sys_lookup_elem)(void *ptr);
void (*map_seq_show_elem)(struct bpf_map *map, void *key,
@@ -180,14 +185,15 @@ enum btf_field_type {
BPF_TIMER = (1 << 1),
BPF_KPTR_UNREF = (1 << 2),
BPF_KPTR_REF = (1 << 3),
- BPF_KPTR = BPF_KPTR_UNREF | BPF_KPTR_REF,
- BPF_LIST_HEAD = (1 << 4),
- BPF_LIST_NODE = (1 << 5),
- BPF_RB_ROOT = (1 << 6),
- BPF_RB_NODE = (1 << 7),
- BPF_GRAPH_NODE_OR_ROOT = BPF_LIST_NODE | BPF_LIST_HEAD |
- BPF_RB_NODE | BPF_RB_ROOT,
- BPF_REFCOUNT = (1 << 8),
+ BPF_KPTR_PERCPU = (1 << 4),
+ BPF_KPTR = BPF_KPTR_UNREF | BPF_KPTR_REF | BPF_KPTR_PERCPU,
+ BPF_LIST_HEAD = (1 << 5),
+ BPF_LIST_NODE = (1 << 6),
+ BPF_RB_ROOT = (1 << 7),
+ BPF_RB_NODE = (1 << 8),
+ BPF_GRAPH_NODE = BPF_RB_NODE | BPF_LIST_NODE,
+ BPF_GRAPH_ROOT = BPF_RB_ROOT | BPF_LIST_HEAD,
+ BPF_REFCOUNT = (1 << 9),
};
typedef void (*btf_dtor_kfunc_t)(void *);
@@ -228,6 +234,18 @@ struct btf_record {
struct btf_field fields[];
};
+/* Non-opaque version of bpf_rb_node in uapi/linux/bpf.h */
+struct bpf_rb_node_kern {
+ struct rb_node rb_node;
+ void *owner;
+} __attribute__((aligned(8)));
+
+/* Non-opaque version of bpf_list_node in uapi/linux/bpf.h */
+struct bpf_list_node_kern {
+ struct list_head list_head;
+ void *owner;
+} __attribute__((aligned(8)));
+
struct bpf_map {
/* The first two cachelines with read-mostly members of which some
* are also accessed in fast-path (e.g. ops, max_entries).
@@ -259,7 +277,11 @@ struct bpf_map {
*/
atomic64_t refcnt ____cacheline_aligned;
atomic64_t usercnt;
- struct work_struct work;
+ /* rcu is used before freeing and work is only used during freeing */
+ union {
+ struct work_struct work;
+ struct rcu_head rcu;
+ };
struct mutex freeze_mutex;
atomic64_t writecnt;
/* 'Ownership' of program-containing map is claimed by the first program
@@ -275,6 +297,10 @@ struct bpf_map {
} owner;
bool bypass_spec_v1;
bool frozen; /* write-once; write-protected by freeze_mutex */
+ bool free_after_mult_rcu_gp;
+ bool free_after_rcu_gp;
+ atomic64_t sleepable_refcnt;
+ s64 __percpu *elem_count;
};
static inline const char *btf_field_type_name(enum btf_field_type type)
@@ -287,6 +313,8 @@ static inline const char *btf_field_type_name(enum btf_field_type type)
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
return "kptr";
+ case BPF_KPTR_PERCPU:
+ return "percpu_kptr";
case BPF_LIST_HEAD:
return "bpf_list_head";
case BPF_LIST_NODE:
@@ -312,6 +340,7 @@ static inline u32 btf_field_type_size(enum btf_field_type type)
return sizeof(struct bpf_timer);
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
+ case BPF_KPTR_PERCPU:
return sizeof(u64);
case BPF_LIST_HEAD:
return sizeof(struct bpf_list_head);
@@ -338,6 +367,7 @@ static inline u32 btf_field_type_align(enum btf_field_type type)
return __alignof__(struct bpf_timer);
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
+ case BPF_KPTR_PERCPU:
return __alignof__(u64);
case BPF_LIST_HEAD:
return __alignof__(struct bpf_list_head);
@@ -376,6 +406,7 @@ static inline void bpf_obj_init_field(const struct btf_field *field, void *addr)
case BPF_TIMER:
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
+ case BPF_KPTR_PERCPU:
break;
default:
WARN_ON_ONCE(1);
@@ -425,7 +456,7 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
size /= sizeof(long);
while (size--)
- *ldst++ = *lsrc++;
+ data_race(*ldst++ = *lsrc++);
}
/* copy everything but bpf_spin_lock, bpf_timer, and kptrs. There could be one of each. */
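The data_race() wrapper in bpf_long_memcpy() marks the word-sized copy as an intentional data race: per-cpu map values may be updated concurrently and a torn snapshot is acceptable, so KCSAN should not flag it. A minimal sketch of the annotation's use, assuming KCSAN is what consumes it:

#include <linux/compiler.h>

/* Sketch: a deliberately racy read; data_race() documents the intent
 * and suppresses the KCSAN report without adding any ordering.
 */
static long example_snapshot(const long *counter)
{
	return data_race(*counter);
}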
@@ -640,7 +671,8 @@ enum bpf_type_flag {
MEM_RCU = BIT(13 + BPF_BASE_TYPE_BITS),
/* Used to tag PTR_TO_BTF_ID | MEM_ALLOC references which are non-owning.
- * Currently only valid for linked-list and rbtree nodes.
+ * Currently only valid for linked-list and rbtree nodes. If the nodes
+ * have a bpf_refcount_field, they must be tagged MEM_RCU as well.
*/
NON_OWN_REF = BIT(14 + BPF_BASE_TYPE_BITS),
@@ -889,10 +921,14 @@ bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
aux->ctx_field_size = size;
}
+static bool bpf_is_ldimm64(const struct bpf_insn *insn)
+{
+ return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
+}
+
static inline bool bpf_pseudo_func(const struct bpf_insn *insn)
{
- return insn->code == (BPF_LD | BPF_IMM | BPF_DW) &&
- insn->src_reg == BPF_PSEUDO_FUNC;
+ return bpf_is_ldimm64(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
}
struct bpf_prog_ops {
@@ -1015,6 +1051,22 @@ struct btf_func_model {
*/
#define BPF_TRAMP_F_SHARE_IPMODIFY BIT(6)
+/* Indicate that the current trampoline is in a tail call context. It then has
+ * to cache and restore tail_call_cnt to avoid an infinite tail call loop.
+ */
+#define BPF_TRAMP_F_TAIL_CALL_CTX BIT(7)
+
+/*
+ * Indicate the trampoline should be suitable to receive indirect calls;
+ * without this, indirectly calling the generated code can result in #UD/#CP,
+ * depending on the CFI options.
+ *
+ * Used by bpf_struct_ops.
+ *
+ * Incompatible with FENTRY usage, overloads @func_addr argument.
+ */
+#define BPF_TRAMP_F_INDIRECT BIT(8)
+
/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
* bytes on x86.
*/
@@ -1054,10 +1106,17 @@ struct bpf_tramp_run_ctx;
* fexit = a set of program to run after original function
*/
struct bpf_tramp_image;
-int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
+int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
const struct btf_func_model *m, u32 flags,
struct bpf_tramp_links *tlinks,
- void *orig_call);
+ void *func_addr);
+void *arch_alloc_bpf_trampoline(unsigned int size);
+void arch_free_bpf_trampoline(void *image, unsigned int size);
+void arch_protect_bpf_trampoline(void *image, unsigned int size);
+void arch_unprotect_bpf_trampoline(void *image, unsigned int size);
+int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
+ struct bpf_tramp_links *tlinks, void *func_addr);
+
u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
struct bpf_tramp_run_ctx *run_ctx);
void notrace __bpf_prog_exit_sleepable_recur(struct bpf_prog *prog, u64 start,
@@ -1090,6 +1149,7 @@ enum bpf_tramp_prog_type {
struct bpf_tramp_image {
void *image;
+ int size;
struct bpf_ksym ksym;
struct percpu_ref pcref;
void *ip_after_call;
@@ -1125,7 +1185,6 @@ struct bpf_trampoline {
int progs_cnt[BPF_TRAMP_MAX];
/* Executable image of trampoline */
struct bpf_tramp_image *cur_image;
- u64 selector;
struct module *mod;
};
@@ -1160,7 +1219,11 @@ struct bpf_dispatcher {
#endif
};
-static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func(
+#ifndef __bpfcall
+#define __bpfcall __nocfi
+#endif
+
+static __always_inline __bpfcall unsigned int bpf_dispatcher_nop_func(
const void *ctx,
const struct bpf_insn *insnsi,
bpf_func_t bpf_func)
@@ -1197,7 +1260,9 @@ enum bpf_dynptr_type {
};
int bpf_dynptr_check_size(u32 size);
-u32 bpf_dynptr_get_size(const struct bpf_dynptr_kern *ptr);
+u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr);
+const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len);
+void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len);
#ifdef CONFIG_BPF_JIT
int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
@@ -1250,7 +1315,7 @@ int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_func
#define DEFINE_BPF_DISPATCHER(name) \
__BPF_DISPATCHER_SC(name); \
- noinline __nocfi unsigned int bpf_dispatcher_##name##_func( \
+ noinline __bpfcall unsigned int bpf_dispatcher_##name##_func( \
const void *ctx, \
const struct bpf_insn *insnsi, \
bpf_func_t bpf_func) \
@@ -1273,7 +1338,7 @@ int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_func
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
struct bpf_prog *to);
/* Called only from JIT-enabled code, so there's no need for stubs. */
-void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
+void bpf_image_ksym_add(void *data, unsigned int size, struct bpf_ksym *ksym);
void bpf_image_ksym_del(struct bpf_ksym *ksym);
void bpf_ksym_add(struct bpf_ksym *ksym);
void bpf_ksym_del(struct bpf_ksym *ksym);
@@ -1294,7 +1359,7 @@ static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
static inline struct bpf_trampoline *bpf_trampoline_get(u64 key,
struct bpf_attach_target_info *tgt_info)
{
- return ERR_PTR(-EOPNOTSUPP);
+ return NULL;
}
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
#define DEFINE_BPF_DISPATCHER(name)
@@ -1317,6 +1382,8 @@ static inline bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
struct bpf_func_info_aux {
u16 linkage;
bool unreliable;
+ bool called : 1;
+ bool verified : 1;
};
enum bpf_jit_poke_reason {
@@ -1365,6 +1432,7 @@ struct bpf_prog_aux {
u32 stack_depth;
u32 id;
u32 func_cnt; /* used by non-func prog as the number of func progs */
+ u32 real_func_cnt; /* includes hidden progs, only used for JIT and freeing progs */
u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
u32 attach_btf_id; /* in-kernel BTF type id to attach to */
u32 ctx_arg_info_size;
@@ -1381,10 +1449,13 @@ struct bpf_prog_aux {
bool dev_bound; /* Program is bound to the netdev. */
bool offload_requested; /* Program is bound and offloaded to the netdev. */
bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
+ bool attach_tracing_prog; /* true if tracing another tracing program */
bool func_proto_unreliable;
bool sleepable;
bool tail_call_reachable;
bool xdp_has_frags;
+ bool exception_cb;
+ bool exception_boundary;
/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
const struct btf_type *attach_func_proto;
/* function name for valid attach_btf_id */
@@ -1395,6 +1466,9 @@ struct bpf_prog_aux {
struct bpf_kfunc_desc_tab *kfunc_tab;
struct bpf_kfunc_btf_tab *kfunc_btf_tab;
u32 size_poke_tab;
+#ifdef CONFIG_FINEIBT
+ struct bpf_ksym ksym_prefix;
+#endif
struct bpf_ksym ksym;
const struct bpf_prog_ops *ops;
struct bpf_map **used_maps;
@@ -1407,6 +1481,7 @@ struct bpf_prog_aux {
int cgroup_atype; /* enum cgroup_bpf_attach_type */
struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
char name[BPF_OBJ_NAME_LEN];
+ u64 (*bpf_exception_cb)(u64 cookie, u64 sp, u64 bp, u64, u64);
#ifdef CONFIG_SECURITY
void *security;
#endif
@@ -1538,6 +1613,53 @@ struct bpf_struct_ops_value;
struct btf_member;
#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
+/**
+ * struct bpf_struct_ops - A structure of callbacks allowing a subsystem to
+ * define a BPF_MAP_TYPE_STRUCT_OPS map type composed
+ * of BPF_PROG_TYPE_STRUCT_OPS progs.
+ * @verifier_ops: A structure of callbacks that are invoked by the verifier
+ * when determining whether the struct_ops progs in the
+ * struct_ops map are valid.
+ * @init: A callback that is invoked a single time, and before any other
+ * callback, to initialize the structure. A nonzero return value means
+ * the subsystem could not be initialized.
+ * @check_member: When defined, a callback invoked by the verifier to allow
+ * the subsystem to determine if an entry in the struct_ops map
+ * is valid. A nonzero return value means that the map is
+ * invalid and should be rejected by the verifier.
+ * @init_member: A callback that is invoked for each member of the struct_ops
+ * map to allow the subsystem to initialize the member. A nonzero
+ * value means the member could not be initialized. This callback
+ * is exclusive with the @type, @type_id, @value_type, and
+ * @value_id fields.
+ * @reg: A callback that is invoked when the struct_ops map has been
+ * initialized and is being attached to. Zero means the struct_ops map
+ * has been successfully registered and is live. A nonzero return value
+ * means the struct_ops map could not be registered.
+ * @unreg: A callback that is invoked when the struct_ops map should be
+ * unregistered.
+ * @update: A callback that is invoked when the live struct_ops map is being
+ * updated to contain new values. This callback is only invoked when
+ * the struct_ops map is loaded with BPF_F_LINK. If not defined,
+ * it is assumed that the struct_ops map cannot be updated.
+ * @validate: A callback that is invoked after all of the members have been
+ * initialized. This callback should perform static checks on the
+ * map, meaning that it should either fail or succeed
+ * deterministically. A struct_ops map that has been validated may
+ * not necessarily succeed in being registered if the call to @reg
+ * fails. For example, a valid struct_ops map may be loaded, but
+ * then fail to be registered because another struct_ops map is
+ * already active in the subsystem. For this
+ * reason, if this callback is not defined, the check is skipped as
+ * the struct_ops map will have final verification performed in
+ * @reg.
+ * @type: BTF type.
+ * @value_type: Value type.
+ * @name: The name of the struct bpf_struct_ops object.
+ * @func_models: Func models
+ * @type_id: BTF type id.
+ * @value_id: BTF value id.
+ */
struct bpf_struct_ops {
const struct bpf_verifier_ops *verifier_ops;
int (*init)(struct btf *btf);
@@ -1557,6 +1679,7 @@ struct bpf_struct_ops {
struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
u32 type_id;
u32 value_id;
+ void *cfi_stubs;
};
#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
@@ -1570,6 +1693,7 @@ int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
struct bpf_tramp_link *link,
const struct btf_func_model *model,
+ void *stub_func,
void *image, void *image_end);
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
@@ -1807,6 +1931,7 @@ struct bpf_cg_run_ctx {
struct bpf_trace_run_ctx {
struct bpf_run_ctx run_ctx;
u64 bpf_cookie;
+ bool is_uprobe;
};
struct bpf_tramp_run_ctx {
@@ -1855,6 +1980,8 @@ bpf_prog_run_array(const struct bpf_prog_array *array,
if (unlikely(!array))
return ret;
+ run_ctx.is_uprobe = false;
+
migrate_disable();
old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
item = &array->items[0];
@@ -1879,8 +2006,8 @@ bpf_prog_run_array(const struct bpf_prog_array *array,
* rcu-protected dynamically sized maps.
*/
static __always_inline u32
-bpf_prog_run_array_sleepable(const struct bpf_prog_array __rcu *array_rcu,
- const void *ctx, bpf_prog_run_fn run_prog)
+bpf_prog_run_array_uprobe(const struct bpf_prog_array __rcu *array_rcu,
+ const void *ctx, bpf_prog_run_fn run_prog)
{
const struct bpf_prog_array_item *item;
const struct bpf_prog *prog;
@@ -1894,6 +2021,8 @@ bpf_prog_run_array_sleepable(const struct bpf_prog_array __rcu *array_rcu,
rcu_read_lock_trace();
migrate_disable();
+ run_ctx.is_uprobe = true;
+
array = rcu_dereference_check(array_rcu, rcu_read_lock_trace_held());
if (unlikely(!array))
goto out;
@@ -1978,6 +2107,7 @@ struct btf_record *btf_record_dup(const struct btf_record *rec);
bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b);
void bpf_obj_free_timer(const struct btf_record *rec, void *obj);
void bpf_obj_free_fields(const struct btf_record *rec, void *obj);
+void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu);
struct bpf_map *bpf_map_get(u32 ufd);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
@@ -2041,6 +2171,35 @@ bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align,
}
#endif
+static inline int
+bpf_map_init_elem_count(struct bpf_map *map)
+{
+ size_t size = sizeof(*map->elem_count), align = size;
+ gfp_t flags = GFP_USER | __GFP_NOWARN;
+
+ map->elem_count = bpf_map_alloc_percpu(map, size, align, flags);
+ if (!map->elem_count)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline void
+bpf_map_free_elem_count(struct bpf_map *map)
+{
+ free_percpu(map->elem_count);
+}
+
+static inline void bpf_map_inc_elem_count(struct bpf_map *map)
+{
+ this_cpu_inc(*map->elem_count);
+}
+
+static inline void bpf_map_dec_elem_count(struct bpf_map *map)
+{
+ this_cpu_dec(*map->elem_count);
+}
+
extern int sysctl_unprivileged_bpf_disabled;
static inline bool bpf_allow_ptr_leaks(void)
@@ -2055,12 +2214,12 @@ static inline bool bpf_allow_uninit_stack(void)
static inline bool bpf_bypass_spec_v1(void)
{
- return perfmon_capable();
+ return cpu_mitigations_off() || perfmon_capable();
}
static inline bool bpf_bypass_spec_v4(void)
{
- return perfmon_capable();
+ return cpu_mitigations_off() || perfmon_capable();
}
int bpf_map_new_fd(struct bpf_map *map, int flags);
@@ -2074,12 +2233,11 @@ void bpf_link_cleanup(struct bpf_link_primer *primer);
void bpf_link_inc(struct bpf_link *link);
void bpf_link_put(struct bpf_link *link);
int bpf_link_new_fd(struct bpf_link *link);
-struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd);
struct bpf_link *bpf_link_get_from_fd(u32 ufd);
struct bpf_link *bpf_link_get_curr_or_next(u32 *id);
-int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
-int bpf_obj_get_user(const char __user *pathname, int flags);
+int bpf_obj_pin_user(u32 ufd, int path_fd, const char __user *pathname);
+int bpf_obj_get_user(int path_fd, const char __user *pathname, int flags);
#define BPF_ITER_FUNC_PREFIX "bpf_iter_"
#define DEFINE_BPF_ITER_FUNC(target, args...) \
@@ -2309,14 +2467,11 @@ int btf_distill_func_proto(struct bpf_verifier_log *log,
struct btf_func_model *m);
struct bpf_reg_state;
-int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog,
- struct bpf_reg_state *regs);
-int btf_check_subprog_call(struct bpf_verifier_env *env, int subprog,
- struct bpf_reg_state *regs);
-int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
- struct bpf_reg_state *reg);
+int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog);
int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
struct btf *btf, const struct btf_type *t);
+const char *btf_find_decl_tag_value(const struct btf *btf, const struct btf_type *pt,
+ int comp_idx, const char *tag_key);
struct bpf_prog *bpf_prog_by_id(u32 id);
struct bpf_link *bpf_link_by_id(u32 id);
@@ -2368,6 +2523,9 @@ void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
enum bpf_dynptr_type type, u32 offset, u32 size);
void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr);
void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr);
+
+bool dev_check_flush(void);
+bool cpu_map_check_flush(void);
#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
@@ -2620,6 +2778,18 @@ static inline void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr)
}
#endif /* CONFIG_BPF_SYSCALL */
+static __always_inline int
+bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
+{
+ int ret = -EFAULT;
+
+ if (IS_ENABLED(CONFIG_BPF_EVENTS))
+ ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
+ if (unlikely(ret < 0))
+ memset(dst, 0, size);
+ return ret;
+}
+
void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
struct btf_mod_pair *used_btfs, u32 len);
@@ -2800,6 +2970,22 @@ static inline int sock_map_bpf_prog_query(const union bpf_attr *attr,
#endif /* CONFIG_BPF_SYSCALL */
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
+static __always_inline void
+bpf_prog_inc_misses_counters(const struct bpf_prog_array *array)
+{
+ const struct bpf_prog_array_item *item;
+ struct bpf_prog *prog;
+
+ if (unlikely(!array))
+ return;
+
+ item = &array->items[0];
+ while ((prog = READ_ONCE(item->prog))) {
+ bpf_prog_inc_misses_counter(prog);
+ item++;
+ }
+}
+
#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
void bpf_sk_reuseport_detach(struct sock *sk);
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
@@ -3029,6 +3215,9 @@ enum bpf_text_poke_type {
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
void *addr1, void *addr2);
+void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
+ struct bpf_prog *new, struct bpf_prog *old);
+
void *bpf_arch_text_copy(void *dst, void *src, size_t len);
int bpf_arch_text_invalidate(void *dst, size_t len);
@@ -3078,4 +3267,9 @@ static inline gfp_t bpf_memcg_flags(gfp_t flags)
return flags;
}
+static inline bool bpf_is_subprog(const struct bpf_prog *prog)
+{
+ return prog->aux->func_idx != 0;
+}
+
#endif /* _LINUX_BPF_H */
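The new map element counter is a per-cpu s64, so the hot path only does a local this_cpu_inc()/this_cpu_dec(); a total is assembled lazily by whoever needs it. A sketch of how a map implementation might consume the helpers, with the summation side assumed for illustration:

/* Hypothetical map-side usage of the per-cpu element counters. */
static int example_map_insert(struct bpf_map *map)
{
	/* ... allocate and link the element, then: */
	bpf_map_inc_elem_count(map);	/* lock-free, cache-local */
	return 0;
}

/* Assumed reader (not in this patch): fold the per-cpu counters. */
static s64 example_map_count(struct bpf_map *map)
{
	s64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(map->elem_count, cpu);
	return sum;
}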
diff --git a/include/linux/bpf_mem_alloc.h b/include/linux/bpf_mem_alloc.h
index 3929be5743f4..aaf004d94322 100644
--- a/include/linux/bpf_mem_alloc.h
+++ b/include/linux/bpf_mem_alloc.h
@@ -11,6 +11,8 @@ struct bpf_mem_caches;
struct bpf_mem_alloc {
struct bpf_mem_caches __percpu *caches;
struct bpf_mem_cache __percpu *cache;
+ struct obj_cgroup *objcg;
+ bool percpu;
struct work_struct work;
};
@@ -20,17 +22,26 @@ struct bpf_mem_alloc {
* 'size = 0' is for bpf_mem_alloc which manages many fixed-size objects.
* Alloc and free are done with bpf_mem_{alloc,free}() and the size of
* the returned object is given by the size argument of bpf_mem_alloc().
+ * If percpu is true, an error is returned to avoid large memory
+ * consumption, and the below bpf_mem_alloc_percpu_unit_init() should
+ * be used to do on-demand per-cpu allocation for each unit size.
*/
int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu);
+/* Initialize a non-fixed-size percpu memory allocator */
+int bpf_mem_alloc_percpu_init(struct bpf_mem_alloc *ma, struct obj_cgroup *objcg);
+/* The percpu allocation with a specific unit size. */
+int bpf_mem_alloc_percpu_unit_init(struct bpf_mem_alloc *ma, int size);
void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma);
/* kmalloc/kfree equivalent: */
void *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size);
void bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr);
+void bpf_mem_free_rcu(struct bpf_mem_alloc *ma, void *ptr);
/* kmem_cache_alloc/free equivalent: */
void *bpf_mem_cache_alloc(struct bpf_mem_alloc *ma);
void bpf_mem_cache_free(struct bpf_mem_alloc *ma, void *ptr);
+void bpf_mem_cache_free_rcu(struct bpf_mem_alloc *ma, void *ptr);
void bpf_mem_cache_raw_free(void *ptr);
void *bpf_mem_cache_alloc_flags(struct bpf_mem_alloc *ma, gfp_t flags);
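With the split API, a non-fixed-size per-cpu allocator starts empty and populates caches per unit size on demand, instead of pre-allocating every size class up front. A usage sketch under the assumption that the caller can tolerate -ENOMEM:

/* Sketch: on-demand per-cpu allocator setup with the new calls. */
static int example_percpu_setup(struct bpf_mem_alloc *ma, int unit_size)
{
	int err;

	err = bpf_mem_alloc_percpu_init(ma, NULL);	/* no objcg accounting */
	if (err)
		return err;

	err = bpf_mem_alloc_percpu_unit_init(ma, unit_size);
	if (err)
		bpf_mem_alloc_destroy(ma);
	return err;
}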
diff --git a/include/linux/bpf_mprog.h b/include/linux/bpf_mprog.h
new file mode 100644
index 000000000000..929225f7b095
--- /dev/null
+++ b/include/linux/bpf_mprog.h
@@ -0,0 +1,343 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2023 Isovalent */
+#ifndef __BPF_MPROG_H
+#define __BPF_MPROG_H
+
+#include <linux/bpf.h>
+
+/* bpf_mprog framework:
+ *
+ * bpf_mprog is a generic layer for multi-program attachment. In-kernel users
+ * of the bpf_mprog don't need to care about the dependency resolution
+ * internals; they can just consume it with a few API calls. Currently available
+ * dependency directives are BPF_F_{BEFORE,AFTER} which enable insertion of
+ * a BPF program or BPF link relative to an existing BPF program or BPF link
+ * inside the multi-program array as well as prepend and append behavior if
+ * no relative object was specified, see corresponding selftests for concrete
+ * examples (e.g. tc_links and tc_opts test cases of test_progs).
+ *
+ * Usage of bpf_mprog_{attach,detach,query}() core APIs with pseudo code:
+ *
+ * Attach case:
+ *
+ * struct bpf_mprog_entry *entry, *entry_new;
+ * int ret;
+ *
+ * // bpf_mprog user-side lock
+ * // fetch active @entry from attach location
+ * [...]
+ * ret = bpf_mprog_attach(entry, &entry_new, [...]);
+ * if (!ret) {
+ * if (entry != entry_new) {
+ * // swap @entry to @entry_new at attach location
+ * // ensure there are no inflight users of @entry:
+ * synchronize_rcu();
+ * }
+ * bpf_mprog_commit(entry);
+ * } else {
+ * // error path, bail out, propagate @ret
+ * }
+ * // bpf_mprog user-side unlock
+ *
+ * Detach case:
+ *
+ * struct bpf_mprog_entry *entry, *entry_new;
+ * int ret;
+ *
+ * // bpf_mprog user-side lock
+ * // fetch active @entry from attach location
+ * [...]
+ * ret = bpf_mprog_detach(entry, &entry_new, [...]);
+ * if (!ret) {
+ * // all (*) marked is optional and depends on the use-case
+ * // whether bpf_mprog_bundle should be freed or not
+ * if (!bpf_mprog_total(entry_new)) (*)
+ * entry_new = NULL (*)
+ * // swap @entry to @entry_new at attach location
+ * // ensure there are no inflight users of @entry:
+ * synchronize_rcu();
+ * bpf_mprog_commit(entry);
+ * if (!entry_new) (*)
+ * // free bpf_mprog_bundle (*)
+ * } else {
+ * // error path, bail out, propagate @ret
+ * }
+ * // bpf_mprog user-side unlock
+ *
+ * Query case:
+ *
+ * struct bpf_mprog_entry *entry;
+ * int ret;
+ *
+ * // bpf_mprog user-side lock
+ * // fetch active @entry from attach location
+ * [...]
+ * ret = bpf_mprog_query(attr, uattr, entry);
+ * // bpf_mprog user-side unlock
+ *
+ * Data/fast path:
+ *
+ * struct bpf_mprog_entry *entry;
+ * struct bpf_mprog_fp *fp;
+ * struct bpf_prog *prog;
+ * int ret = [...];
+ *
+ * rcu_read_lock();
+ * // fetch active @entry from attach location
+ * [...]
+ * bpf_mprog_foreach_prog(entry, fp, prog) {
+ * ret = bpf_prog_run(prog, [...]);
+ * // process @ret from program
+ * }
+ * [...]
+ * rcu_read_unlock();
+ *
+ * bpf_mprog locking considerations:
+ *
+ * bpf_mprog_{attach,detach,query}() must be protected by an external lock
+ * (like RTNL in case of tcx).
+ *
+ * bpf_mprog_entry pointer can be an __rcu annotated pointer (in case of tcx
+ * the netdevice has tcx_ingress and tcx_egress __rcu pointer) which gets
+ * updated via rcu_assign_pointer() pointing to the active bpf_mprog_entry of
+ * the bpf_mprog_bundle.
+ *
+ * Fast path accesses the active bpf_mprog_entry within RCU critical section
+ * (in case of tcx it runs in NAPI which provides RCU protection there,
+ * other users might need explicit rcu_read_lock()). The bpf_mprog_commit()
+ * assumes that for the old bpf_mprog_entry there are no inflight users
+ * anymore.
+ *
+ * The READ_ONCE()/WRITE_ONCE() pairing for bpf_mprog_fp's prog access is for
+ * the replacement case where we don't swap the bpf_mprog_entry.
+ */
+
+#define bpf_mprog_foreach_tuple(entry, fp, cp, t) \
+ for (fp = &entry->fp_items[0], cp = &entry->parent->cp_items[0];\
+ ({ \
+ t.prog = READ_ONCE(fp->prog); \
+ t.link = cp->link; \
+ t.prog; \
+ }); \
+ fp++, cp++)
+
+#define bpf_mprog_foreach_prog(entry, fp, p) \
+ for (fp = &entry->fp_items[0]; \
+ (p = READ_ONCE(fp->prog)); \
+ fp++)
+
+#define BPF_MPROG_MAX 64
+
+struct bpf_mprog_fp {
+ struct bpf_prog *prog;
+};
+
+struct bpf_mprog_cp {
+ struct bpf_link *link;
+};
+
+struct bpf_mprog_entry {
+ struct bpf_mprog_fp fp_items[BPF_MPROG_MAX];
+ struct bpf_mprog_bundle *parent;
+};
+
+struct bpf_mprog_bundle {
+ struct bpf_mprog_entry a;
+ struct bpf_mprog_entry b;
+ struct bpf_mprog_cp cp_items[BPF_MPROG_MAX];
+ struct bpf_prog *ref;
+ atomic64_t revision;
+ u32 count;
+};
+
+struct bpf_tuple {
+ struct bpf_prog *prog;
+ struct bpf_link *link;
+};
+
+static inline struct bpf_mprog_entry *
+bpf_mprog_peer(const struct bpf_mprog_entry *entry)
+{
+ if (entry == &entry->parent->a)
+ return &entry->parent->b;
+ else
+ return &entry->parent->a;
+}
+
+static inline void bpf_mprog_bundle_init(struct bpf_mprog_bundle *bundle)
+{
+ BUILD_BUG_ON(sizeof(bundle->a.fp_items[0]) > sizeof(u64));
+ BUILD_BUG_ON(ARRAY_SIZE(bundle->a.fp_items) !=
+ ARRAY_SIZE(bundle->cp_items));
+
+ memset(bundle, 0, sizeof(*bundle));
+ atomic64_set(&bundle->revision, 1);
+ bundle->a.parent = bundle;
+ bundle->b.parent = bundle;
+}
+
+static inline void bpf_mprog_inc(struct bpf_mprog_entry *entry)
+{
+ entry->parent->count++;
+}
+
+static inline void bpf_mprog_dec(struct bpf_mprog_entry *entry)
+{
+ entry->parent->count--;
+}
+
+static inline int bpf_mprog_max(void)
+{
+ return ARRAY_SIZE(((struct bpf_mprog_entry *)NULL)->fp_items) - 1;
+}
+
+static inline int bpf_mprog_total(struct bpf_mprog_entry *entry)
+{
+ int total = entry->parent->count;
+
+ WARN_ON_ONCE(total > bpf_mprog_max());
+ return total;
+}
+
+static inline bool bpf_mprog_exists(struct bpf_mprog_entry *entry,
+ struct bpf_prog *prog)
+{
+ const struct bpf_mprog_fp *fp;
+ const struct bpf_prog *tmp;
+
+ bpf_mprog_foreach_prog(entry, fp, tmp) {
+ if (tmp == prog)
+ return true;
+ }
+ return false;
+}
+
+static inline void bpf_mprog_mark_for_release(struct bpf_mprog_entry *entry,
+ struct bpf_tuple *tuple)
+{
+ WARN_ON_ONCE(entry->parent->ref);
+ if (!tuple->link)
+ entry->parent->ref = tuple->prog;
+}
+
+static inline void bpf_mprog_complete_release(struct bpf_mprog_entry *entry)
+{
+ /* In the non-link case prog deletions can only drop the reference
+ * to the prog after the bpf_mprog_entry got swapped and the
+ * bpf_mprog ensured that there are no inflight users anymore.
+ *
+ * Paired with bpf_mprog_mark_for_release().
+ */
+ if (entry->parent->ref) {
+ bpf_prog_put(entry->parent->ref);
+ entry->parent->ref = NULL;
+ }
+}
+
+static inline void bpf_mprog_revision_new(struct bpf_mprog_entry *entry)
+{
+ atomic64_inc(&entry->parent->revision);
+}
+
+static inline void bpf_mprog_commit(struct bpf_mprog_entry *entry)
+{
+ bpf_mprog_complete_release(entry);
+ bpf_mprog_revision_new(entry);
+}
+
+static inline u64 bpf_mprog_revision(struct bpf_mprog_entry *entry)
+{
+ return atomic64_read(&entry->parent->revision);
+}
+
+static inline void bpf_mprog_entry_copy(struct bpf_mprog_entry *dst,
+ struct bpf_mprog_entry *src)
+{
+ memcpy(dst->fp_items, src->fp_items, sizeof(src->fp_items));
+}
+
+static inline void bpf_mprog_entry_clear(struct bpf_mprog_entry *dst)
+{
+ memset(dst->fp_items, 0, sizeof(dst->fp_items));
+}
+
+static inline void bpf_mprog_clear_all(struct bpf_mprog_entry *entry,
+ struct bpf_mprog_entry **entry_new)
+{
+ struct bpf_mprog_entry *peer;
+
+ peer = bpf_mprog_peer(entry);
+ bpf_mprog_entry_clear(peer);
+ peer->parent->count = 0;
+ *entry_new = peer;
+}
+
+static inline void bpf_mprog_entry_grow(struct bpf_mprog_entry *entry, int idx)
+{
+ int total = bpf_mprog_total(entry);
+
+ memmove(entry->fp_items + idx + 1,
+ entry->fp_items + idx,
+ (total - idx) * sizeof(struct bpf_mprog_fp));
+
+ memmove(entry->parent->cp_items + idx + 1,
+ entry->parent->cp_items + idx,
+ (total - idx) * sizeof(struct bpf_mprog_cp));
+}
+
+static inline void bpf_mprog_entry_shrink(struct bpf_mprog_entry *entry, int idx)
+{
+ /* Total array size is needed in this case to ensure the NULL
+ * entry is copied at the end.
+ */
+ int total = ARRAY_SIZE(entry->fp_items);
+
+ memmove(entry->fp_items + idx,
+ entry->fp_items + idx + 1,
+ (total - idx - 1) * sizeof(struct bpf_mprog_fp));
+
+ memmove(entry->parent->cp_items + idx,
+ entry->parent->cp_items + idx + 1,
+ (total - idx - 1) * sizeof(struct bpf_mprog_cp));
+}
+
+static inline void bpf_mprog_read(struct bpf_mprog_entry *entry, u32 idx,
+ struct bpf_mprog_fp **fp,
+ struct bpf_mprog_cp **cp)
+{
+ *fp = &entry->fp_items[idx];
+ *cp = &entry->parent->cp_items[idx];
+}
+
+static inline void bpf_mprog_write(struct bpf_mprog_fp *fp,
+ struct bpf_mprog_cp *cp,
+ struct bpf_tuple *tuple)
+{
+ WRITE_ONCE(fp->prog, tuple->prog);
+ cp->link = tuple->link;
+}
+
+int bpf_mprog_attach(struct bpf_mprog_entry *entry,
+ struct bpf_mprog_entry **entry_new,
+ struct bpf_prog *prog_new, struct bpf_link *link,
+ struct bpf_prog *prog_old,
+ u32 flags, u32 id_or_fd, u64 revision);
+
+int bpf_mprog_detach(struct bpf_mprog_entry *entry,
+ struct bpf_mprog_entry **entry_new,
+ struct bpf_prog *prog, struct bpf_link *link,
+ u32 flags, u32 id_or_fd, u64 revision);
+
+int bpf_mprog_query(const union bpf_attr *attr, union bpf_attr __user *uattr,
+ struct bpf_mprog_entry *entry);
+
+static inline bool bpf_mprog_supported(enum bpf_prog_type type)
+{
+ switch (type) {
+ case BPF_PROG_TYPE_SCHED_CLS:
+ return true;
+ default:
+ return false;
+ }
+}
+#endif /* __BPF_MPROG_H */
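To make the header's "Data/fast path" pseudo code concrete, here is a hedged sketch of an RCU-protected run loop over an attach point; the location pointer and the return-code policy are assumptions, not part of this file:

/* Hypothetical fast-path consumer of a bpf_mprog attach location. */
static int example_mprog_run(struct bpf_mprog_entry __rcu **loc, void *ctx)
{
	struct bpf_mprog_entry *entry;
	struct bpf_mprog_fp *fp;
	struct bpf_prog *prog;
	int ret = 0;

	rcu_read_lock();
	entry = rcu_dereference(*loc);	/* active entry, may be NULL */
	if (entry) {
		bpf_mprog_foreach_prog(entry, fp, prog) {
			ret = bpf_prog_run(prog, ctx);
			if (!ret)	/* example policy: first deny wins */
				break;
		}
	}
	rcu_read_unlock();
	return ret;
}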
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index fc0d6f32c687..94baced5a1ad 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -142,9 +142,13 @@ BPF_LINK_TYPE(BPF_LINK_TYPE_ITER, iter)
#ifdef CONFIG_NET
BPF_LINK_TYPE(BPF_LINK_TYPE_NETNS, netns)
BPF_LINK_TYPE(BPF_LINK_TYPE_XDP, xdp)
+BPF_LINK_TYPE(BPF_LINK_TYPE_NETFILTER, netfilter)
+BPF_LINK_TYPE(BPF_LINK_TYPE_TCX, tcx)
+BPF_LINK_TYPE(BPF_LINK_TYPE_NETKIT, netkit)
#endif
#ifdef CONFIG_PERF_EVENTS
BPF_LINK_TYPE(BPF_LINK_TYPE_PERF_EVENT, perf)
#endif
BPF_LINK_TYPE(BPF_LINK_TYPE_KPROBE_MULTI, kprobe_multi)
BPF_LINK_TYPE(BPF_LINK_TYPE_STRUCT_OPS, struct_ops)
+BPF_LINK_TYPE(BPF_LINK_TYPE_UPROBE_MULTI, uprobe_multi)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 3dd29a53b711..d07d857ca67f 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -18,8 +18,11 @@
* that converting umax_value to int cannot overflow.
*/
#define BPF_MAX_VAR_SIZ (1 << 29)
-/* size of type_str_buf in bpf_verifier. */
-#define TYPE_STR_BUF_LEN 128
+/* size of tmp_str_buf in bpf_verifier.
+ * we need at least 306 bytes to fit full stack mask representation
+ * (in the "-8,-16,...,-512" form)
+ */
+#define TMP_STR_BUF_LEN 320
/* Liveness marks, used for registers and spilled-regs (in stack slots).
* Read marks propagate upwards until they find a write mark; they record that
@@ -238,6 +241,10 @@ enum bpf_stack_slot_type {
#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */
+#define BPF_REGMASK_ARGS ((1 << BPF_REG_1) | (1 << BPF_REG_2) | \
+ (1 << BPF_REG_3) | (1 << BPF_REG_4) | \
+ (1 << BPF_REG_5))
+
#define BPF_DYNPTR_SIZE sizeof(struct bpf_dynptr_kern)
#define BPF_DYNPTR_NR_SLOTS (BPF_DYNPTR_SIZE / BPF_REG_SIZE)
@@ -268,6 +275,11 @@ struct bpf_reference_state {
int callback_ref;
};
+struct bpf_retval_range {
+ s32 minval;
+ s32 maxval;
+};
+
/* state of the program:
* type of all registers and stack info
*/
@@ -290,28 +302,67 @@ struct bpf_func_state {
* void foo(void) { bpf_timer_set_callback(,foo); }
*/
u32 async_entry_cnt;
+ struct bpf_retval_range callback_ret_range;
bool in_callback_fn;
- struct tnum callback_ret_range;
bool in_async_callback_fn;
+ bool in_exception_callback_fn;
+ /* For callback calling functions that limit number of possible
+ * callback executions (e.g. bpf_loop) keeps track of current
+ * simulated iteration number.
+ * Value in frame N refers to number of times callback with frame
+ * N+1 was simulated, e.g. for the following call:
+ *
+ * bpf_loop(..., fn, ...); | suppose current frame is N
+ * | fn would be simulated in frame N+1
+ * | number of simulations is tracked in frame N
+ */
+ u32 callback_depth;
/* The following fields should be last. See copy_func_state() */
int acquired_refs;
struct bpf_reference_state *refs;
- int allocated_stack;
+ /* The state of the stack. Each element of the array describes BPF_REG_SIZE
+ * (i.e. 8) bytes worth of stack memory.
+ * stack[0] represents bytes [*(r10-8)..*(r10-1)]
+ * stack[1] represents bytes [*(r10-16)..*(r10-9)]
+ * ...
+ * stack[allocated_stack/8 - 1] represents [*(r10-allocated_stack)..*(r10-allocated_stack+7)]
+ */
struct bpf_stack_state *stack;
+ /* Size of the current stack, in bytes. The stack state is tracked below, in
+ * `stack`. allocated_stack is always a multiple of BPF_REG_SIZE.
+ */
+ int allocated_stack;
};
-struct bpf_idx_pair {
- u32 prev_idx;
- u32 idx;
+#define MAX_CALL_FRAMES 8
+
+/* instruction history flags, used in bpf_jmp_history_entry.flags field */
+enum {
+ /* instruction references stack slot through PTR_TO_STACK register;
+ * we also store stack's frame number in lower 3 bits (MAX_CALL_FRAMES is 8)
+ * and accessed stack slot's index in next 6 bits (MAX_BPF_STACK is 512,
+ * 8 bytes per slot, so slot index (spi) is [0, 63])
+ */
+ INSN_F_FRAMENO_MASK = 0x7, /* 3 bits */
+
+ INSN_F_SPI_MASK = 0x3f, /* 6 bits */
+ INSN_F_SPI_SHIFT = 3, /* shifted 3 bits to the left */
+
+ INSN_F_STACK_ACCESS = BIT(9), /* we need 10 bits total */
};
-struct bpf_id_pair {
- u32 old;
- u32 cur;
+static_assert(INSN_F_FRAMENO_MASK + 1 >= MAX_CALL_FRAMES);
+static_assert(INSN_F_SPI_MASK + 1 >= MAX_BPF_STACK / 8);
+
+struct bpf_jmp_history_entry {
+ u32 idx;
+ /* insn idx can't be bigger than 1 million */
+ u32 prev_idx : 22;
+ /* special flags, e.g., whether insn is doing register stack spill/load */
+ u32 flags : 10;
};
-#define MAX_CALL_FRAMES 8
/* Maximum number of register states that can exist at once */
#define BPF_ID_MAP_SIZE ((MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) * MAX_CALL_FRAMES)
struct bpf_verifier_state {
@@ -370,32 +421,48 @@ struct bpf_verifier_state {
struct bpf_active_lock active_lock;
bool speculative;
bool active_rcu_lock;
+ /* If this state was ever pointed-to by other state's loop_entry field
+ * this flag would be set to true. Used to avoid freeing such states
+ * while they are still in use.
+ */
+ bool used_as_loop_entry;
/* first and last insn idx of this verifier state */
u32 first_insn_idx;
u32 last_insn_idx;
+ /* If this state is a part of states loop this field points to some
+ * parent of this state such that:
+ * - it is also a member of the same states loop;
+ * - DFS states traversal starting from initial state visits loop_entry
+ * state before this state.
+ * Used to compute topmost loop entry for state loops.
+ * State loops might appear because of open coded iterators logic.
+ * See get_loop_entry() for more information.
+ */
+ struct bpf_verifier_state *loop_entry;
/* jmp history recorded from first to last.
* backtracking is using it to go from last to first.
* For most states jmp_history_cnt is [0-3].
* For loops can go up to ~40.
*/
- struct bpf_idx_pair *jmp_history;
+ struct bpf_jmp_history_entry *jmp_history;
u32 jmp_history_cnt;
+ u32 dfs_depth;
+ u32 callback_unroll_depth;
};
-#define bpf_get_spilled_reg(slot, frame) \
+#define bpf_get_spilled_reg(slot, frame, mask) \
(((slot < frame->allocated_stack / BPF_REG_SIZE) && \
- (frame->stack[slot].slot_type[0] == STACK_SPILL)) \
+ ((1 << frame->stack[slot].slot_type[0]) & (mask))) \
? &frame->stack[slot].spilled_ptr : NULL)
/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */
-#define bpf_for_each_spilled_reg(iter, frame, reg) \
- for (iter = 0, reg = bpf_get_spilled_reg(iter, frame); \
+#define bpf_for_each_spilled_reg(iter, frame, reg, mask) \
+ for (iter = 0, reg = bpf_get_spilled_reg(iter, frame, mask); \
iter < frame->allocated_stack / BPF_REG_SIZE; \
- iter++, reg = bpf_get_spilled_reg(iter, frame))
+ iter++, reg = bpf_get_spilled_reg(iter, frame, mask))
-/* Invoke __expr over regsiters in __vst, setting __state and __reg */
-#define bpf_for_each_reg_in_vstate(__vst, __state, __reg, __expr) \
+#define bpf_for_each_reg_in_vstate_mask(__vst, __state, __reg, __mask, __expr) \
({ \
struct bpf_verifier_state *___vstate = __vst; \
int ___i, ___j; \
@@ -407,7 +474,7 @@ struct bpf_verifier_state {
__reg = &___regs[___j]; \
(void)(__expr); \
} \
- bpf_for_each_spilled_reg(___j, __state, __reg) { \
+ bpf_for_each_spilled_reg(___j, __state, __reg, __mask) { \
if (!__reg) \
continue; \
(void)(__expr); \
@@ -415,6 +482,10 @@ struct bpf_verifier_state {
} \
})
+/* Invoke __expr over registers in __vst, setting __state and __reg */
+#define bpf_for_each_reg_in_vstate(__vst, __state, __reg, __expr) \
+ bpf_for_each_reg_in_vstate_mask(__vst, __state, __reg, 1 << STACK_SPILL, __expr)
+
/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
struct bpf_verifier_state state;
@@ -478,6 +549,7 @@ struct bpf_insn_aux_data {
bool zext_dst; /* this insn zero extends dst reg */
bool storage_get_func_atomic; /* bpf_*_storage_get() with atomic memory alloc */
bool is_iter_next; /* bpf_iter_<type>_next() kfunc call */
+ bool call_with_percpu_alloc_ptr; /* {this,per}_cpu_ptr() with prog percpu alloc */
u8 alu_state; /* used in combination with alu_limit */
/* below fields are initialized once */
@@ -488,6 +560,10 @@ struct bpf_insn_aux_data {
* this instruction, regardless of any heuristics
*/
bool force_checkpoint;
+ /* true if instruction is a call to a helper function that
+ * accepts callback function as a parameter.
+ */
+ bool calls_callback;
};
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
@@ -530,15 +606,52 @@ static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
#define BPF_MAX_SUBPROGS 256
+struct bpf_subprog_arg_info {
+ enum bpf_arg_type arg_type;
+ union {
+ u32 mem_size;
+ };
+};
+
struct bpf_subprog_info {
/* 'start' has to be the first field otherwise find_subprog() won't work */
u32 start; /* insn idx of function entry point */
u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
u16 stack_depth; /* max. stack depth used by this function */
- bool has_tail_call;
- bool tail_call_reachable;
- bool has_ld_abs;
- bool is_async_cb;
+ bool has_tail_call: 1;
+ bool tail_call_reachable: 1;
+ bool has_ld_abs: 1;
+ bool is_cb: 1;
+ bool is_async_cb: 1;
+ bool is_exception_cb: 1;
+ bool args_cached: 1;
+
+ u8 arg_cnt;
+ struct bpf_subprog_arg_info args[MAX_BPF_FUNC_REG_ARGS];
+};
+
+struct bpf_verifier_env;
+
+struct backtrack_state {
+ struct bpf_verifier_env *env;
+ u32 frame;
+ u32 reg_masks[MAX_CALL_FRAMES];
+ u64 stack_masks[MAX_CALL_FRAMES];
+};
+
+struct bpf_id_pair {
+ u32 old;
+ u32 cur;
+};
+
+struct bpf_idmap {
+ u32 tmp_id_gen;
+ struct bpf_id_pair map[BPF_ID_MAP_SIZE];
+};
+
+struct bpf_idset {
+ u32 count;
+ u32 ids[BPF_ID_MAP_SIZE];
};
/* single container for all structs
@@ -553,6 +666,7 @@ struct bpf_verifier_env {
int stack_size; /* number of states to be processed */
bool strict_alignment; /* perform strict pointer alignment checks */
bool test_state_freq; /* test verifier with different pruning frequency */
+ bool test_reg_invariants; /* fail verification on register invariants violations */
struct bpf_verifier_state *cur_state; /* current verifier state */
struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
struct bpf_verifier_state_list *free_list;
@@ -561,23 +675,35 @@ struct bpf_verifier_env {
u32 used_map_cnt; /* number of used maps */
u32 used_btf_cnt; /* number of used BTF objects */
u32 id_gen; /* used to generate unique reg IDs */
+ u32 hidden_subprog_cnt; /* number of hidden subprogs */
+ int exception_callback_subprog;
bool explore_alu_limits;
bool allow_ptr_leaks;
+ /* Allow access to uninitialized stack memory. Writes with fixed offset are
+ * always allowed, so this refers to reads (with fixed or variable offset),
+ * to writes with variable offset and to indirect (helper) accesses.
+ */
bool allow_uninit_stack;
bool bpf_capable;
bool bypass_spec_v1;
bool bypass_spec_v4;
bool seen_direct_write;
+ bool seen_exception;
struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
const struct bpf_line_info *prev_linfo;
struct bpf_verifier_log log;
- struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
- struct bpf_id_pair idmap_scratch[BPF_ID_MAP_SIZE];
+ struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 2]; /* max + 2 for the fake and exception subprogs */
+ union {
+ struct bpf_idmap idmap_scratch;
+ struct bpf_idset idset_scratch;
+ };
struct {
int *insn_state;
int *insn_stack;
int cur_stack;
} cfg;
+ struct backtrack_state bt;
+ struct bpf_jmp_history_entry *cur_hist_ent;
u32 pass_cnt; /* number of times do_check() was called */
u32 subprog_cnt;
/* number of instructions analyzed by the verifier */
@@ -606,10 +732,22 @@ struct bpf_verifier_env {
/* Same as scratched_regs but for stack slots */
u64 scratched_stack_slots;
u64 prev_log_pos, prev_insn_print_pos;
- /* buffer used in reg_type_str() to generate reg_type string */
- char type_str_buf[TYPE_STR_BUF_LEN];
+ /* buffer used to generate temporary string representations,
+ * e.g., in reg_type_str() to generate reg_type string
+ */
+ char tmp_str_buf[TMP_STR_BUF_LEN];
};
+static inline struct bpf_func_info_aux *subprog_aux(struct bpf_verifier_env *env, int subprog)
+{
+ return &env->prog->aux->func_info_aux[subprog];
+}
+
+static inline struct bpf_subprog_info *subprog_info(struct bpf_verifier_env *env, int subprog)
+{
+ return &env->subprog_info[subprog];
+}
+
__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
const char *fmt, va_list args);
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
@@ -621,6 +759,10 @@ int bpf_vlog_init(struct bpf_verifier_log *log, u32 log_level,
void bpf_vlog_reset(struct bpf_verifier_log *log, u64 new_pos);
int bpf_vlog_finalize(struct bpf_verifier_log *log, u32 *log_size_actual);
+__printf(3, 4) void verbose_linfo(struct bpf_verifier_env *env,
+ u32 insn_off,
+ const char *prefix_fmt, ...);
+
static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
{
struct bpf_verifier_state *cur = env->cur_state;
@@ -643,14 +785,6 @@ bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);
-int check_ptr_off_reg(struct bpf_verifier_env *env,
- const struct bpf_reg_state *reg, int regno);
-int check_func_arg_reg_off(struct bpf_verifier_env *env,
- const struct bpf_reg_state *reg, int regno,
- enum bpf_arg_type arg_type);
-int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
- u32 regno, u32 mem_size);
-
/* this lives here instead of in bpf.h because it needs to dereference tgt_prog */
static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog,
struct btf *btf, u32 btf_id)
@@ -713,11 +847,83 @@ static inline bool bpf_prog_check_recur(const struct bpf_prog *prog)
}
}
-#define BPF_REG_TRUSTED_MODIFIERS (MEM_ALLOC | PTR_TRUSTED)
+#define BPF_REG_TRUSTED_MODIFIERS (MEM_ALLOC | PTR_TRUSTED | NON_OWN_REF)
static inline bool bpf_type_has_unsafe_modifiers(u32 type)
{
return type_flag(type) & ~BPF_REG_TRUSTED_MODIFIERS;
}
+static inline bool type_is_ptr_alloc_obj(u32 type)
+{
+ return base_type(type) == PTR_TO_BTF_ID && type_flag(type) & MEM_ALLOC;
+}
+
+static inline bool type_is_non_owning_ref(u32 type)
+{
+ return type_is_ptr_alloc_obj(type) && type_flag(type) & NON_OWN_REF;
+}
+
+static inline bool type_is_pkt_pointer(enum bpf_reg_type type)
+{
+ type = base_type(type);
+ return type == PTR_TO_PACKET ||
+ type == PTR_TO_PACKET_META;
+}
+
+static inline bool type_is_sk_pointer(enum bpf_reg_type type)
+{
+ return type == PTR_TO_SOCKET ||
+ type == PTR_TO_SOCK_COMMON ||
+ type == PTR_TO_TCP_SOCK ||
+ type == PTR_TO_XDP_SOCK;
+}
+
+static inline void mark_reg_scratched(struct bpf_verifier_env *env, u32 regno)
+{
+ env->scratched_regs |= 1U << regno;
+}
+
+static inline void mark_stack_slot_scratched(struct bpf_verifier_env *env, u32 spi)
+{
+ env->scratched_stack_slots |= 1ULL << spi;
+}
+
+static inline bool reg_scratched(const struct bpf_verifier_env *env, u32 regno)
+{
+ return (env->scratched_regs >> regno) & 1;
+}
+
+static inline bool stack_slot_scratched(const struct bpf_verifier_env *env, u64 regno)
+{
+ return (env->scratched_stack_slots >> regno) & 1;
+}
+
+static inline bool verifier_state_scratched(const struct bpf_verifier_env *env)
+{
+ return env->scratched_regs || env->scratched_stack_slots;
+}
+
+static inline void mark_verifier_state_clean(struct bpf_verifier_env *env)
+{
+ env->scratched_regs = 0U;
+ env->scratched_stack_slots = 0ULL;
+}
+
+/* Used for printing the entire verifier state. */
+static inline void mark_verifier_state_scratched(struct bpf_verifier_env *env)
+{
+ env->scratched_regs = ~0U;
+ env->scratched_stack_slots = ~0ULL;
+}
+
+const char *reg_type_str(struct bpf_verifier_env *env, enum bpf_reg_type type);
+const char *dynptr_type_str(enum bpf_dynptr_type type);
+const char *iter_type_str(const struct btf *btf, u32 btf_id);
+const char *iter_state_str(enum bpf_iter_state state);
+
+void print_verifier_state(struct bpf_verifier_env *env,
+ const struct bpf_func_state *state, bool print_all);
+void print_insn_state(struct bpf_verifier_env *env, const struct bpf_func_state *state);
+
#endif /* _LINUX_BPF_VERIFIER_H */
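The scratch-tracking helpers moved into bpf_verifier.h above are a straightforward bitmask scheme: one bit per register in a u32, one bit per stack slot in a u64. A minimal standalone sketch of the same bookkeeping, with the environment reduced to just the two masks (illustration only, not the in-kernel struct):

/* Standalone illustration of the scratched_regs bookkeeping above. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct env {
        uint32_t scratched_regs;
        uint64_t scratched_stack_slots;
};

static void mark_reg_scratched(struct env *env, uint32_t regno)
{
        env->scratched_regs |= 1U << regno;
}

static bool reg_scratched(const struct env *env, uint32_t regno)
{
        return (env->scratched_regs >> regno) & 1;
}

int main(void)
{
        struct env env = { 0 };

        mark_reg_scratched(&env, 3);    /* e.g. r3 written by an insn */
        printf("r3: %d, r4: %d\n",
               reg_scratched(&env, 3), reg_scratched(&env, 4));
        return 0;       /* prints "r3: 1, r4: 0" */
}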
diff --git a/include/linux/bpfilter.h b/include/linux/bpfilter.h
deleted file mode 100644
index 2ae3c8e1d83c..000000000000
--- a/include/linux/bpfilter.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_BPFILTER_H
-#define _LINUX_BPFILTER_H
-
-#include <uapi/linux/bpfilter.h>
-#include <linux/usermode_driver.h>
-#include <linux/sockptr.h>
-
-struct sock;
-int bpfilter_ip_set_sockopt(struct sock *sk, int optname, sockptr_t optval,
- unsigned int optlen);
-int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval,
- int __user *optlen);
-void bpfilter_umh_cleanup(struct umd_info *info);
-
-struct bpfilter_umh_ops {
- struct umd_info info;
- /* since ip_getsockopt() can run in parallel, serialize access to umh */
- struct mutex lock;
- int (*sockopt)(struct sock *sk, int optname, sockptr_t optval,
- unsigned int optlen, bool is_set);
- int (*start)(void);
-};
-extern struct bpfilter_umh_ops bpfilter_ops;
-#endif
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 9e77165f3ef6..1394ba302367 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -11,6 +11,7 @@
#define PHY_ID_BCM50610 0x0143bd60
#define PHY_ID_BCM50610M 0x0143bd70
+#define PHY_ID_BCM5221 0x004061e0
#define PHY_ID_BCM5241 0x0143bc30
#define PHY_ID_BCMAC131 0x0143bc70
#define PHY_ID_BCM5481 0x0143bca0
@@ -44,6 +45,7 @@
#define PHY_ID_BCM7366 0x600d8490
#define PHY_ID_BCM7346 0x600d8650
#define PHY_ID_BCM7362 0x600d84b0
+#define PHY_ID_BCM74165 0x359052c0
#define PHY_ID_BCM7425 0x600d86b0
#define PHY_ID_BCM7429 0x600d8730
#define PHY_ID_BCM7435 0x600d8750
@@ -89,6 +91,7 @@
#define MII_BCM54XX_EXP_SEL 0x17 /* Expansion register select */
#define MII_BCM54XX_EXP_SEL_TOP 0x0d00 /* TOP_MISC expansion register select */
#define MII_BCM54XX_EXP_SEL_SSD 0x0e00 /* Secondary SerDes select */
+#define MII_BCM54XX_EXP_SEL_WOL 0x0e00 /* Wake-on-LAN expansion select register */
#define MII_BCM54XX_EXP_SEL_ER 0x0f00 /* Expansion register select */
#define MII_BCM54XX_EXP_SEL_ETC 0x0d00 /* Expansion register spare + 2k mem */
@@ -160,6 +163,7 @@
#define BCM_LED_SRC_OPENSHORT 0xb
#define BCM_LED_SRC_OFF 0xe /* Tied high */
#define BCM_LED_SRC_ON 0xf /* Tied low */
+#define BCM_LED_SRC_MASK GENMASK(3, 0)
/*
* Broadcom Multicolor LED configurations (expansion register 4)
@@ -205,11 +209,13 @@
#define BCM_NO_ANEG_APD_EN 0x0060 /* bits 5 & 6 */
#define BCM_APD_SINGLELP_EN 0x0100 /* Bit 8 */
-#define BCM5482_SHD_LEDS1 0x0d /* 01101: LED Selector 1 */
+#define BCM54XX_SHD_LEDS1 0x0d /* 01101: LED Selector 1 */
/* LED3 / ~LINKSPD[2] selector */
-#define BCM5482_SHD_LEDS1_LED3(src) ((src & 0xf) << 4)
+#define BCM54XX_SHD_LEDS_SHIFT(led) (4 * (led))
+#define BCM54XX_SHD_LEDS1_LED3(src) ((src & 0xf) << 4)
/* LED1 / ~LINKSPD[1] selector */
-#define BCM5482_SHD_LEDS1_LED1(src) ((src & 0xf) << 0)
+#define BCM54XX_SHD_LEDS1_LED1(src) ((src & 0xf) << 0)
+#define BCM54XX_SHD_LEDS2 0x0e /* 01110: LED Selector 2 */
#define BCM54XX_SHD_RGMII_MODE 0x0b /* 01011: RGMII Mode Selector */
#define BCM5482_SHD_SSD 0x14 /* 10100: Secondary SerDes control */
#define BCM5482_SHD_SSD_LEDM 0x0008 /* SSD LED Mode enable */
@@ -253,6 +259,9 @@
#define BCM54XX_TOP_MISC_IDDQ_SD (1 << 2)
#define BCM54XX_TOP_MISC_IDDQ_SR (1 << 3)
+#define BCM54XX_TOP_MISC_LED_CTL (MII_BCM54XX_EXP_SEL_TOP + 0x0C)
+#define BCM54XX_LED4_SEL_INTR BIT(1)
+
/*
* BCM5482: Secondary SerDes registers
*/
@@ -272,6 +281,66 @@
#define BCM54612E_EXP_SPARE0 (MII_BCM54XX_EXP_SEL_ETC + 0x34)
#define BCM54612E_LED4_CLK125OUT_EN (1 << 1)
+
+/* Wake-on-LAN registers */
+#define BCM54XX_WOL_MAIN_CTL (MII_BCM54XX_EXP_SEL_WOL + 0x80)
+#define BCM54XX_WOL_EN BIT(0)
+#define BCM54XX_WOL_MODE_SINGLE_MPD 0
+#define BCM54XX_WOL_MODE_SINGLE_MPDSEC 1
+#define BCM54XX_WOL_MODE_DUAL 2
+#define BCM54XX_WOL_MODE_SHIFT 1
+#define BCM54XX_WOL_MODE_MASK 0x3
+#define BCM54XX_WOL_MP_MSB_FF_EN BIT(3)
+#define BCM54XX_WOL_SECKEY_OPT_4B 0
+#define BCM54XX_WOL_SECKEY_OPT_6B 1
+#define BCM54XX_WOL_SECKEY_OPT_8B 2
+#define BCM54XX_WOL_SECKEY_OPT_SHIFT 4
+#define BCM54XX_WOL_SECKEY_OPT_MASK 0x3
+#define BCM54XX_WOL_L2_TYPE_CHK BIT(6)
+#define BCM54XX_WOL_L4IPV4UDP_CHK BIT(7)
+#define BCM54XX_WOL_L4IPV6UDP_CHK BIT(8)
+#define BCM54XX_WOL_UDPPORT_CHK BIT(9)
+#define BCM54XX_WOL_CRC_CHK BIT(10)
+#define BCM54XX_WOL_SECKEY_MODE BIT(11)
+#define BCM54XX_WOL_RST BIT(12)
+#define BCM54XX_WOL_DIR_PKT_EN BIT(13)
+#define BCM54XX_WOL_MASK_MODE_DA_FF 0
+#define BCM54XX_WOL_MASK_MODE_DA_MPD 1
+#define BCM54XX_WOL_MASK_MODE_DA_ONLY 2
+#define BCM54XX_WOL_MASK_MODE_MPD 3
+#define BCM54XX_WOL_MASK_MODE_SHIFT 14
+#define BCM54XX_WOL_MASK_MODE_MASK 0x3
+
+#define BCM54XX_WOL_INNER_PROTO (MII_BCM54XX_EXP_SEL_WOL + 0x81)
+#define BCM54XX_WOL_OUTER_PROTO (MII_BCM54XX_EXP_SEL_WOL + 0x82)
+#define BCM54XX_WOL_OUTER_PROTO2 (MII_BCM54XX_EXP_SEL_WOL + 0x83)
+
+#define BCM54XX_WOL_MPD_DATA1(x) (MII_BCM54XX_EXP_SEL_WOL + 0x84 + (x))
+#define BCM54XX_WOL_MPD_DATA2(x) (MII_BCM54XX_EXP_SEL_WOL + 0x87 + (x))
+#define BCM54XX_WOL_SEC_KEY_8B (MII_BCM54XX_EXP_SEL_WOL + 0x8A)
+#define BCM54XX_WOL_MASK(x) (MII_BCM54XX_EXP_SEL_WOL + 0x8B + (x))
+#define BCM54XX_SEC_KEY_STORE(x) (MII_BCM54XX_EXP_SEL_WOL + 0x8E)
+#define BCM54XX_WOL_SHARED_CNT (MII_BCM54XX_EXP_SEL_WOL + 0x92)
+
+#define BCM54XX_WOL_INT_MASK (MII_BCM54XX_EXP_SEL_WOL + 0x93)
+#define BCM54XX_WOL_PKT1 BIT(0)
+#define BCM54XX_WOL_PKT2 BIT(1)
+#define BCM54XX_WOL_DIR BIT(2)
+#define BCM54XX_WOL_ALL_INTRS (BCM54XX_WOL_PKT1 | \
+ BCM54XX_WOL_PKT2 | \
+ BCM54XX_WOL_DIR)
+
+#define BCM54XX_WOL_INT_STATUS (MII_BCM54XX_EXP_SEL_WOL + 0x94)
+
+/* BCM5221 Registers */
+#define BCM5221_AEGSR 0x1C
+#define BCM5221_AEGSR_MDIX_STATUS BIT(13)
+#define BCM5221_AEGSR_MDIX_MAN_SWAP BIT(12)
+#define BCM5221_AEGSR_MDIX_DIS BIT(11)
+
+#define BCM5221_SHDW_AM4_EN_CLK_LPM BIT(2)
+#define BCM5221_SHDW_AM4_FORCE_LPM BIT(1)
+
/*****************************************************************************/
/* Fast Ethernet Transceiver definitions. */
/*****************************************************************************/
@@ -304,6 +373,8 @@
#define LPI_FEATURE_EN 0x8000
#define LPI_FEATURE_EN_DIG1000X 0x4000
+#define BRCM_CL45VEN_EEE_LPI_CNT 0x803f
+
/* Core register definitions*/
#define MII_BRCM_CORE_BASE12 0x12
#define MII_BRCM_CORE_BASE13 0x13
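Several of the Wake-on-LAN fields added above are multi-bit values placed with shift/mask pairs. A hedged sketch of composing the BCM54XX_WOL_MAIN_CTL register value from those macros; the eventual write through the expansion register selected by MII_BCM54XX_EXP_SEL_WOL is left out, so this is not the actual driver sequence:

/* Illustration only: build a WoL main-control value from the macros
 * above (single magic-packet mode, DA-only matching, CRC check).
 */
u16 ctl = BCM54XX_WOL_EN |
          (BCM54XX_WOL_MODE_SINGLE_MPD << BCM54XX_WOL_MODE_SHIFT) |
          ((BCM54XX_WOL_MASK_MODE_DA_ONLY & BCM54XX_WOL_MASK_MODE_MASK)
           << BCM54XX_WOL_MASK_MODE_SHIFT) |
          BCM54XX_WOL_CRC_CHK;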
diff --git a/include/linux/bsg.h b/include/linux/bsg.h
index 1ac81c809da9..ee2df73edf83 100644
--- a/include/linux/bsg.h
+++ b/include/linux/bsg.h
@@ -9,7 +9,7 @@ struct device;
struct request_queue;
typedef int (bsg_sg_io_fn)(struct request_queue *, struct sg_io_v4 *hdr,
- fmode_t mode, unsigned int timeout);
+ bool open_for_write, unsigned int timeout);
struct bsg_device *bsg_register_queue(struct request_queue *q,
struct device *parent, const char *name,
diff --git a/include/linux/btf.h b/include/linux/btf.h
index 508199e38415..cf5c6ff48981 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -74,6 +74,7 @@
#define KF_ITER_NEW (1 << 8) /* kfunc implements BPF iter constructor */
#define KF_ITER_NEXT (1 << 9) /* kfunc implements BPF iter next method */
#define KF_ITER_DESTROY (1 << 10) /* kfunc implements BPF iter destructor */
+#define KF_RCU_PROTECTED (1 << 11) /* kfuncs should be protected by an RCU CS when invoked */
/*
* Tag marking a kernel function as a kfunc. This is meant to minimize the
@@ -83,6 +84,17 @@
*/
#define __bpf_kfunc __used noinline
+#define __bpf_kfunc_start_defs() \
+ __diag_push(); \
+ __diag_ignore_all("-Wmissing-declarations", \
+ "Global kfuncs as their definitions will be in BTF");\
+ __diag_ignore_all("-Wmissing-prototypes", \
+ "Global kfuncs as their definitions will be in BTF")
+
+#define __bpf_kfunc_end_defs() __diag_pop()
+#define __bpf_hook_start() __bpf_kfunc_start_defs()
+#define __bpf_hook_end() __bpf_kfunc_end_defs()
+
/*
* Return the name of the passed struct, if exists, or halt the build if for
* example the structure gets renamed. In this way, developers have to revisit
@@ -98,10 +110,14 @@ struct btf_type;
union bpf_attr;
struct btf_show;
struct btf_id_set;
+struct bpf_prog;
+
+typedef int (*btf_kfunc_filter_t)(const struct bpf_prog *prog, u32 kfunc_id);
struct btf_kfunc_id_set {
struct module *owner;
struct btf_id_set8 *set;
+ btf_kfunc_filter_t filter;
};
struct btf_id_dtor_kfunc {
@@ -200,13 +216,12 @@ u32 btf_nr_types(const struct btf *btf);
bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
const struct btf_member *m,
u32 expected_offset, u32 expected_size);
-int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t);
-int btf_find_timer(const struct btf *btf, const struct btf_type *t);
struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type *t,
u32 field_mask, u32 value_size);
int btf_check_and_fixup_fields(const struct btf *btf, struct btf_record *rec);
bool btf_type_is_void(const struct btf_type *t);
s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind);
+s32 bpf_find_btf_id(const char *name, u32 kind, struct btf **btf_p);
const struct btf_type *btf_type_skip_modifiers(const struct btf *btf,
u32 id, u32 *res_id);
const struct btf_type *btf_type_resolve_ptr(const struct btf *btf,
@@ -479,7 +494,6 @@ static inline void *btf_id_set8_contains(const struct btf_id_set8 *set, u32 id)
return bsearch(&id, set->pairs, set->cnt, sizeof(set->pairs[0]), btf_id_cmp_func);
}
-struct bpf_prog;
struct bpf_verifier_log;
#ifdef CONFIG_BPF_SYSCALL
@@ -487,10 +501,10 @@ const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id);
const char *btf_name_by_offset(const struct btf *btf, u32 offset);
struct btf *btf_parse_vmlinux(void);
struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog);
-u32 *btf_kfunc_id_set_contains(const struct btf *btf,
- enum bpf_prog_type prog_type,
- u32 kfunc_btf_id);
-u32 *btf_kfunc_is_modify_return(const struct btf *btf, u32 kfunc_btf_id);
+u32 *btf_kfunc_id_set_contains(const struct btf *btf, u32 kfunc_btf_id,
+ const struct bpf_prog *prog);
+u32 *btf_kfunc_is_modify_return(const struct btf *btf, u32 kfunc_btf_id,
+ const struct bpf_prog *prog);
int register_btf_kfunc_id_set(enum bpf_prog_type prog_type,
const struct btf_kfunc_id_set *s);
int register_btf_fmodret_id_set(const struct btf_kfunc_id_set *kset);
@@ -498,7 +512,7 @@ s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id);
int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_cnt,
struct module *owner);
struct btf_struct_meta *btf_find_struct_meta(const struct btf *btf, u32 btf_id);
-const struct btf_member *
+const struct btf_type *
btf_get_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
const struct btf_type *t, enum bpf_prog_type prog_type,
int arg);
@@ -517,8 +531,9 @@ static inline const char *btf_name_by_offset(const struct btf *btf,
return NULL;
}
static inline u32 *btf_kfunc_id_set_contains(const struct btf *btf,
- enum bpf_prog_type prog_type,
- u32 kfunc_btf_id)
+ u32 kfunc_btf_id,
+ struct bpf_prog *prog)
+
{
return NULL;
}
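The filter member added to struct btf_kfunc_id_set above lets a registrant veto individual kfuncs per program. A sketch modeled on the tracing-iterator gating this mechanism was introduced for, assuming a nonzero return from the filter rejects the kfunc for that program; the ID-set name here is hypothetical:

/* Illustrative filter: deny kfuncs in some_kfunc_ids (hypothetical set)
 * unless the program attaches as a tracing iterator.
 */
static int demo_kfunc_filter(const struct bpf_prog *prog, u32 kfunc_id)
{
        if (btf_id_set8_contains(&some_kfunc_ids, kfunc_id) &&
            prog->expected_attach_type != BPF_TRACE_ITER)
                return -EACCES;
        return 0;
}

static const struct btf_kfunc_id_set demo_kfunc_set = {
        .owner  = THIS_MODULE,
        .set    = &some_kfunc_ids,
        .filter = demo_kfunc_filter,
};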
diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h
index 00950cc03bff..a9cb10b0e2e9 100644
--- a/include/linux/btf_ids.h
+++ b/include/linux/btf_ids.h
@@ -49,7 +49,7 @@ word \
____BTF_ID(symbol, word)
#define __ID(prefix) \
- __PASTE(prefix, __COUNTER__)
+ __PASTE(__PASTE(prefix, __COUNTER__), __LINE__)
/*
* The BTF_ID defines unique symbol for each ID pointing
@@ -267,5 +267,6 @@ MAX_BTF_TRACING_TYPE,
extern u32 btf_tracing_ids[];
extern u32 bpf_cgroup_btf_id[];
extern u32 bpf_local_storage_map_btf_id[];
+extern u32 btf_bpf_map_id[];
#endif
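The __ID() change above stacks __LINE__ on top of __COUNTER__ when generating symbol names, making collisions between separately generated IDs far less likely. A standalone demonstration of the token pasting:

/* Demo of the unique-name trick: paste prefix + __COUNTER__, then
 * paste __LINE__ on top, so two expansions yield distinct identifiers.
 */
#include <stdio.h>

#define ___PASTE(a, b)  a##b
#define __PASTE(a, b)   ___PASTE(a, b)
#define __ID(prefix)    __PASTE(__PASTE(prefix, __COUNTER__), __LINE__)

int __ID(btf_demo_) = 1;        /* expands to e.g. btf_demo_011 */
int __ID(btf_demo_) = 2;        /* expands to e.g. btf_demo_112 */

int main(void)
{
        printf("generated two distinct symbols\n");
        return 0;
}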
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 1520793c72da..d78454a4dd1f 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -16,8 +16,6 @@
#include <linux/wait.h>
#include <linux/atomic.h>
-#ifdef CONFIG_BLOCK
-
enum bh_state_bits {
BH_Uptodate, /* Contains valid data */
BH_Dirty, /* Is dirty */
@@ -173,7 +171,10 @@ static __always_inline int buffer_uptodate(const struct buffer_head *bh)
return test_bit_acquire(BH_Uptodate, &bh->b_state);
}
-#define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK)
+static inline unsigned long bh_offset(const struct buffer_head *bh)
+{
+ return (unsigned long)(bh)->b_data & (page_size(bh->b_page) - 1);
+}
/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page) \
@@ -194,29 +195,23 @@ void buffer_check_dirty_writeback(struct folio *folio,
void mark_buffer_dirty(struct buffer_head *bh);
void mark_buffer_write_io_error(struct buffer_head *bh);
void touch_buffer(struct buffer_head *bh);
-void set_bh_page(struct buffer_head *bh,
- struct page *page, unsigned long offset);
void folio_set_bh(struct buffer_head *bh, struct folio *folio,
unsigned long offset);
-bool try_to_free_buffers(struct folio *);
struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
- bool retry);
+ gfp_t gfp);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
bool retry);
-void create_empty_buffers(struct page *, unsigned long,
- unsigned long b_state);
-void folio_create_empty_buffers(struct folio *folio, unsigned long blocksize,
- unsigned long b_state);
+struct buffer_head *create_empty_buffers(struct folio *folio,
+ unsigned long blocksize, unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
-void end_buffer_async_write(struct buffer_head *bh, int uptodate);
/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
-int inode_has_buffers(struct inode *);
-void invalidate_inode_buffers(struct inode *);
-int remove_inode_buffers(struct inode *inode);
-int sync_mapping_buffers(struct address_space *mapping);
+int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
+ bool datasync);
+int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
+ bool datasync);
void clean_bdev_aliases(struct block_device *bdev, sector_t block,
sector_t len);
static inline void clean_bdev_bh_alias(struct buffer_head *bh)
@@ -229,16 +224,13 @@ void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
unsigned size);
-struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
- unsigned size, gfp_t gfp);
+struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
+ unsigned size, gfp_t gfp);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
struct buffer_head *__bread_gfp(struct block_device *,
sector_t block, unsigned size, gfp_t gfp);
-void invalidate_bh_lrus(void);
-void invalidate_bh_lrus_cpu(void);
-bool has_bh_in_lru(int cpu, void *dummy);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head * bh);
void unlock_buffer(struct buffer_head *bh);
@@ -254,18 +246,15 @@ int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait);
void __bh_read_batch(int nr, struct buffer_head *bhs[],
blk_opf_t op_flags, bool force_lock);
-extern int buffer_heads_over_limit;
-
/*
* Generic address_space_operations implementations for buffer_head-backed
* address_spaces.
*/
void block_invalidate_folio(struct folio *folio, size_t offset, size_t length);
-int block_write_full_page(struct page *page, get_block_t *get_block,
- struct writeback_control *wbc);
-int __block_write_full_page(struct inode *inode, struct page *page,
- get_block_t *get_block, struct writeback_control *wbc,
- bh_end_io_t *handler);
+int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
+ void *get_block);
+int __block_write_full_folio(struct inode *inode, struct folio *folio,
+ get_block_t *get_block, struct writeback_control *wbc);
int block_read_full_folio(struct folio *, get_block_t *);
bool block_is_partially_uptodate(struct folio *, size_t from, size_t count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
@@ -278,27 +267,14 @@ int block_write_end(struct file *, struct address_space *,
int generic_write_end(struct file *, struct address_space *,
loff_t, unsigned, unsigned,
struct page *, void *);
-void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
-void clean_page_buffers(struct page *page);
+void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to);
int cont_write_begin(struct file *, struct address_space *, loff_t,
unsigned, struct page **, void **,
get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
-int block_commit_write(struct page *page, unsigned from, unsigned to);
+void block_commit_write(struct page *page, unsigned int from, unsigned int to);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
get_block_t get_block);
-/* Convert errno to return value from ->page_mkwrite() call */
-static inline vm_fault_t block_page_mkwrite_return(int err)
-{
- if (err == 0)
- return VM_FAULT_LOCKED;
- if (err == -EFAULT || err == -EAGAIN)
- return VM_FAULT_NOPAGE;
- if (err == -ENOMEM)
- return VM_FAULT_OOM;
- /* -ENOSPC, -EDQUOT, -EIO ... */
- return VM_FAULT_SIGBUS;
-}
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
@@ -312,8 +288,6 @@ extern int buffer_migrate_folio_norefs(struct address_space *,
#define buffer_migrate_folio_norefs NULL
#endif
-void buffer_init(void);
-
/*
* inline definitions
*/
@@ -359,17 +333,38 @@ sb_breadahead(struct super_block *sb, sector_t block)
__breadahead(sb->s_bdev, block, sb->s_blocksize);
}
-static inline struct buffer_head *
-sb_getblk(struct super_block *sb, sector_t block)
+static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
+ sector_t block, unsigned size)
{
- return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
+ gfp_t gfp;
+
+ gfp = mapping_gfp_constraint(bdev->bd_inode->i_mapping, ~__GFP_FS);
+ gfp |= __GFP_NOFAIL;
+
+ return bdev_getblk(bdev, block, size, gfp);
}
+static inline struct buffer_head *__getblk(struct block_device *bdev,
+ sector_t block, unsigned size)
+{
+ gfp_t gfp;
-static inline struct buffer_head *
-sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
+ gfp = mapping_gfp_constraint(bdev->bd_inode->i_mapping, ~__GFP_FS);
+ gfp |= __GFP_MOVABLE | __GFP_NOFAIL;
+
+ return bdev_getblk(bdev, block, size, gfp);
+}
+
+static inline struct buffer_head *sb_getblk(struct super_block *sb,
+ sector_t block)
{
- return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
+ return __getblk(sb->s_bdev, block, sb->s_blocksize);
+}
+
+static inline struct buffer_head *sb_getblk_gfp(struct super_block *sb,
+ sector_t block, gfp_t gfp)
+{
+ return bdev_getblk(sb->s_bdev, block, sb->s_blocksize, gfp);
}
static inline struct buffer_head *
@@ -406,20 +401,6 @@ static inline void lock_buffer(struct buffer_head *bh)
__lock_buffer(bh);
}
-static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
- sector_t block,
- unsigned size)
-{
- return __getblk_gfp(bdev, block, size, 0);
-}
-
-static inline struct buffer_head *__getblk(struct block_device *bdev,
- sector_t block,
- unsigned size)
-{
- return __getblk_gfp(bdev, block, size, __GFP_MOVABLE);
-}
-
static inline void bh_readahead(struct buffer_head *bh, blk_opf_t op_flags)
{
if (!buffer_uptodate(bh) && trylock_buffer(bh)) {
@@ -471,9 +452,44 @@ __bread(struct block_device *bdev, sector_t block, unsigned size)
return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
}
+/**
+ * get_nth_bh - Get a reference on the n'th buffer after this one.
+ * @bh: The buffer to start counting from.
+ * @count: How many buffers to skip.
+ *
+ * This is primarily useful for finding the nth buffer in a folio; in
+ * that case you pass the head buffer and the byte offset in the folio
+ * divided by the block size. It can be used for other purposes, but
+ * it will wrap at the end of the folio rather than returning NULL or
+ * proceeding to the next folio for you.
+ *
+ * Return: The requested buffer with an elevated refcount.
+ */
+static inline __must_check
+struct buffer_head *get_nth_bh(struct buffer_head *bh, unsigned int count)
+{
+ while (count--)
+ bh = bh->b_this_page;
+ get_bh(bh);
+ return bh;
+}
+
bool block_dirty_folio(struct address_space *mapping, struct folio *folio);
-#else /* CONFIG_BLOCK */
+#ifdef CONFIG_BUFFER_HEAD
+
+void buffer_init(void);
+bool try_to_free_buffers(struct folio *folio);
+int inode_has_buffers(struct inode *inode);
+void invalidate_inode_buffers(struct inode *inode);
+int remove_inode_buffers(struct inode *inode);
+int sync_mapping_buffers(struct address_space *mapping);
+void invalidate_bh_lrus(void);
+void invalidate_bh_lrus_cpu(void);
+bool has_bh_in_lru(int cpu, void *dummy);
+extern int buffer_heads_over_limit;
+
+#else /* CONFIG_BUFFER_HEAD */
static inline void buffer_init(void) {}
static inline bool try_to_free_buffers(struct folio *folio) { return true; }
@@ -481,9 +497,10 @@ static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
+static inline void invalidate_bh_lrus(void) {}
static inline void invalidate_bh_lrus_cpu(void) {}
static inline bool has_bh_in_lru(int cpu, void *dummy) { return false; }
#define buffer_heads_over_limit 0
-#endif /* CONFIG_BLOCK */
+#endif /* CONFIG_BUFFER_HEAD */
#endif /* _LINUX_BUFFER_HEAD_H */
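The get_nth_bh() kernel-doc above describes the offset-within-folio use case. A minimal sketch of that use, assuming the folio already has buffers attached (a NULL return from folio_buffers() is not handled here):

/* Map a byte offset inside a folio to its buffer_head, per the
 * get_nth_bh() kernel-doc above. Returns the buffer with an elevated
 * refcount; the caller drops it with put_bh().
 */
static struct buffer_head *bh_at_offset(struct folio *folio,
                                        size_t offset, unsigned int blocksize)
{
        struct buffer_head *head = folio_buffers(folio);

        return get_nth_bh(head, offset / blocksize);
}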
diff --git a/include/linux/buildid.h b/include/linux/buildid.h
index 3b7a0ff4642f..8a582d242f06 100644
--- a/include/linux/buildid.h
+++ b/include/linux/buildid.h
@@ -2,10 +2,11 @@
#ifndef _LINUX_BUILDID_H
#define _LINUX_BUILDID_H
-#include <linux/mm_types.h>
+#include <linux/types.h>
#define BUILD_ID_SIZE_MAX 20
+struct vm_area_struct;
int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
__u32 *size);
int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size);
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index 555aae5448ae..bd1e361b351c 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -83,7 +83,7 @@ struct bvec_iter {
unsigned int bi_bvec_done; /* number of bytes completed in
current bvec */
-} __packed;
+} __packed __aligned(4);
struct bvec_iter_all {
struct bio_vec bv;
diff --git a/include/linux/cache.h b/include/linux/cache.h
index 5da1bbd96154..0ecb17bb6883 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -85,6 +85,31 @@
#define cache_line_size() L1_CACHE_BYTES
#endif
+#ifndef __cacheline_group_begin
+#define __cacheline_group_begin(GROUP) \
+ __u8 __cacheline_group_begin__##GROUP[0]
+#endif
+
+#ifndef __cacheline_group_end
+#define __cacheline_group_end(GROUP) \
+ __u8 __cacheline_group_end__##GROUP[0]
+#endif
+
+#ifndef CACHELINE_ASSERT_GROUP_MEMBER
+#define CACHELINE_ASSERT_GROUP_MEMBER(TYPE, GROUP, MEMBER) \
+ BUILD_BUG_ON(!(offsetof(TYPE, MEMBER) >= \
+ offsetofend(TYPE, __cacheline_group_begin__##GROUP) && \
+ offsetofend(TYPE, MEMBER) <= \
+ offsetof(TYPE, __cacheline_group_end__##GROUP)))
+#endif
+
+#ifndef CACHELINE_ASSERT_GROUP_SIZE
+#define CACHELINE_ASSERT_GROUP_SIZE(TYPE, GROUP, SIZE) \
+ BUILD_BUG_ON(offsetof(TYPE, __cacheline_group_end__##GROUP) - \
+ offsetofend(TYPE, __cacheline_group_begin__##GROUP) > \
+ SIZE)
+#endif
+
/*
* Helper to add padding within a struct to ensure data fall into separate
* cachelines.
@@ -98,4 +123,10 @@ struct cacheline_padding {
#define CACHELINE_PADDING(name)
#endif
+#ifdef ARCH_DMA_MINALIGN
+#define ARCH_HAS_DMA_MINALIGN
+#else
+#define ARCH_DMA_MINALIGN __alignof__(unsigned long long)
+#endif
+
#endif /* __LINUX_CACHE_H */
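The cacheline grouping macros added above bracket a set of struct members with zero-size markers so their placement can be asserted at build time. A kernel-style sketch on a hypothetical stats struct (assumes linux/cache.h and linux/build_bug.h):

struct demo_stats {
        __cacheline_group_begin(hot);
        u64 rx_packets;
        u64 rx_bytes;
        __cacheline_group_end(hot);
        u64 rarely_touched;
};

static void demo_stats_layout_check(void)
{
        /* both members must sit between the two "hot" markers ... */
        CACHELINE_ASSERT_GROUP_MEMBER(struct demo_stats, hot, rx_packets);
        CACHELINE_ASSERT_GROUP_MEMBER(struct demo_stats, hot, rx_bytes);
        /* ... and the group must not exceed 16 bytes */
        CACHELINE_ASSERT_GROUP_SIZE(struct demo_stats, hot, 16);
}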
diff --git a/include/linux/cacheflush.h b/include/linux/cacheflush.h
index a6189d21f2ba..55f297b2c23f 100644
--- a/include/linux/cacheflush.h
+++ b/include/linux/cacheflush.h
@@ -7,14 +7,23 @@
struct folio;
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
-#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
+#ifndef flush_dcache_folio
void flush_dcache_folio(struct folio *folio);
#endif
#else
static inline void flush_dcache_folio(struct folio *folio)
{
}
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO 0
+#define flush_dcache_folio flush_dcache_folio
#endif /* ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE */
+#ifndef flush_icache_pages
+static inline void flush_icache_pages(struct vm_area_struct *vma,
+ struct page *page, unsigned int nr)
+{
+}
+#endif
+
+#define flush_icache_page(vma, page) flush_icache_pages(vma, page, 1)
+
#endif /* _LINUX_CACHEFLUSH_H */
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
index a5cfd44fab45..d504eb4b49ab 100644
--- a/include/linux/cacheinfo.h
+++ b/include/linux/cacheinfo.h
@@ -73,6 +73,7 @@ struct cacheinfo {
struct cpu_cacheinfo {
struct cacheinfo *info_list;
+ unsigned int per_cpu_data_slice_size;
unsigned int num_levels;
unsigned int num_leaves;
bool cpu_map_populated;
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 982ba245eb41..1b92aed49363 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -195,6 +195,10 @@ int can_restart_now(struct net_device *dev);
void can_bus_off(struct net_device *dev);
const char *can_get_state_str(const enum can_state state);
+void can_state_get_by_berr_counter(const struct net_device *dev,
+ const struct can_berr_counter *bec,
+ enum can_state *tx_state,
+ enum can_state *rx_state);
void can_change_state(struct net_device *dev, struct can_frame *cf,
enum can_state tx_state, enum can_state rx_state);
diff --git a/include/linux/can/length.h b/include/linux/can/length.h
index 6995092b774e..abc978b38f79 100644
--- a/include/linux/can/length.h
+++ b/include/linux/can/length.h
@@ -1,126 +1,258 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2020 Oliver Hartkopp <socketcan@hartkopp.net>
* Copyright (C) 2020 Marc Kleine-Budde <kernel@pengutronix.de>
+ * Copyright (C) 2020, 2023 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
*/
#ifndef _CAN_LENGTH_H
#define _CAN_LENGTH_H
+#include <linux/bits.h>
+#include <linux/can.h>
+#include <linux/can/netlink.h>
+#include <linux/math.h>
+
/*
- * Size of a Classical CAN Standard Frame
+ * Size of a Classical CAN Standard Frame header in bits
*
- * Name of Field Bits
+ * Name of Field Bits
* ---------------------------------------------------------
- * Start-of-frame 1
- * Identifier 11
- * Remote transmission request (RTR) 1
- * Identifier extension bit (IDE) 1
- * Reserved bit (r0) 1
- * Data length code (DLC) 4
- * Data field 0...64
- * CRC 15
- * CRC delimiter 1
- * ACK slot 1
- * ACK delimiter 1
- * End-of-frame (EOF) 7
- * Inter frame spacing 3
+ * Start Of Frame (SOF) 1
+ * Arbitration field:
+ * base ID 11
+ * Remote Transmission Request (RTR) 1
+ * Control field:
+ * IDentifier Extension bit (IDE) 1
+ * FD Format indicator (FDF) 1
+ * Data Length Code (DLC) 4
+ *
+ * including all fields preceding the data field, ignoring bitstuffing
+ */
+#define CAN_FRAME_HEADER_SFF_BITS 19
+
+/*
+ * Size of a Classical CAN Extended Frame header in bits
+ *
+ * Name of Field Bits
+ * ---------------------------------------------------------
+ * Start Of Frame (SOF) 1
+ * Arbitration field:
+ * base ID 11
+ * Substitute Remote Request (SRR) 1
+ * IDentifier Extension bit (IDE) 1
+ * ID extension 18
+ * Remote Transmission Request (RTR) 1
+ * Control field:
+ * FD Format indicator (FDF) 1
+ * Reserved bit (r0) 1
+ * Data length code (DLC) 4
+ *
+ * including all fields preceding the data field, ignoring bitstuffing
+ */
+#define CAN_FRAME_HEADER_EFF_BITS 39
+
+/*
+ * Size of a CAN-FD Standard Frame in bits
*
- * rounded up and ignoring bitstuffing
+ * Name of Field Bits
+ * ---------------------------------------------------------
+ * Start Of Frame (SOF) 1
+ * Arbitration field:
+ * base ID 11
+ * Remote Request Substitution (RRS) 1
+ * Control field:
+ * IDentifier Extension bit (IDE) 1
+ * FD Format indicator (FDF) 1
+ * Reserved bit (res) 1
+ * Bit Rate Switch (BRS) 1
+ * Error Status Indicator (ESI) 1
+ * Data length code (DLC) 4
+ *
+ * including all fields preceding the data field, ignoring bitstuffing
+ */
+#define CANFD_FRAME_HEADER_SFF_BITS 22
+
+/*
+ * Size of a CAN-FD Extended Frame in bits
+ *
+ * Name of Field Bits
+ * ---------------------------------------------------------
+ * Start Of Frame (SOF) 1
+ * Arbitration field:
+ * base ID 11
+ * Substitute Remote Request (SRR) 1
+ * IDentifier Extension bit (IDE) 1
+ * ID extension 18
+ * Remote Request Substitution (RRS) 1
+ * Control field:
+ * FD Format indicator (FDF) 1
+ * Reserved bit (res) 1
+ * Bit Rate Switch (BRS) 1
+ * Error Status Indicator (ESI) 1
+ * Data length code (DLC) 4
+ *
+ * including all fields preceding the data field, ignoring bitstuffing
*/
-#define CAN_FRAME_OVERHEAD_SFF DIV_ROUND_UP(47, 8)
+#define CANFD_FRAME_HEADER_EFF_BITS 41
/*
- * Size of a Classical CAN Extended Frame
+ * Size of a CAN CRC Field in bits
*
* Name of Field Bits
* ---------------------------------------------------------
- * Start-of-frame 1
- * Identifier A 11
- * Substitute remote request (SRR) 1
- * Identifier extension bit (IDE) 1
- * Identifier B 18
- * Remote transmission request (RTR) 1
- * Reserved bits (r1, r0) 2
- * Data length code (DLC) 4
- * Data field 0...64
- * CRC 15
- * CRC delimiter 1
- * ACK slot 1
- * ACK delimiter 1
- * End-of-frame (EOF) 7
- * Inter frame spacing 3
+ * CRC sequence (CRC15) 15
+ * CRC Delimiter 1
*
- * rounded up and ignoring bitstuffing
+ * ignoring bitstuffing
*/
-#define CAN_FRAME_OVERHEAD_EFF DIV_ROUND_UP(67, 8)
+#define CAN_FRAME_CRC_FIELD_BITS 16
/*
- * Size of a CAN-FD Standard Frame
+ * Size of a CAN-FD CRC17 Field in bits (length: 0..16)
*
* Name of Field Bits
* ---------------------------------------------------------
- * Start-of-frame 1
- * Identifier 11
- * Reserved bit (r1) 1
- * Identifier extension bit (IDE) 1
- * Flexible data rate format (FDF) 1
- * Reserved bit (r0) 1
- * Bit Rate Switch (BRS) 1
- * Error Status Indicator (ESI) 1
- * Data length code (DLC) 4
- * Data field 0...512
- * Stuff Bit Count (SBC) 0...16: 4 20...64:5
- * CRC 0...16: 17 20...64:21
- * CRC delimiter (CD) 1
- * ACK slot (AS) 1
- * ACK delimiter (AD) 1
- * End-of-frame (EOF) 7
- * Inter frame spacing 3
- *
- * assuming CRC21, rounded up and ignoring bitstuffing
- */
-#define CANFD_FRAME_OVERHEAD_SFF DIV_ROUND_UP(61, 8)
+ * Stuff Count 4
+ * CRC Sequence (CRC17) 17
+ * CRC Delimiter 1
+ * Fixed stuff bits 6
+ */
+#define CANFD_FRAME_CRC17_FIELD_BITS 28
/*
- * Size of a CAN-FD Extended Frame
+ * Size of a CAN-FD CRC21 Field in bits (length: 20..64)
*
* Name of Field Bits
* ---------------------------------------------------------
- * Start-of-frame 1
- * Identifier A 11
- * Substitute remote request (SRR) 1
- * Identifier extension bit (IDE) 1
- * Identifier B 18
- * Reserved bit (r1) 1
- * Flexible data rate format (FDF) 1
- * Reserved bit (r0) 1
- * Bit Rate Switch (BRS) 1
- * Error Status Indicator (ESI) 1
- * Data length code (DLC) 4
- * Data field 0...512
- * Stuff Bit Count (SBC) 0...16: 4 20...64:5
- * CRC 0...16: 17 20...64:21
- * CRC delimiter (CD) 1
- * ACK slot (AS) 1
- * ACK delimiter (AD) 1
- * End-of-frame (EOF) 7
- * Inter frame spacing 3
- *
- * assuming CRC21, rounded up and ignoring bitstuffing
- */
-#define CANFD_FRAME_OVERHEAD_EFF DIV_ROUND_UP(80, 8)
+ * Stuff Count 4
+ * CRC sequence (CRC21) 21
+ * CRC Delimiter 1
+ * Fixed stuff bits 7
+ */
+#define CANFD_FRAME_CRC21_FIELD_BITS 33
+
+/*
+ * Size of a CAN(-FD) Frame footer in bits
+ *
+ * Name of Field Bits
+ * ---------------------------------------------------------
+ * ACK slot 1
+ * ACK delimiter 1
+ * End Of Frame (EOF) 7
+ *
+ * including all fields following the CRC field
+ */
+#define CAN_FRAME_FOOTER_BITS 9
+
+/*
+ * First part of the Inter Frame Space
+ * (a.k.a. IMF - intermission field)
+ */
+#define CAN_INTERMISSION_BITS 3
+
+/**
+ * can_bitstuffing_len() - Calculate the maximum length with bitstuffing
+ * @destuffed_len: length of a destuffed bit stream
+ *
+ * The worst bit stuffing case is a sequence in which dominant and
+ * recessive bits alternate every four bits:
+ *
+ * Destuffed: 1 1111 0000 1111 0000 1111
+ * Stuffed: 1 1111o 0000i 1111o 0000i 1111o
+ *
+ * Nomenclature
+ *
+ * - "0": dominant bit
+ * - "o": dominant stuff bit
+ * - "1": recessive bit
+ * - "i": recessive stuff bit
+ *
+ * Aside from the first bit, one stuff bit is added every four bits.
+ *
+ * Return: length of the stuffed bit stream in the worst case scenario.
+ */
+#define can_bitstuffing_len(destuffed_len) \
+ (destuffed_len + (destuffed_len - 1) / 4)
+
+#define __can_bitstuffing_len(bitstuffing, destuffed_len) \
+ (bitstuffing ? can_bitstuffing_len(destuffed_len) : \
+ destuffed_len)
+
+#define __can_cc_frame_bits(is_eff, bitstuffing, \
+ intermission, data_len) \
+( \
+ __can_bitstuffing_len(bitstuffing, \
+ (is_eff ? CAN_FRAME_HEADER_EFF_BITS : \
+ CAN_FRAME_HEADER_SFF_BITS) + \
+ (data_len) * BITS_PER_BYTE + \
+ CAN_FRAME_CRC_FIELD_BITS) + \
+ CAN_FRAME_FOOTER_BITS + \
+ (intermission ? CAN_INTERMISSION_BITS : 0) \
+)
+
+#define __can_fd_frame_bits(is_eff, bitstuffing, \
+ intermission, data_len) \
+( \
+ __can_bitstuffing_len(bitstuffing, \
+ (is_eff ? CANFD_FRAME_HEADER_EFF_BITS : \
+ CANFD_FRAME_HEADER_SFF_BITS) + \
+ (data_len) * BITS_PER_BYTE) + \
+ ((data_len) <= 16 ? \
+ CANFD_FRAME_CRC17_FIELD_BITS : \
+ CANFD_FRAME_CRC21_FIELD_BITS) + \
+ CAN_FRAME_FOOTER_BITS + \
+ (intermission ? CAN_INTERMISSION_BITS : 0) \
+)
+
+/**
+ * can_frame_bits() - Calculate the number of bits on the wire in a
+ * CAN frame
+ * @is_fd: true: CAN-FD frame; false: Classical CAN frame.
+ * @is_eff: true: Extended frame; false: Standard frame.
+ * @bitstuffing: true: calculate the bitstuffing worst case; false:
+ * calculate the bitstuffing best case (no dynamic
+ * bitstuffing). CAN-FD's fixed stuff bits are always included.
+ * @intermission: if and only if true, include the inter frame space
+ * assuming no bus idle (i.e. only the intermission). Strictly
+ * speaking, the inter frame space is not part of the
+ * frame. However, it is needed when calculating the delay
+ * between the Start Of Frame of two consecutive frames.
+ * @data_len: length of the data field in bytes. Corresponds to
+ * can(fd)_frame->len. Should be zero for remote frames. No
+ * sanitization is done on @data_len and it shall have no side
+ * effects.
+ *
+ * Return: the number of bits on the wire of a CAN frame.
+ */
+#define can_frame_bits(is_fd, is_eff, bitstuffing, \
+ intermission, data_len) \
+( \
+ is_fd ? __can_fd_frame_bits(is_eff, bitstuffing, \
+ intermission, data_len) : \
+ __can_cc_frame_bits(is_eff, bitstuffing, \
+ intermission, data_len) \
+)
+
+/*
+ * Number of bytes in a CAN frame
+ * (rounded up, including intermission)
+ */
+#define can_frame_bytes(is_fd, is_eff, bitstuffing, data_len) \
+ DIV_ROUND_UP(can_frame_bits(is_fd, is_eff, bitstuffing, \
+ true, data_len), \
+ BITS_PER_BYTE)
/*
* Maximum size of a Classical CAN frame
- * (rounded up and ignoring bitstuffing)
+ * (rounded up, ignoring bitstuffing but including intermission)
*/
-#define CAN_FRAME_LEN_MAX (CAN_FRAME_OVERHEAD_EFF + CAN_MAX_DLEN)
+#define CAN_FRAME_LEN_MAX can_frame_bytes(false, true, false, CAN_MAX_DLEN)
/*
* Maximum size of a CAN-FD frame
- * (rounded up and ignoring bitstuffing)
+ * (rounded up, ignoring dynamic bitstuffing but including intermission)
*/
-#define CANFD_FRAME_LEN_MAX (CANFD_FRAME_OVERHEAD_EFF + CANFD_MAX_DLEN)
+#define CANFD_FRAME_LEN_MAX can_frame_bytes(true, true, false, CANFD_MAX_DLEN)
/*
* can_cc_dlc2len(value) - convert a given data length code (dlc) of a
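The rewritten CAN_FRAME_LEN_MAX can be checked by hand against the new constants: a Classical CAN extended frame carrying CAN_MAX_DLEN (8) data bytes, no dynamic bitstuffing, intermission included. A standalone verification of the arithmetic, which agrees with the old CAN_FRAME_OVERHEAD_EFF (9 bytes) + CAN_MAX_DLEN result:

/* CAN_FRAME_HEADER_EFF_BITS + 8 * BITS_PER_BYTE +
 * CAN_FRAME_CRC_FIELD_BITS + CAN_FRAME_FOOTER_BITS +
 * CAN_INTERMISSION_BITS = 39 + 64 + 16 + 9 + 3 = 131 bits,
 * and DIV_ROUND_UP(131, 8) = 17 bytes.
 */
#include <stdio.h>

int main(void)
{
        unsigned int bits = 39 + 8 * 8 + 16 + 9 + 3;

        printf("%u bits -> %u bytes\n", bits, (bits + 7) / 8);
        return 0;       /* prints "131 bits -> 17 bytes" */
}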
diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h
index c205c51d79c9..d29bb4521947 100644
--- a/include/linux/can/rx-offload.h
+++ b/include/linux/can/rx-offload.h
@@ -3,7 +3,7 @@
* linux/can/rx-offload.h
*
* Copyright (c) 2014 David Jander, Protonic Holland
- * Copyright (c) 2014-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
+ * Copyright (c) 2014-2017, 2023 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
*/
#ifndef _CAN_RX_OFFLOAD_H
@@ -44,11 +44,14 @@ int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload);
int can_rx_offload_queue_timestamp(struct can_rx_offload *offload,
struct sk_buff *skb, u32 timestamp);
-unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
- unsigned int idx, u32 timestamp,
- unsigned int *frame_len_ptr);
+unsigned int can_rx_offload_get_echo_skb_queue_timestamp(struct can_rx_offload *offload,
+ unsigned int idx, u32 timestamp,
+ unsigned int *frame_len_ptr);
int can_rx_offload_queue_tail(struct can_rx_offload *offload,
struct sk_buff *skb);
+unsigned int can_rx_offload_get_echo_skb_queue_tail(struct can_rx_offload *offload,
+ unsigned int idx,
+ unsigned int *frame_len_ptr);
void can_rx_offload_irq_finish(struct can_rx_offload *offload);
void can_rx_offload_threaded_irq_finish(struct can_rx_offload *offload);
void can_rx_offload_del(struct can_rx_offload *offload);
diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
index 67caa909e3e6..98c6fd0b39b6 100644
--- a/include/linux/cdrom.h
+++ b/include/linux/cdrom.h
@@ -13,6 +13,7 @@
#include <linux/fs.h> /* not really needed, later.. */
#include <linux/list.h>
+#include <linux/blkdev.h>
#include <scsi/scsi_common.h>
#include <uapi/linux/cdrom.h>
@@ -61,9 +62,9 @@ struct cdrom_device_info {
__u8 last_sense;
__u8 media_written; /* dirty flag, DVD+RW bookkeeping */
unsigned short mmc3_profile; /* current MMC3 profile */
- int for_data;
int (*exit)(struct cdrom_device_info *);
int mrw_mode_page;
+ bool opened_for_data;
__s64 last_media_change_ms;
};
@@ -101,11 +102,10 @@ int cdrom_read_tocentry(struct cdrom_device_info *cdi,
struct cdrom_tocentry *entry);
/* the general block_device operations structure: */
-extern int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev,
- fmode_t mode);
-extern void cdrom_release(struct cdrom_device_info *cdi, fmode_t mode);
-extern int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
- fmode_t mode, unsigned int cmd, unsigned long arg);
+int cdrom_open(struct cdrom_device_info *cdi, blk_mode_t mode);
+void cdrom_release(struct cdrom_device_info *cdi);
+int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
+ unsigned int cmd, unsigned long arg);
extern unsigned int cdrom_check_events(struct cdrom_device_info *cdi,
unsigned int clearing);
diff --git a/include/linux/cdx/cdx_bus.h b/include/linux/cdx/cdx_bus.h
index 35ef41d8a61a..6355a36a3f81 100644
--- a/include/linux/cdx/cdx_bus.h
+++ b/include/linux/cdx/cdx_bus.h
@@ -14,7 +14,6 @@
#include <linux/mod_devicetable.h>
#define MAX_CDX_DEV_RESOURCES 4
-#define CDX_ANY_ID (0xFFFF)
#define CDX_CONTROLLER_ID_SHIFT 4
#define CDX_BUS_NUM_MASK 0xF
@@ -22,13 +21,19 @@
struct cdx_controller;
enum {
+ CDX_DEV_BUS_MASTER_CONF,
CDX_DEV_RESET_CONF,
};
struct cdx_device_config {
u8 type;
+ bool bus_master_enable;
};
+typedef int (*cdx_bus_enable_cb)(struct cdx_controller *cdx, u8 bus_num);
+
+typedef int (*cdx_bus_disable_cb)(struct cdx_controller *cdx, u8 bus_num);
+
typedef int (*cdx_scan_cb)(struct cdx_controller *cdx);
typedef int (*cdx_dev_configure_cb)(struct cdx_controller *cdx,
@@ -36,6 +41,19 @@ typedef int (*cdx_dev_configure_cb)(struct cdx_controller *cdx,
struct cdx_device_config *dev_config);
/**
+ * CDX_DEVICE - macro used to describe a specific CDX device
+ * @vend: the 16 bit CDX Vendor ID
+ * @dev: the 16 bit CDX Device ID
+ *
+ * This macro is used to create a struct cdx_device_id that matches a
+ * specific device. The subvendor and subdevice fields will be set to
+ * CDX_ANY_ID.
+ */
+#define CDX_DEVICE(vend, dev) \
+ .vendor = (vend), .device = (dev), \
+ .subvendor = CDX_ANY_ID, .subdevice = CDX_ANY_ID
+
+/**
* CDX_DEVICE_DRIVER_OVERRIDE - macro used to describe a CDX device with
* override_only flags.
* @vend: the 16 bit CDX Vendor ID
@@ -43,18 +61,24 @@ typedef int (*cdx_dev_configure_cb)(struct cdx_controller *cdx,
* @driver_override: the 32 bit CDX Device override_only
*
* This macro is used to create a struct cdx_device_id that matches only a
- * driver_override device.
+ * driver_override device. The subvendor and subdevice fields will be set to
+ * CDX_ANY_ID.
*/
#define CDX_DEVICE_DRIVER_OVERRIDE(vend, dev, driver_override) \
- .vendor = (vend), .device = (dev), .override_only = (driver_override)
+ .vendor = (vend), .device = (dev), .subvendor = CDX_ANY_ID,\
+ .subdevice = CDX_ANY_ID, .override_only = (driver_override)
/**
* struct cdx_ops - Callbacks supported by CDX controller.
+ * @bus_enable: enable bus on the controller
+ * @bus_disable: disable bus on the controller
* @scan: scan the devices on the controller
* @dev_configure: configuration like reset, master_enable,
* msi_config etc for a CDX device
*/
struct cdx_ops {
+ cdx_bus_enable_cb bus_enable;
+ cdx_bus_disable_cb bus_disable;
cdx_scan_cb scan;
cdx_dev_configure_cb dev_configure;
};
@@ -64,12 +88,14 @@ struct cdx_ops {
* @dev: Linux device associated with the CDX controller.
* @priv: private data
* @id: Controller ID
+ * @controller_registered: controller registered with bus
* @ops: CDX controller ops
*/
struct cdx_controller {
struct device *dev;
void *priv;
u32 id;
+ bool controller_registered;
struct cdx_ops *ops;
};
@@ -79,14 +105,21 @@ struct cdx_controller {
* @cdx: CDX controller associated with the device
* @vendor: Vendor ID for CDX device
* @device: Device ID for CDX device
+ * @subsystem_vendor: Subsystem Vendor ID for CDX device
+ * @subsystem_device: Subsystem Device ID for CDX device
+ * @class: Class for the CDX device
+ * @revision: Revision of the CDX device
* @bus_num: Bus number for this CDX device
* @dev_num: Device number for this device
* @res: array of MMIO region entries
* @res_attr: resource binary attribute
+ * @debugfs_dir: debugfs directory for this device
* @res_count: number of valid MMIO regions
* @dma_mask: Default DMA mask
* @flags: CDX device flags
* @req_id: Requestor ID associated with CDX device
+ * @is_bus: Is this a bus device
+ * @enabled: Is this bus enabled
* @driver_override: driver name to force a match; do not set directly,
* because core frees it; use driver_set_override() to
* set or clear it.
@@ -96,19 +129,36 @@ struct cdx_device {
struct cdx_controller *cdx;
u16 vendor;
u16 device;
+ u16 subsystem_vendor;
+ u16 subsystem_device;
+ u32 class;
+ u8 revision;
u8 bus_num;
u8 dev_num;
struct resource res[MAX_CDX_DEV_RESOURCES];
+ struct bin_attribute *res_attr[MAX_CDX_DEV_RESOURCES];
+ struct dentry *debugfs_dir;
u8 res_count;
u64 dma_mask;
u16 flags;
u32 req_id;
+ bool is_bus;
+ bool enabled;
const char *driver_override;
};
#define to_cdx_device(_dev) \
container_of(_dev, struct cdx_device, dev)
+#define cdx_resource_start(dev, num) ((dev)->res[(num)].start)
+#define cdx_resource_end(dev, num) ((dev)->res[(num)].end)
+#define cdx_resource_flags(dev, num) ((dev)->res[(num)].flags)
+#define cdx_resource_len(dev, num) \
+ ((cdx_resource_start((dev), (num)) == 0 && \
+ cdx_resource_end((dev), (num)) == \
+ cdx_resource_start((dev), (num))) ? 0 : \
+ (cdx_resource_end((dev), (num)) - \
+ cdx_resource_start((dev), (num)) + 1))
/**
* struct cdx_driver - CDX device driver
* @driver: Generic device driver
@@ -171,4 +221,20 @@ extern struct bus_type cdx_bus_type;
*/
int cdx_dev_reset(struct device *dev);
+/**
+ * cdx_set_master - enables bus-mastering for CDX device
+ * @cdx_dev: the CDX device to enable
+ *
+ * Return: 0 for success, -errno on failure
+ */
+int cdx_set_master(struct cdx_device *cdx_dev);
+
+/**
+ * cdx_clear_master - disables bus-mastering for CDX device
+ * @cdx_dev: the CDX device to disable
+ *
+ * Return: 0 for success, -errno on failure
+ */
+int cdx_clear_master(struct cdx_device *cdx_dev);
+
#endif /* _CDX_BUS_H_ */
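A hypothetical driver-side sketch of the additions above: an ID table built with the new CDX_DEVICE() macro (subvendor/subdevice default to CDX_ANY_ID) and bus mastering enabled at probe time. The vendor/device IDs and function names are made up for illustration:

static const struct cdx_device_id demo_cdx_ids[] = {
        { CDX_DEVICE(0x10ee, 0x0001) },  /* illustrative IDs */
        { },
};

static int demo_cdx_probe(struct cdx_device *cdx_dev)
{
        return cdx_set_master(cdx_dev); /* 0 on success, -errno on failure */
}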
diff --git a/include/linux/ceph/ceph_debug.h b/include/linux/ceph/ceph_debug.h
index d5a5da838caf..11a92a946016 100644
--- a/include/linux/ceph/ceph_debug.h
+++ b/include/linux/ceph/ceph_debug.h
@@ -19,12 +19,25 @@
pr_debug("%.*s %12.12s:%-4d : " fmt, \
8 - (int)sizeof(KBUILD_MODNAME), " ", \
kbasename(__FILE__), __LINE__, ##__VA_ARGS__)
+# define doutc(client, fmt, ...) \
+ pr_debug("%.*s %12.12s:%-4d : [%pU %llu] " fmt, \
+ 8 - (int)sizeof(KBUILD_MODNAME), " ", \
+ kbasename(__FILE__), __LINE__, \
+ &client->fsid, client->monc.auth->global_id, \
+ ##__VA_ARGS__)
# else
/* faux printk call just to see any compiler warnings. */
# define dout(fmt, ...) do { \
if (0) \
printk(KERN_DEBUG fmt, ##__VA_ARGS__); \
} while (0)
+# define doutc(client, fmt, ...) do { \
+ if (0) \
+ printk(KERN_DEBUG "[%pU %llu] " fmt, \
+ &client->fsid, \
+ client->monc.auth->global_id, \
+ ##__VA_ARGS__); \
+ } while (0)
# endif
#else
@@ -33,7 +46,32 @@
* or, just wrap pr_debug
*/
# define dout(fmt, ...) pr_debug(" " fmt, ##__VA_ARGS__)
+# define doutc(client, fmt, ...) \
+ pr_debug(" [%pU %llu] %s: " fmt, &client->fsid, \
+ client->monc.auth->global_id, __func__, ##__VA_ARGS__)
#endif
+#define pr_notice_client(client, fmt, ...) \
+ pr_notice("[%pU %llu]: " fmt, &client->fsid, \
+ client->monc.auth->global_id, ##__VA_ARGS__)
+#define pr_info_client(client, fmt, ...) \
+ pr_info("[%pU %llu]: " fmt, &client->fsid, \
+ client->monc.auth->global_id, ##__VA_ARGS__)
+#define pr_warn_client(client, fmt, ...) \
+ pr_warn("[%pU %llu]: " fmt, &client->fsid, \
+ client->monc.auth->global_id, ##__VA_ARGS__)
+#define pr_warn_once_client(client, fmt, ...) \
+ pr_warn_once("[%pU %llu]: " fmt, &client->fsid, \
+ client->monc.auth->global_id, ##__VA_ARGS__)
+#define pr_err_client(client, fmt, ...) \
+ pr_err("[%pU %llu]: " fmt, &client->fsid, \
+ client->monc.auth->global_id, ##__VA_ARGS__)
+#define pr_warn_ratelimited_client(client, fmt, ...) \
+ pr_warn_ratelimited("[%pU %llu]: " fmt, &client->fsid, \
+ client->monc.auth->global_id, ##__VA_ARGS__)
+#define pr_err_ratelimited_client(client, fmt, ...) \
+ pr_err_ratelimited("[%pU %llu]: " fmt, &client->fsid, \
+ client->monc.auth->global_id, ##__VA_ARGS__)
+
#endif
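Illustrative call sites for the client-aware macros above; both assume a valid struct ceph_client pointer, since the macros dereference client->fsid and client->monc.auth, and the format arguments are placeholders:

doutc(client, "mds%d session opened\n", mds);
pr_warn_client(client, "slow request tid %llu\n", tid);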
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index 49586ff26152..ee1d0e5f9789 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -357,16 +357,26 @@ enum {
CEPH_MDS_OP_RENAMESNAP = 0x01403,
};
-extern const char *ceph_mds_op_name(int op);
+#define IS_CEPH_MDS_OP_NEWINODE(op) (op == CEPH_MDS_OP_CREATE || \
+ op == CEPH_MDS_OP_MKNOD || \
+ op == CEPH_MDS_OP_MKDIR || \
+ op == CEPH_MDS_OP_SYMLINK)
+extern const char *ceph_mds_op_name(int op);
-#define CEPH_SETATTR_MODE 1
-#define CEPH_SETATTR_UID 2
-#define CEPH_SETATTR_GID 4
-#define CEPH_SETATTR_MTIME 8
-#define CEPH_SETATTR_ATIME 16
-#define CEPH_SETATTR_SIZE 32
-#define CEPH_SETATTR_CTIME 64
+#define CEPH_SETATTR_MODE (1 << 0)
+#define CEPH_SETATTR_UID (1 << 1)
+#define CEPH_SETATTR_GID (1 << 2)
+#define CEPH_SETATTR_MTIME (1 << 3)
+#define CEPH_SETATTR_ATIME (1 << 4)
+#define CEPH_SETATTR_SIZE (1 << 5)
+#define CEPH_SETATTR_CTIME (1 << 6)
+#define CEPH_SETATTR_MTIME_NOW (1 << 7)
+#define CEPH_SETATTR_ATIME_NOW (1 << 8)
+#define CEPH_SETATTR_BTIME (1 << 9)
+#define CEPH_SETATTR_KILL_SGUID (1 << 10)
+#define CEPH_SETATTR_FSCRYPT_AUTH (1 << 11)
+#define CEPH_SETATTR_FSCRYPT_FILE (1 << 12)
/*
* Ceph setxattr request flags.
@@ -479,7 +489,7 @@ union ceph_mds_request_args_ext {
#define CEPH_MDS_FLAG_WANT_DENTRY 2 /* want dentry in reply */
#define CEPH_MDS_FLAG_ASYNC 4 /* request is asynchronous */
-struct ceph_mds_request_head_old {
+struct ceph_mds_request_head_legacy {
__le64 oldest_client_tid;
__le32 mdsmap_epoch; /* on client */
__le32 flags; /* CEPH_MDS_FLAG_* */
@@ -492,9 +502,9 @@ struct ceph_mds_request_head_old {
union ceph_mds_request_args args;
} __attribute__ ((packed));
-#define CEPH_MDS_REQUEST_HEAD_VERSION 1
+#define CEPH_MDS_REQUEST_HEAD_VERSION 3
-struct ceph_mds_request_head {
+struct ceph_mds_request_head_old {
__le16 version; /* struct version */
__le64 oldest_client_tid;
__le32 mdsmap_epoch; /* on client */
@@ -508,6 +518,26 @@ struct ceph_mds_request_head {
union ceph_mds_request_args_ext args;
} __attribute__ ((packed));
+struct ceph_mds_request_head {
+ __le16 version; /* struct version */
+ __le64 oldest_client_tid;
+ __le32 mdsmap_epoch; /* on client */
+ __le32 flags; /* CEPH_MDS_FLAG_* */
+ __u8 num_retry, num_fwd; /* legacy count retry and fwd attempts */
+ __le16 num_releases; /* # include cap/lease release records */
+ __le32 op; /* mds op code */
+ __le32 caller_uid, caller_gid;
+ __le64 ino; /* use this ino for openc, mkdir, mknod,
+ etc. (if replaying) */
+ union ceph_mds_request_args_ext args;
+
+ __le32 ext_num_retry; /* new count retry attempts */
+ __le32 ext_num_fwd; /* new count fwd attempts */
+
+ __le32 struct_len; /* to store size of struct ceph_mds_request_head */
+ __le32 owner_uid, owner_gid; /* used for OPs which create inodes */
+} __attribute__ ((packed));
+
/* cap/lease release record */
struct ceph_mds_request_release {
__le64 ino, cap_id; /* ino and unique cap id */
diff --git a/include/linux/ceph/mdsmap.h b/include/linux/ceph/mdsmap.h
deleted file mode 100644
index 4c3e0648dc27..000000000000
--- a/include/linux/ceph/mdsmap.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _FS_CEPH_MDSMAP_H
-#define _FS_CEPH_MDSMAP_H
-
-#include <linux/bug.h>
-#include <linux/ceph/types.h>
-
-/*
- * mds map - describe servers in the mds cluster.
- *
- * we limit fields to those the client actually xcares about
- */
-struct ceph_mds_info {
- u64 global_id;
- struct ceph_entity_addr addr;
- s32 state;
- int num_export_targets;
- bool laggy;
- u32 *export_targets;
-};
-
-struct ceph_mdsmap {
- u32 m_epoch, m_client_epoch, m_last_failure;
- u32 m_root;
- u32 m_session_timeout; /* seconds */
- u32 m_session_autoclose; /* seconds */
- u64 m_max_file_size;
- u64 m_max_xattr_size; /* maximum size for xattrs blob */
- u32 m_max_mds; /* expected up:active mds number */
- u32 m_num_active_mds; /* actual up:active mds number */
- u32 possible_max_rank; /* possible max rank index */
- struct ceph_mds_info *m_info;
-
- /* which object pools file data can be stored in */
- int m_num_data_pg_pools;
- u64 *m_data_pg_pools;
- u64 m_cas_pg_pool;
-
- bool m_enabled;
- bool m_damaged;
- int m_num_laggy;
-};
-
-static inline struct ceph_entity_addr *
-ceph_mdsmap_get_addr(struct ceph_mdsmap *m, int w)
-{
- if (w >= m->possible_max_rank)
- return NULL;
- return &m->m_info[w].addr;
-}
-
-static inline int ceph_mdsmap_get_state(struct ceph_mdsmap *m, int w)
-{
- BUG_ON(w < 0);
- if (w >= m->possible_max_rank)
- return CEPH_MDS_STATE_DNE;
- return m->m_info[w].state;
-}
-
-static inline bool ceph_mdsmap_is_laggy(struct ceph_mdsmap *m, int w)
-{
- if (w >= 0 && w < m->possible_max_rank)
- return m->m_info[w].laggy;
- return false;
-}
-
-extern int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m);
-struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2);
-extern void ceph_mdsmap_destroy(struct ceph_mdsmap *m);
-extern bool ceph_mdsmap_is_cluster_available(struct ceph_mdsmap *m);
-
-#endif
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index 99c1726be6ee..1717cc57cdac 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -17,6 +17,7 @@
struct ceph_msg;
struct ceph_connection;
+struct ceph_msg_data_cursor;
/*
* Ceph defines these callbacks for handling connection events.
@@ -70,6 +71,30 @@ struct ceph_connection_operations {
int used_proto, int result,
const int *allowed_protos, int proto_cnt,
const int *allowed_modes, int mode_cnt);
+
+ /**
+ * sparse_read: read sparse data
+ * @con: connection we're reading from
+ * @cursor: data cursor for reading extents
+ * @buf: optional buffer to read into
+ *
+ * This should be called more than once, each time setting up to
+ * receive an extent into the current cursor position, and zeroing
+ * the holes between them.
+ *
+ * Returns amount of data to be read (in bytes), 0 if reading is
+ * complete, or -errno if there was an error.
+ *
+ * If @buf is set on a >0 return, then the data should be read into
+ * the provided buffer. Otherwise, it should be read into the cursor.
+ *
+ * The sparse read operation is expected to initialize the cursor
+ * with a length covering up to the end of the last extent.
+ */
+ int (*sparse_read)(struct ceph_connection *con,
+ struct ceph_msg_data_cursor *cursor,
+ char **buf);
+
};
/* use format string %s%lld */
@@ -98,6 +123,7 @@ enum ceph_msg_data_type {
CEPH_MSG_DATA_BIO, /* data source/destination is a bio list */
#endif /* CONFIG_BLOCK */
CEPH_MSG_DATA_BVECS, /* data source/destination is a bio_vec array */
+ CEPH_MSG_DATA_ITER, /* data source/destination is an iov_iter */
};
#ifdef CONFIG_BLOCK
@@ -199,6 +225,7 @@ struct ceph_msg_data {
bool own_pages;
};
struct ceph_pagelist *pagelist;
+ struct iov_iter iter;
};
};
@@ -207,6 +234,7 @@ struct ceph_msg_data_cursor {
struct ceph_msg_data *data; /* current data item */
size_t resid; /* bytes not yet consumed */
+ int sr_resid; /* residual sparse_read len */
bool need_crc; /* crc update needed */
union {
#ifdef CONFIG_BLOCK
@@ -222,6 +250,10 @@ struct ceph_msg_data_cursor {
struct page *page; /* page from list */
size_t offset; /* bytes from list */
};
+ struct {
+ struct iov_iter iov_iter;
+ unsigned int lastlen;
+ };
};
};
@@ -251,6 +283,7 @@ struct ceph_msg {
struct kref kref;
bool more_to_follow;
bool needs_out_seq;
+ u64 sparse_read_total;
int front_alloc_len;
struct ceph_msgpool *pool;
@@ -309,6 +342,10 @@ struct ceph_connection_v1_info {
int in_base_pos; /* bytes read */
+ /* sparse reads */
+ struct kvec in_sr_kvec; /* current location to receive into */
+ u64 in_sr_len; /* amount of data in this extent */
+
/* message in temps */
u8 in_tag; /* protocol control byte */
struct ceph_msg_header in_hdr;
@@ -395,6 +432,7 @@ struct ceph_connection_v2_info {
void *conn_bufs[16];
int conn_buf_cnt;
+ int data_len_remain;
struct kvec in_sign_kvecs[8];
struct kvec out_sign_kvecs[8];
@@ -573,6 +611,8 @@ void ceph_msg_data_add_bio(struct ceph_msg *msg, struct ceph_bio_iter *bio_pos,
#endif /* CONFIG_BLOCK */
void ceph_msg_data_add_bvecs(struct ceph_msg *msg,
struct ceph_bvec_iter *bvec_pos);
+void ceph_msg_data_add_iter(struct ceph_msg *msg,
+ struct iov_iter *iter);
struct ceph_msg *ceph_msg_new2(int type, int front_len, int max_data_items,
gfp_t flags, bool can_fail);
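A sketch of how a messenger loop might drive the sparse_read hook according to the contract documented above; the two receive helpers are placeholders, not real messenger functions:

static int drive_sparse_read(struct ceph_connection *con,
                             struct ceph_msg_data_cursor *cursor)
{
        char *buf;
        int len;

        for (;;) {
                buf = NULL;
                len = con->ops->sparse_read(con, cursor, &buf);
                if (len <= 0)
                        return len;     /* 0: done, <0: error */
                if (buf)
                        receive_into_buffer(con, buf, len);     /* placeholder */
                else
                        receive_into_cursor(con, cursor, len);  /* placeholder */
        }
}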
diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h
index b658961156a0..7a9a40163c0f 100644
--- a/include/linux/ceph/mon_client.h
+++ b/include/linux/ceph/mon_client.h
@@ -19,7 +19,7 @@ struct ceph_monmap {
struct ceph_fsid fsid;
u32 epoch;
u32 num_mon;
- struct ceph_entity_inst mon_inst[];
+ struct ceph_entity_inst mon_inst[] __counted_by(num_mon);
};
struct ceph_mon_client;
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index fb6be72104df..f66f6aac74f6 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -29,14 +29,63 @@ typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *);
#define CEPH_HOMELESS_OSD -1
-/* a given osd we're communicating with */
+/*
+ * A single extent in a SPARSE_READ reply.
+ *
+ * Note that these come from the OSD as little-endian values. On BE arches,
+ * we convert them in-place after receipt.
+ */
+struct ceph_sparse_extent {
+ u64 off;
+ u64 len;
+} __packed;
+
+/* Sparse read state machine state values */
+enum ceph_sparse_read_state {
+ CEPH_SPARSE_READ_HDR = 0,
+ CEPH_SPARSE_READ_EXTENTS,
+ CEPH_SPARSE_READ_DATA_LEN,
+ CEPH_SPARSE_READ_DATA_PRE,
+ CEPH_SPARSE_READ_DATA,
+};
+
+/*
+ * A SPARSE_READ reply is a 32-bit count of extents, followed by an array of
+ * 64-bit offset/length pairs, and then all of the actual file data
+ * concatenated after it (sans holes).
+ *
+ * Unfortunately, we don't know how long the extent array is until we've
+ * started reading the data section of the reply. The caller should send down
+ * a destination buffer for the array, but we'll allocate one if it's too
+ * small or if the caller doesn't provide one.
+ */
+struct ceph_sparse_read {
+ enum ceph_sparse_read_state sr_state; /* state machine state */
+ u64 sr_req_off; /* orig request offset */
+ u64 sr_req_len; /* orig request length */
+ u64 sr_pos; /* current pos in buffer */
+ int sr_index; /* current extent index */
+ u32 sr_datalen; /* length of actual data */
+ u32 sr_count; /* extent count in reply */
+ int sr_ext_len; /* length of extent array */
+ struct ceph_sparse_extent *sr_extent; /* extent array */
+};
+
+/*
+ * A given osd we're communicating with.
+ *
+ * Note that the o_requests tree can be searched while holding the "lock" mutex
+ * or the "o_requests_lock" spinlock. Insertion or removal requires both!
+ */
struct ceph_osd {
refcount_t o_ref;
+ int o_sparse_op_idx;
struct ceph_osd_client *o_osdc;
int o_osd;
int o_incarnation;
struct rb_node o_node;
struct ceph_connection o_con;
+ spinlock_t o_requests_lock;
struct rb_root o_requests;
struct rb_root o_linger_requests;
struct rb_root o_backoff_mappings;
@@ -46,6 +95,7 @@ struct ceph_osd {
unsigned long lru_ttl;
struct list_head o_keepalive_item;
struct mutex lock;
+ struct ceph_sparse_read o_sparse_read;
};
#define CEPH_OSD_SLAB_OPS 2
@@ -59,6 +109,7 @@ enum ceph_osd_data_type {
CEPH_OSD_DATA_TYPE_BIO,
#endif /* CONFIG_BLOCK */
CEPH_OSD_DATA_TYPE_BVECS,
+ CEPH_OSD_DATA_TYPE_ITER,
};
struct ceph_osd_data {
@@ -82,6 +133,7 @@ struct ceph_osd_data {
struct ceph_bvec_iter bvec_pos;
u32 num_bvecs;
};
+ struct iov_iter iter;
};
};
@@ -98,6 +150,8 @@ struct ceph_osd_req_op {
u64 offset, length;
u64 truncate_size;
u32 truncate_seq;
+ int sparse_ext_cnt;
+ struct ceph_sparse_extent *sparse_ext;
struct ceph_osd_data osd_data;
} extent;
struct {
@@ -145,6 +199,9 @@ struct ceph_osd_req_op {
u32 src_fadvise_flags;
struct ceph_osd_data osd_data;
} copy_from;
+ struct {
+ u64 ver;
+ } assert_ver;
};
};
@@ -199,6 +256,7 @@ struct ceph_osd_request {
struct ceph_osd_client *r_osdc;
struct kref r_kref;
bool r_mempool;
+ bool r_linger; /* don't resend on failure */
struct completion r_completion; /* private to osd_client.c */
ceph_osdc_callback_t r_callback;
@@ -211,9 +269,9 @@ struct ceph_osd_request {
struct ceph_snap_context *r_snapc; /* for writes */
struct timespec64 r_mtime; /* ditto */
u64 r_data_offset; /* ditto */
- bool r_linger; /* don't resend on failure */
/* internal */
+ u64 r_version; /* data version sent in reply */
unsigned long r_stamp; /* jiffies, send or check time */
unsigned long r_start_stamp; /* jiffies */
ktime_t r_start_latency; /* ktime_t */
@@ -221,7 +279,7 @@ struct ceph_osd_request {
int r_attempts;
u32 r_map_dne_bound;
- struct ceph_osd_req_op r_ops[];
+ struct ceph_osd_req_op r_ops[] __counted_by(r_num_ops);
};
struct ceph_request_redirect {
@@ -450,6 +508,8 @@ void osd_req_op_extent_osd_data_bvecs(struct ceph_osd_request *osd_req,
void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req,
unsigned int which,
struct ceph_bvec_iter *bvec_pos);
+void osd_req_op_extent_osd_iter(struct ceph_osd_request *osd_req,
+ unsigned int which, struct iov_iter *iter);
extern void osd_req_op_cls_request_data_pagelist(struct ceph_osd_request *,
unsigned int which,
@@ -504,6 +564,23 @@ extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
u32 truncate_seq, u64 truncate_size,
bool use_mempool);
+int __ceph_alloc_sparse_ext_map(struct ceph_osd_req_op *op, int cnt);
+
+/*
+ * How big an extent array should we preallocate for a sparse read? This is
+ * just a starting value. If we get more than this back from the OSD, the
+ * receiver will reallocate.
+ */
+#define CEPH_SPARSE_EXT_ARRAY_INITIAL 16
+
+static inline int ceph_alloc_sparse_ext_map(struct ceph_osd_req_op *op, int cnt)
+{
+ if (!cnt)
+ cnt = CEPH_SPARSE_EXT_ARRAY_INITIAL;
+
+ return __ceph_alloc_sparse_ext_map(op, cnt);
+}
+
extern void ceph_osdc_get_request(struct ceph_osd_request *req);
extern void ceph_osdc_put_request(struct ceph_osd_request *req);
@@ -558,5 +635,19 @@ int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
struct ceph_object_locator *oloc,
struct ceph_watch_item **watchers,
u32 *num_watchers);
-#endif
+/* Find offset into the buffer of the end of the extent map */
+static inline u64 ceph_sparse_ext_map_end(struct ceph_osd_req_op *op)
+{
+ struct ceph_sparse_extent *ext;
+
+ /* No extents? No data */
+ if (op->extent.sparse_ext_cnt == 0)
+ return 0;
+
+ ext = &op->extent.sparse_ext[op->extent.sparse_ext_cnt - 1];
+
+ return ext->off + ext->len - op->extent.offset;
+}
+
+#endif
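
As a worked example of ceph_sparse_ext_map_end() above, with made-up extent values: a sparse read at offset 0 that returned 4 KiB at offset 0 and 4 KiB at offset 8192 packs only 8 KiB of data into the buffer, but the map end is 12 KiB, i.e. the end of the last extent minus the request offset.

static u64 demo_sparse_map_end(void)
{
	u64 req_off = 0;			/* original request offset */
	struct ceph_sparse_extent ext[2] = {
		{ .off = 0,    .len = 4096 },
		{ .off = 8192, .len = 4096 },
	};

	return ext[1].off + ext[1].len - req_off;	/* 12288 */
}
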
diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h
index 43a7a1573b51..73c3efbec36c 100644
--- a/include/linux/ceph/rados.h
+++ b/include/linux/ceph/rados.h
@@ -524,6 +524,10 @@ struct ceph_osd_op {
__le64 cookie;
} __attribute__ ((packed)) notify;
struct {
+ __le64 unused;
+ __le64 ver;
+ } __attribute__ ((packed)) assert_ver;
+ struct {
__le64 offset, length;
__le64 src_offset;
} __attribute__ ((packed)) clonerange;
diff --git a/include/linux/cfi.h b/include/linux/cfi.h
index 5e134f4ce8b7..f0df518e11dd 100644
--- a/include/linux/cfi.h
+++ b/include/linux/cfi.h
@@ -9,6 +9,14 @@
#include <linux/bug.h>
#include <linux/module.h>
+#include <asm/cfi.h>
+
+#ifndef cfi_get_offset
+static inline int cfi_get_offset(void)
+{
+ return 0;
+}
+#endif
#ifdef CONFIG_CFI_CLANG
enum bug_trap_type report_cfi_failure(struct pt_regs *regs, unsigned long addr,
@@ -19,11 +27,13 @@ static inline enum bug_trap_type report_cfi_failure_noaddr(struct pt_regs *regs,
{
return report_cfi_failure(regs, addr, NULL, 0);
}
+#endif /* CONFIG_CFI_CLANG */
#ifdef CONFIG_ARCH_USES_CFI_TRAPS
bool is_cfi_trap(unsigned long addr);
+#else
+static inline bool is_cfi_trap(unsigned long addr) { return false; }
#endif
-#endif /* CONFIG_CFI_CLANG */
#ifdef CONFIG_MODULES
#ifdef CONFIG_ARCH_USES_CFI_TRAPS
@@ -36,4 +46,8 @@ static inline void module_cfi_finalize(const Elf_Ehdr *hdr,
#endif /* CONFIG_ARCH_USES_CFI_TRAPS */
#endif /* CONFIG_MODULES */
+#ifndef CFI_NOSEAL
+#define CFI_NOSEAL(x)
+#endif
+
#endif /* _LINUX_CFI_H */
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 8a0d5466c7be..ea48c861cd36 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -115,6 +115,11 @@ enum {
* Enable recursive subtree protection
*/
CGRP_ROOT_MEMORY_RECURSIVE_PROT = (1 << 18),
+
+ /*
+ * Enable hugetlb accounting for the memory controller.
+ */
+ CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING = (1 << 19),
};
/* cftype->flags */
@@ -238,7 +243,7 @@ struct css_set {
* Lists running through all tasks using this cgroup group.
* mg_tasks lists tasks which belong to this cset but are in the
* process of being migrated out or in. Protected by
- * css_set_rwsem, but, during migration, once tasks are moved to
+ * css_set_lock, but, during migration, once tasks are moved to
* mg_tasks, it can be read safely while holding cgroup_mutex.
*/
struct list_head tasks;
@@ -342,6 +347,20 @@ struct cgroup_rstat_cpu {
struct cgroup_base_stat last_bstat;
/*
+ * This field is used to record the cumulative per-cpu time of
+ * the cgroup and its descendants. Currently it can be read via
+ * eBPF/drgn etc, and we are still trying to determine how to
+ * expose it in the cgroupfs interface.
+ */
+ struct cgroup_base_stat subtree_bstat;
+
+ /*
+ * Snapshots at the last reading. These are used to calculate the
+ * deltas to propagate to the per-cpu subtree_bstat.
+ */
+ struct cgroup_base_stat last_subtree_bstat;
+
+ /*
* Child cgroups with stat updates on this cpu since the last read
* are linked on the parent's ->updated_children through
* ->updated_next.
@@ -477,6 +496,20 @@ struct cgroup {
struct cgroup_rstat_cpu __percpu *rstat_cpu;
struct list_head rstat_css_list;
+ /*
+ * Add padding to separate the read mostly rstat_cpu and
+ * rstat_css_list into a different cacheline from the following
+ * rstat_flush_next and *bstat fields which can have frequent updates.
+ */
+ CACHELINE_PADDING(_pad_);
+
+ /*
+ * A singly-linked list of cgroup structures to be rstat flushed.
+ * This is a scratch field to be used exclusively by
+ * cgroup_rstat_flush_locked() and protected by cgroup_rstat_lock.
+ */
+ struct cgroup *rstat_flush_next;
+
/* cgroup basic resource statistics */
struct cgroup_base_stat last_bstat;
struct cgroup_base_stat bstat;
@@ -529,6 +562,10 @@ struct cgroup_root {
/* Unique id for this hierarchy. */
int hierarchy_id;
+ /* A list running through the active hierarchies */
+ struct list_head root_list;
+ struct rcu_head rcu; /* Must be near the top */
+
/*
* The root cgroup. The containing cgroup_root will be destroyed on its
* release. cgrp->ancestors[0] will be used overflowing into the
@@ -542,9 +579,6 @@ struct cgroup_root {
/* Number of cgroups in the hierarchy, used only for /proc/cgroups */
atomic_t nr_cgrps;
- /* A list running through the active hierarchies */
- struct list_head root_list;
-
/* Hierarchy-specific flags */
unsigned int flags;
@@ -661,6 +695,8 @@ struct cgroup_subsys {
void (*css_rstat_flush)(struct cgroup_subsys_state *css, int cpu);
int (*css_extra_stat_show)(struct seq_file *seq,
struct cgroup_subsys_state *css);
+ int (*css_local_stat_show)(struct seq_file *seq,
+ struct cgroup_subsys_state *css);
int (*can_attach)(struct cgroup_taskset *tset);
void (*cancel_attach)(struct cgroup_taskset *tset);
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 885f5395fcd0..34aaf0e87def 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -40,13 +40,11 @@ struct kernel_clone_args;
#define CGROUP_WEIGHT_DFL 100
#define CGROUP_WEIGHT_MAX 10000
-/* walk only threadgroup leaders */
-#define CSS_TASK_ITER_PROCS (1U << 0)
-/* walk all threaded css_sets in the domain */
-#define CSS_TASK_ITER_THREADED (1U << 1)
-
-/* internal flags */
-#define CSS_TASK_ITER_SKIPPED (1U << 16)
+enum {
+ CSS_TASK_ITER_PROCS = (1U << 0), /* walk only threadgroup leaders */
+ CSS_TASK_ITER_THREADED = (1U << 1), /* walk all threaded css_sets in the domain */
+ CSS_TASK_ITER_SKIPPED = (1U << 16), /* internal flags */
+};
/* a css_task_iter should be treated as an opaque object */
struct css_task_iter {
@@ -71,6 +69,7 @@ struct css_task_iter {
extern struct file_system_type cgroup_fs_type;
extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;
+extern spinlock_t css_set_lock;
#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h>
@@ -118,7 +117,6 @@ int cgroup_rm_cftypes(struct cftype *cfts);
void cgroup_file_notify(struct cgroup_file *cfile);
void cgroup_file_show(struct cgroup_file *cfile, bool show);
-int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *tsk);
@@ -389,7 +387,6 @@ static inline void cgroup_unlock(void)
* as locks used during the cgroup_subsys::attach() methods.
*/
#ifdef CONFIG_PROVE_RCU
-extern spinlock_t css_set_lock;
#define task_css_set_check(task, __c) \
rcu_dereference_check((task)->cgroups, \
rcu_read_lock_sched_held() || \
@@ -692,7 +689,6 @@ static inline void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen)
*/
void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
void cgroup_rstat_flush(struct cgroup *cgrp);
-void cgroup_rstat_flush_atomic(struct cgroup *cgrp);
void cgroup_rstat_flush_hold(struct cgroup *cgrp);
void cgroup_rstat_flush_release(void);
@@ -857,4 +853,6 @@ static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
#endif /* CONFIG_CGROUP_BPF */
+struct cgroup *task_get_cgroup1(struct task_struct *tsk, int hierarchy_id);
+
#endif /* _LINUX_CGROUP_H */
diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h
new file mode 100644
index 000000000000..c2d09bc4f976
--- /dev/null
+++ b/include/linux/cleanup.h
@@ -0,0 +1,250 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_GUARDS_H
+#define __LINUX_GUARDS_H
+
+#include <linux/compiler.h>
+
+/*
+ * DEFINE_FREE(name, type, free):
+ * simple helper macro that defines the required wrapper for a __free()
+ * based cleanup function. @free is an expression using '_T' to access the
+ * variable. @free should typically include a NULL test before calling a
+ * function, see the example below.
+ *
+ * __free(name):
+ * variable attribute to add a scoped based cleanup to the variable.
+ *
+ * no_free_ptr(var):
+ * like a non-atomic xchg(var, NULL), such that the cleanup function will
+ * be inhibited -- provided it sanely deals with a NULL value.
+ *
+ * NOTE: this has __must_check semantics so that it is harder to accidentally
+ * leak the resource.
+ *
+ * return_ptr(p):
+ * returns p while inhibiting the __free().
+ *
+ * Ex.
+ *
+ * DEFINE_FREE(kfree, void *, if (_T) kfree(_T))
+ *
+ * void *alloc_obj(...)
+ * {
+ * struct obj *p __free(kfree) = kmalloc(...);
+ * if (!p)
+ * return NULL;
+ *
+ * if (!init_obj(p))
+ * return NULL;
+ *
+ * return_ptr(p);
+ * }
+ *
+ * NOTE: the DEFINE_FREE()'s @free expression includes a NULL test even though
+ * kfree() is fine to be called with a NULL value. This is on purpose. This way
+ * the compiler sees the end of our alloc_obj() function as:
+ *
+ * tmp = p;
+ * p = NULL;
+ * if (p)
+ * kfree(p);
+ * return tmp;
+ *
+ * And through the magic of value-propagation and dead-code-elimination, it
+ * eliminates the actual cleanup call and compiles into:
+ *
+ * return p;
+ *
+ * Without the NULL test it turns into a mess and the compiler can't help us.
+ */
+
+#define DEFINE_FREE(_name, _type, _free) \
+ static inline void __free_##_name(void *p) { _type _T = *(_type *)p; _free; }
+
+#define __free(_name) __cleanup(__free_##_name)
+
+#define __get_and_null_ptr(p) \
+ ({ __auto_type __ptr = &(p); \
+ __auto_type __val = *__ptr; \
+ *__ptr = NULL; __val; })
+
+static inline __must_check
+const volatile void * __must_check_fn(const volatile void *val)
+{ return val; }
+
+#define no_free_ptr(p) \
+ ((typeof(p)) __must_check_fn(__get_and_null_ptr(p)))
+
+#define return_ptr(p) return no_free_ptr(p)
+
+
+/*
+ * DEFINE_CLASS(name, type, exit, init, init_args...):
+ * helper to define the destructor and constructor for a type.
+ * @exit is an expression using '_T' -- similar to FREE above.
+ * @init is an expression in @init_args resulting in @type
+ *
+ * EXTEND_CLASS(name, ext, init, init_args...):
+ * extends class @name to @name@ext with the new constructor
+ *
+ * CLASS(name, var)(args...):
+ * declare the variable @var as an instance of the named class
+ *
+ * Ex.
+ *
+ * DEFINE_CLASS(fdget, struct fd, fdput(_T), fdget(fd), int fd)
+ *
+ * CLASS(fdget, f)(fd);
+ * if (!f.file)
+ * return -EBADF;
+ *
+ * // use 'f' without concern
+ */
+
+#define DEFINE_CLASS(_name, _type, _exit, _init, _init_args...) \
+typedef _type class_##_name##_t; \
+static inline void class_##_name##_destructor(_type *p) \
+{ _type _T = *p; _exit; } \
+static inline _type class_##_name##_constructor(_init_args) \
+{ _type t = _init; return t; }
+
+#define EXTEND_CLASS(_name, ext, _init, _init_args...) \
+typedef class_##_name##_t class_##_name##ext##_t; \
+static inline void class_##_name##ext##_destructor(class_##_name##_t *p)\
+{ class_##_name##_destructor(p); } \
+static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
+{ class_##_name##_t t = _init; return t; }
+
+#define CLASS(_name, var) \
+ class_##_name##_t var __cleanup(class_##_name##_destructor) = \
+ class_##_name##_constructor
+
+
+/*
+ * DEFINE_GUARD(name, type, lock, unlock):
+ * trivial wrapper around DEFINE_CLASS() above specifically
+ * for locks.
+ *
+ * DEFINE_GUARD_COND(name, ext, condlock)
+ * wrapper around EXTEND_CLASS above to add conditional lock
+ * variants to a base class, eg. mutex_trylock() or
+ * mutex_lock_interruptible().
+ *
+ * guard(name):
+ * an anonymous instance of the (guard) class, not recommended for
+ * conditional locks.
+ *
+ * scoped_guard (name, args...) { }:
+ * similar to CLASS(name, scope)(args), except the variable (with the
+ * explicit name 'scope') is declared in a for-loop such that its scope is
+ * bound to the next (compound) statement.
+ *
+ * for conditional locks the loop body is skipped when the lock is not
+ * acquired.
+ *
+ * scoped_cond_guard (name, fail, args...) { }:
+ * similar to scoped_guard(), except it runs the @fail statement when
+ * the lock acquisition fails.
+ *
+ */
+
+#define DEFINE_GUARD(_name, _type, _lock, _unlock) \
+ DEFINE_CLASS(_name, _type, if (_T) { _unlock; }, ({ _lock; _T; }), _type _T); \
+ static inline void * class_##_name##_lock_ptr(class_##_name##_t *_T) \
+ { return *_T; }
+
+#define DEFINE_GUARD_COND(_name, _ext, _condlock) \
+ EXTEND_CLASS(_name, _ext, \
+ ({ void *_t = _T; if (_T && !(_condlock)) _t = NULL; _t; }), \
+ class_##_name##_t _T) \
+ static inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
+ { return class_##_name##_lock_ptr(_T); }
+
+#define guard(_name) \
+ CLASS(_name, __UNIQUE_ID(guard))
+
+#define __guard_ptr(_name) class_##_name##_lock_ptr
+
+#define scoped_guard(_name, args...) \
+ for (CLASS(_name, scope)(args), \
+ *done = NULL; __guard_ptr(_name)(&scope) && !done; done = (void *)1)
+
+#define scoped_cond_guard(_name, _fail, args...) \
+ for (CLASS(_name, scope)(args), \
+ *done = NULL; !done; done = (void *)1) \
+ if (!__guard_ptr(_name)(&scope)) _fail; \
+ else
+
+/*
+ * Additional helper macros for generating lock guards with types, either for
+ * locks that don't have a native type (eg. RCU, preempt) or those that need a
+ * 'fat' pointer (eg. spin_lock_irqsave).
+ *
+ * DEFINE_LOCK_GUARD_0(name, lock, unlock, ...)
+ * DEFINE_LOCK_GUARD_1(name, type, lock, unlock, ...)
+ * DEFINE_LOCK_GUARD_1_COND(name, ext, condlock)
+ *
+ * will result in the following type:
+ *
+ * typedef struct {
+ * type *lock; // 'type := void' for the _0 variant
+ * __VA_ARGS__;
+ * } class_##name##_t;
+ *
+ * As above, both _lock and _unlock are statements, except this time '_T' will
+ * be a pointer to the above struct.
+ */
+
+#define __DEFINE_UNLOCK_GUARD(_name, _type, _unlock, ...) \
+typedef struct { \
+ _type *lock; \
+ __VA_ARGS__; \
+} class_##_name##_t; \
+ \
+static inline void class_##_name##_destructor(class_##_name##_t *_T) \
+{ \
+ if (_T->lock) { _unlock; } \
+} \
+ \
+static inline void *class_##_name##_lock_ptr(class_##_name##_t *_T) \
+{ \
+ return _T->lock; \
+}
+
+
+#define __DEFINE_LOCK_GUARD_1(_name, _type, _lock) \
+static inline class_##_name##_t class_##_name##_constructor(_type *l) \
+{ \
+ class_##_name##_t _t = { .lock = l }, *_T = &_t; \
+ _lock; \
+ return _t; \
+}
+
+#define __DEFINE_LOCK_GUARD_0(_name, _lock) \
+static inline class_##_name##_t class_##_name##_constructor(void) \
+{ \
+ class_##_name##_t _t = { .lock = (void*)1 }, \
+ *_T __maybe_unused = &_t; \
+ _lock; \
+ return _t; \
+}
+
+#define DEFINE_LOCK_GUARD_1(_name, _type, _lock, _unlock, ...) \
+__DEFINE_UNLOCK_GUARD(_name, _type, _unlock, __VA_ARGS__) \
+__DEFINE_LOCK_GUARD_1(_name, _type, _lock)
+
+#define DEFINE_LOCK_GUARD_0(_name, _lock, _unlock, ...) \
+__DEFINE_UNLOCK_GUARD(_name, void, _unlock, __VA_ARGS__) \
+__DEFINE_LOCK_GUARD_0(_name, _lock)
+
+#define DEFINE_LOCK_GUARD_1_COND(_name, _ext, _condlock) \
+ EXTEND_CLASS(_name, _ext, \
+ ({ class_##_name##_t _t = { .lock = l }, *_T = &_t;\
+ if (_T->lock && !(_condlock)) _T->lock = NULL; \
+ _t; }), \
+ typeof_member(class_##_name##_t, lock) l) \
+ static inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
+ { return class_##_name##_lock_ptr(_T); }
+
+
+#endif /* __LINUX_GUARDS_H */
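
The guard machinery above is easiest to see in use. A minimal sketch, assuming a mutex guard class has been defined elsewhere as DEFINE_GUARD(mutex, struct mutex *, mutex_lock(_T), mutex_unlock(_T)):

static int demo_guard(struct mutex *m, int *val)
{
	guard(mutex)(m);	/* mutex_unlock() runs on every return path */
	*val += 1;
	return 0;
}

static void demo_scoped_guard(struct mutex *m, int *val)
{
	scoped_guard (mutex, m) {
		/* lock held only for this compound statement */
		*val += 1;
	}
}
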
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index ef26dc4ca74e..810db66383b3 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -74,7 +74,7 @@ void clk_hw_forward_rate_request(const struct clk_hw *core,
unsigned long parent_rate);
/**
- * struct clk_duty - Struture encoding the duty cycle ratio of a clock
+ * struct clk_duty - Structure encoding the duty cycle ratio of a clock
*
* @num: Numerator of the duty cycle ratio
* @den: Denominator of the duty cycle ratio
@@ -129,7 +129,7 @@ struct clk_duty {
* @restore_context: Restore the context of the clock after a restoration
* of power.
*
- * @recalc_rate Recalculate the rate of this clock, by querying hardware. The
+ * @recalc_rate: Recalculate the rate of this clock, by querying hardware. The
* parent rate is an input parameter. It is up to the caller to
* ensure that the prepare_mutex is held across this call. If the
* driver cannot figure out a rate for this clock, it must return
@@ -415,7 +415,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
* @flags: framework-specific flags
* @fixed_rate: non-adjustable clock rate
*/
-#define clk_hw_register_fixed_rate_parent_data(dev, name, parent_hw, flags, \
+#define clk_hw_register_fixed_rate_parent_data(dev, name, parent_data, flags, \
fixed_rate) \
__clk_hw_register_fixed_rate((dev), NULL, (name), NULL, NULL, \
(parent_data), (flags), (fixed_rate), 0, \
@@ -448,15 +448,15 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
*/
#define clk_hw_register_fixed_rate_with_accuracy_parent_hw(dev, name, \
parent_hw, flags, fixed_rate, fixed_accuracy) \
- __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, (parent_hw) \
- NULL, NULL, (flags), (fixed_rate), \
+ __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, (parent_hw), \
+ NULL, (flags), (fixed_rate), \
(fixed_accuracy), 0, false)
/**
* clk_hw_register_fixed_rate_with_accuracy_parent_data - register fixed-rate
* clock with the clock framework
* @dev: device that is registering this clock
* @name: name of this clock
- * @parent_name: name of clock's parent
+ * @parent_data: name of clock's parent
* @flags: framework-specific flags
* @fixed_rate: non-adjustable clock rate
* @fixed_accuracy: non-adjustable clock accuracy
@@ -471,7 +471,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
* the clock framework
* @dev: device that is registering this clock
* @name: name of this clock
- * @parent_name: name of clock's parent
+ * @parent_data: name of clock's parent
* @flags: framework-specific flags
* @fixed_rate: non-adjustable clock rate
*/
@@ -649,7 +649,7 @@ struct clk_div_table {
* Clock with an adjustable divider affecting its output frequency. Implements
* .recalc_rate, .set_rate and .round_rate
*
- * Flags:
+ * @flags:
* CLK_DIVIDER_ONE_BASED - by default the divisor is the value read from the
* register plus one. If CLK_DIVIDER_ONE_BASED is set then the divider is
* the raw value read from the register, with the value of zero considered
@@ -1130,11 +1130,12 @@ struct clk_hw *clk_hw_register_fixed_factor_parent_hw(struct device *dev,
* @mwidth: width of the numerator bit field
* @nshift: shift to the denominator bit field
* @nwidth: width of the denominator bit field
+ * @approximation: clk driver's callback for calculating the divider clock
* @lock: register lock
*
* Clock with adjustable fractional divider affecting its output frequency.
*
- * Flags:
+ * @flags:
* CLK_FRAC_DIVIDER_ZERO_BASED - by default the numerator and denominator
* is the value read from the register. If CLK_FRAC_DIVIDER_ZERO_BASED
* is set then the numerator and denominator are both the value read
@@ -1191,7 +1192,7 @@ void clk_hw_unregister_fractional_divider(struct clk_hw *hw);
* Clock with an adjustable multiplier affecting its output frequency.
* Implements .recalc_rate, .set_rate and .round_rate
*
- * Flags:
+ * @flags:
* CLK_MULTIPLIER_ZERO_BYPASS - By default, the multiplier is the value read
* from the register, with 0 being a valid value effectively
* zeroing the output clock rate. If CLK_MULTIPLIER_ZERO_BYPASS is
@@ -1371,6 +1372,8 @@ int __clk_mux_determine_rate_closest(struct clk_hw *hw,
int clk_mux_determine_rate_flags(struct clk_hw *hw,
struct clk_rate_request *req,
unsigned long flags);
+int clk_hw_determine_rate_no_reparent(struct clk_hw *hw,
+ struct clk_rate_request *req);
void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent);
void clk_hw_get_rate_range(struct clk_hw *hw, unsigned long *min_rate,
unsigned long *max_rate);
@@ -1415,7 +1418,7 @@ struct clk_onecell_data {
struct clk_hw_onecell_data {
unsigned int num;
- struct clk_hw *hws[];
+ struct clk_hw *hws[] __counted_by(num);
};
#define CLK_OF_DECLARE(name, compat, fn) \
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 1ef013324237..06f1b292f8a0 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -183,6 +183,39 @@ int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale);
*/
bool clk_is_match(const struct clk *p, const struct clk *q);
+/**
+ * clk_rate_exclusive_get - get exclusivity over the rate control of a
+ * producer
+ * @clk: clock source
+ *
+ * This function allows drivers to get exclusive control over the rate of a
+ * provider. It prevents any other consumer from executing, even indirectly,
+ * an operation which could alter the rate of the provider or cause glitches.
+ *
+ * If exclusivity is claimed more than once on a clock, even by the same
+ * driver, the rate effectively gets locked, as exclusivity can't be preempted.
+ *
+ * Must not be called from within atomic context.
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_rate_exclusive_get(struct clk *clk);
+
+/**
+ * clk_rate_exclusive_put - release exclusivity over the rate control of a
+ * producer
+ * @clk: clock source
+ *
+ * This function allows drivers to release the exclusivity they previously
+ * got from clk_rate_exclusive_get().
+ *
+ * The caller must balance the number of clk_rate_exclusive_get() and
+ * clk_rate_exclusive_put() calls.
+ *
+ * Must not be called from within atomic context.
+ */
+void clk_rate_exclusive_put(struct clk *clk);
+
#else
static inline int clk_notifier_register(struct clk *clk,
@@ -236,6 +269,13 @@ static inline bool clk_is_match(const struct clk *p, const struct clk *q)
return p == q;
}
+static inline int clk_rate_exclusive_get(struct clk *clk)
+{
+ return 0;
+}
+
+static inline void clk_rate_exclusive_put(struct clk *clk) {}
+
#endif
#ifdef CONFIG_HAVE_CLK_PREPARE
@@ -583,38 +623,6 @@ struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id);
*/
struct clk *devm_get_clk_from_child(struct device *dev,
struct device_node *np, const char *con_id);
-/**
- * clk_rate_exclusive_get - get exclusivity over the rate control of a
- * producer
- * @clk: clock source
- *
- * This function allows drivers to get exclusive control over the rate of a
- * provider. It prevents any other consumer to execute, even indirectly,
- * opereation which could alter the rate of the provider or cause glitches
- *
- * If exlusivity is claimed more than once on clock, even by the same driver,
- * the rate effectively gets locked as exclusivity can't be preempted.
- *
- * Must not be called from within atomic context.
- *
- * Returns success (0) or negative errno.
- */
-int clk_rate_exclusive_get(struct clk *clk);
-
-/**
- * clk_rate_exclusive_put - release exclusivity over the rate control of a
- * producer
- * @clk: clock source
- *
- * This function allows drivers to release the exclusivity it previously got
- * from clk_rate_exclusive_get()
- *
- * The caller must balance the number of clk_rate_exclusive_get() and
- * clk_rate_exclusive_put() calls.
- *
- * Must not be called from within atomic context.
- */
-void clk_rate_exclusive_put(struct clk *clk);
/**
* clk_enable - inform the system when the clock source should be running.
@@ -974,14 +982,6 @@ static inline void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) {}
static inline void devm_clk_put(struct device *dev, struct clk *clk) {}
-
-static inline int clk_rate_exclusive_get(struct clk *clk)
-{
- return 0;
-}
-
-static inline void clk_rate_exclusive_put(struct clk *clk) {}
-
static inline int clk_enable(struct clk *clk)
{
return 0;
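
A balanced-call sketch for the exclusivity API documented above; the driver function and rate value are hypothetical:

static int demo_set_locked_rate(struct clk *clk, unsigned long rate)
{
	int ret;

	ret = clk_rate_exclusive_get(clk);
	if (ret)
		return ret;

	ret = clk_set_rate(clk, rate);

	clk_rate_exclusive_put(clk);	/* every get must be balanced by a put */
	return ret;
}
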
diff --git a/include/linux/clk/mmp.h b/include/linux/clk/mmp.h
deleted file mode 100644
index 445130460380..000000000000
--- a/include/linux/clk/mmp.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __CLK_MMP_H
-#define __CLK_MMP_H
-
-#include <linux/types.h>
-
-extern void pxa168_clk_init(phys_addr_t mpmu_phys,
- phys_addr_t apmu_phys,
- phys_addr_t apbc_phys);
-extern void pxa910_clk_init(phys_addr_t mpmu_phys,
- phys_addr_t apmu_phys,
- phys_addr_t apbc_phys,
- phys_addr_t apbcp_phys);
-extern void mmp2_clk_init(phys_addr_t mpmu_phys,
- phys_addr_t apmu_phys,
- phys_addr_t apbc_phys);
-
-#endif
diff --git a/include/linux/closure.h b/include/linux/closure.h
new file mode 100644
index 000000000000..c554c6a08768
--- /dev/null
+++ b/include/linux/closure.h
@@ -0,0 +1,415 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CLOSURE_H
+#define _LINUX_CLOSURE_H
+
+#include <linux/llist.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/workqueue.h>
+
+/*
+ * Closure is perhaps the most overused and abused term in computer science, but
+ * since I've been unable to come up with anything better you're stuck with it
+ * again.
+ *
+ * What are closures?
+ *
+ * They embed a refcount. The basic idea is they count "things that are in
+ * progress" - in flight bios, some other thread that's doing something else -
+ * anything you might want to wait on.
+ *
+ * The refcount may be manipulated with closure_get() and closure_put().
+ * closure_put() is where many of the interesting things happen, when it causes
+ * the refcount to go to 0.
+ *
+ * Closures can be used to wait on things both synchronously and asynchronously,
+ * and synchronous and asynchronous use can be mixed without restriction. To
+ * wait synchronously, use closure_sync() - you will sleep until your closure's
+ * refcount hits 1.
+ *
+ * To wait asynchronously, use
+ * continue_at(cl, next_function, workqueue);
+ *
+ * passing it, as you might expect, the function to run when nothing is pending
+ * and the workqueue to run that function out of.
+ *
+ * continue_at() also, critically, requires a 'return' immediately following the
+ * location where this macro is referenced, to return to the calling function.
+ * There's good reason for this.
+ *
+ * To safely use closures asynchronously, they must always have a refcount while
+ * they are running, owned by the thread that is running them. Otherwise, suppose
+ * you submit some bios and wish to have a function run when they all complete:
+ *
+ * foo_endio(struct bio *bio)
+ * {
+ * closure_put(cl);
+ * }
+ *
+ * closure_init(cl);
+ *
+ * do_stuff();
+ * closure_get(cl);
+ * bio1->bi_endio = foo_endio;
+ * bio_submit(bio1);
+ *
+ * do_more_stuff();
+ * closure_get(cl);
+ * bio2->bi_endio = foo_endio;
+ * bio_submit(bio2);
+ *
+ * continue_at(cl, complete_some_read, system_wq);
+ *
+ * If the closure's refcount started at 0, complete_some_read() could run before the
+ * second bio was submitted - which is almost always not what you want! More
+ * importantly, it wouldn't be possible to say whether the original thread or
+ * complete_some_read()'s thread owned the closure - and whatever state it was
+ * associated with!
+ *
+ * So, closure_init() initializes a closure's refcount to 1 - and when a
+ * closure_fn is run, the refcount will be reset to 1 first.
+ *
+ * Then, the rule is - if you got the refcount with closure_get(), release it
+ * with closure_put() (i.e, in a bio->bi_endio function). If you have a refcount
+ * on a closure because you called closure_init() or you were run out of a
+ * closure - _always_ use continue_at(). Doing so consistently will help
+ * eliminate an entire class of particularly pernicious races.
+ *
+ * Lastly, you might have a wait list dedicated to a specific event, and have no
+ * need for specifying the condition - you just want to wait until someone runs
+ * closure_wake_up() on the appropriate wait list. In that case, just use
+ * closure_wait(). It will return either true or false, depending on whether the
+ * closure was already on a wait list or not - a closure can only be on one wait
+ * list at a time.
+ *
+ * Parents:
+ *
+ * closure_init() takes two arguments - it takes the closure to initialize, and
+ * a (possibly null) parent.
+ *
+ * If parent is non null, the new closure will have a refcount for its lifetime;
+ * a closure is considered to be "finished" when its refcount hits 0 and the
+ * function to run is null. Hence
+ *
+ * continue_at(cl, NULL, NULL);
+ *
+ * returns up the (spaghetti) stack of closures, precisely like normal return
+ * returns up the C stack. continue_at() with non null fn is better thought of
+ * as doing a tail call.
+ *
+ * All this implies that a closure should typically be embedded in a particular
+ * struct (which its refcount will normally control the lifetime of), and that
+ * struct can very much be thought of as a stack frame.
+ */
+
+struct closure;
+struct closure_syncer;
+typedef void (closure_fn) (struct work_struct *);
+extern struct dentry *bcache_debug;
+
+struct closure_waitlist {
+ struct llist_head list;
+};
+
+enum closure_state {
+ /*
+ * CLOSURE_WAITING: Set iff the closure is on a waitlist. Must be set by
+ * the thread that owns the closure, and cleared by the thread that's
+ * waking up the closure.
+ *
+ * The rest are for debugging and don't affect behaviour:
+ *
+ * CLOSURE_RUNNING: Set when a closure is running (i.e. by
+ * closure_init() and when closure_put() runs the next function), and
+ * must be cleared before remaining hits 0. Primarily to help guard
+ * against incorrect usage and accidentally transferring references.
+ * continue_at() and closure_return() clear it for you, if you're doing
+ * something unusual you can use closure_set_dead() which also helps
+ * annotate where references are being transferred.
+ */
+
+ CLOSURE_BITS_START = (1U << 26),
+ CLOSURE_DESTRUCTOR = (1U << 26),
+ CLOSURE_WAITING = (1U << 28),
+ CLOSURE_RUNNING = (1U << 30),
+};
+
+#define CLOSURE_GUARD_MASK \
+ ((CLOSURE_DESTRUCTOR|CLOSURE_WAITING|CLOSURE_RUNNING) << 1)
+
+#define CLOSURE_REMAINING_MASK (CLOSURE_BITS_START - 1)
+#define CLOSURE_REMAINING_INITIALIZER (1|CLOSURE_RUNNING)
+
+struct closure {
+ union {
+ struct {
+ struct workqueue_struct *wq;
+ struct closure_syncer *s;
+ struct llist_node list;
+ closure_fn *fn;
+ };
+ struct work_struct work;
+ };
+
+ struct closure *parent;
+
+ atomic_t remaining;
+ bool closure_get_happened;
+
+#ifdef CONFIG_DEBUG_CLOSURES
+#define CLOSURE_MAGIC_DEAD 0xc054dead
+#define CLOSURE_MAGIC_ALIVE 0xc054a11e
+
+ unsigned int magic;
+ struct list_head all;
+ unsigned long ip;
+ unsigned long waiting_on;
+#endif
+};
+
+void closure_sub(struct closure *cl, int v);
+void closure_put(struct closure *cl);
+void __closure_wake_up(struct closure_waitlist *list);
+bool closure_wait(struct closure_waitlist *list, struct closure *cl);
+void __closure_sync(struct closure *cl);
+
+static inline unsigned closure_nr_remaining(struct closure *cl)
+{
+ return atomic_read(&cl->remaining) & CLOSURE_REMAINING_MASK;
+}
+
+/**
+ * closure_sync - sleep until a closure has nothing left to wait on
+ *
+ * Sleeps until the refcount hits 1 - the thread that's running the closure owns
+ * the last refcount.
+ */
+static inline void closure_sync(struct closure *cl)
+{
+#ifdef CONFIG_DEBUG_CLOSURES
+ BUG_ON(closure_nr_remaining(cl) != 1 && !cl->closure_get_happened);
+#endif
+
+ if (cl->closure_get_happened)
+ __closure_sync(cl);
+}
+
+#ifdef CONFIG_DEBUG_CLOSURES
+
+void closure_debug_create(struct closure *cl);
+void closure_debug_destroy(struct closure *cl);
+
+#else
+
+static inline void closure_debug_create(struct closure *cl) {}
+static inline void closure_debug_destroy(struct closure *cl) {}
+
+#endif
+
+static inline void closure_set_ip(struct closure *cl)
+{
+#ifdef CONFIG_DEBUG_CLOSURES
+ cl->ip = _THIS_IP_;
+#endif
+}
+
+static inline void closure_set_ret_ip(struct closure *cl)
+{
+#ifdef CONFIG_DEBUG_CLOSURES
+ cl->ip = _RET_IP_;
+#endif
+}
+
+static inline void closure_set_waiting(struct closure *cl, unsigned long f)
+{
+#ifdef CONFIG_DEBUG_CLOSURES
+ cl->waiting_on = f;
+#endif
+}
+
+static inline void closure_set_stopped(struct closure *cl)
+{
+ atomic_sub(CLOSURE_RUNNING, &cl->remaining);
+}
+
+static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
+ struct workqueue_struct *wq)
+{
+ closure_set_ip(cl);
+ cl->fn = fn;
+ cl->wq = wq;
+}
+
+static inline void closure_queue(struct closure *cl)
+{
+ struct workqueue_struct *wq = cl->wq;
+	/*
+	 * Changes made to closure, work_struct, or a couple of other structs
+	 * may cause work.func to not point to the right location.
+	 */
+ BUILD_BUG_ON(offsetof(struct closure, fn)
+ != offsetof(struct work_struct, func));
+
+ if (wq) {
+ INIT_WORK(&cl->work, cl->work.func);
+ BUG_ON(!queue_work(wq, &cl->work));
+ } else
+ cl->fn(&cl->work);
+}
+
+/**
+ * closure_get - increment a closure's refcount
+ */
+static inline void closure_get(struct closure *cl)
+{
+ cl->closure_get_happened = true;
+
+#ifdef CONFIG_DEBUG_CLOSURES
+ BUG_ON((atomic_inc_return(&cl->remaining) &
+ CLOSURE_REMAINING_MASK) <= 1);
+#else
+ atomic_inc(&cl->remaining);
+#endif
+}
+
+/**
+ * closure_init - Initialize a closure, setting the refcount to 1
+ * @cl: closure to initialize
+ * @parent: parent of the new closure. cl will take a refcount on it for its
+ * lifetime; may be NULL.
+ */
+static inline void closure_init(struct closure *cl, struct closure *parent)
+{
+ cl->fn = NULL;
+ cl->parent = parent;
+ if (parent)
+ closure_get(parent);
+
+ atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
+ cl->closure_get_happened = false;
+
+ closure_debug_create(cl);
+ closure_set_ip(cl);
+}
+
+static inline void closure_init_stack(struct closure *cl)
+{
+ memset(cl, 0, sizeof(struct closure));
+ atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
+}
+
+/**
+ * closure_wake_up - wake up all closures on a wait list,
+ * with memory barrier
+ */
+static inline void closure_wake_up(struct closure_waitlist *list)
+{
+ /* Memory barrier for the wait list */
+ smp_mb();
+ __closure_wake_up(list);
+}
+
+#define CLOSURE_CALLBACK(name) void name(struct work_struct *ws)
+#define closure_type(name, type, member) \
+ struct closure *cl = container_of(ws, struct closure, work); \
+ type *name = container_of(cl, type, member)
+
+/**
+ * continue_at - jump to another function with barrier
+ *
+ * After @cl is no longer waiting on anything (i.e. all outstanding refs have
+ * been dropped with closure_put()), it will resume execution at @fn running out
+ * of @wq (or, if @wq is NULL, @fn will be called by closure_put() directly).
+ *
+ * This is because after calling continue_at() you no longer have a ref on @cl,
+ * and whatever @cl owns may be freed out from under you - a running closure fn
+ * has a ref on its own closure which continue_at() drops.
+ *
+ * Note you are expected to immediately return after using this macro.
+ */
+#define continue_at(_cl, _fn, _wq) \
+do { \
+ set_closure_fn(_cl, _fn, _wq); \
+ closure_sub(_cl, CLOSURE_RUNNING + 1); \
+} while (0)
+
+/**
+ * closure_return - finish execution of a closure
+ *
+ * This is used to indicate that @cl is finished: when all outstanding refs on
+ * @cl have been dropped @cl's ref on its parent closure (as passed to
+ * closure_init()) will be dropped, if one was specified - thus this can be
+ * thought of as returning to the parent closure.
+ */
+#define closure_return(_cl) continue_at((_cl), NULL, NULL)
+
+/**
+ * continue_at_nobarrier - jump to another function without barrier
+ *
+ * Causes @fn to be executed out of @cl, in @wq context (or called directly if
+ * @wq is NULL).
+ *
+ * The ref the caller of continue_at_nobarrier() had on @cl is now owned by @fn,
+ * thus it's not safe to touch anything protected by @cl after a
+ * continue_at_nobarrier().
+ */
+#define continue_at_nobarrier(_cl, _fn, _wq) \
+do { \
+ set_closure_fn(_cl, _fn, _wq); \
+ closure_queue(_cl); \
+} while (0)
+
+/**
+ * closure_return_with_destructor - finish execution of a closure,
+ * with destructor
+ *
+ * Works like closure_return(), except @destructor will be called when all
+ * outstanding refs on @cl have been dropped; @destructor may be used to safely
+ * free the memory occupied by @cl, and it is called with the ref on the parent
+ * closure still held - so @destructor could safely return an item to a
+ * freelist protected by @cl's parent.
+ */
+#define closure_return_with_destructor(_cl, _destructor) \
+do { \
+ set_closure_fn(_cl, _destructor, NULL); \
+ closure_sub(_cl, CLOSURE_RUNNING - CLOSURE_DESTRUCTOR + 1); \
+} while (0)
+
+/**
+ * closure_call - execute @fn out of a new, uninitialized closure
+ *
+ * Typically used when running out of one closure, and we want to run @fn
+ * asynchronously out of a new closure - @parent will then wait for @cl to
+ * finish.
+ */
+static inline void closure_call(struct closure *cl, closure_fn fn,
+ struct workqueue_struct *wq,
+ struct closure *parent)
+{
+ closure_init(cl, parent);
+ continue_at_nobarrier(cl, fn, wq);
+}
+
+#define __closure_wait_event(waitlist, _cond) \
+do { \
+ struct closure cl; \
+ \
+ closure_init_stack(&cl); \
+ \
+ while (1) { \
+ closure_wait(waitlist, &cl); \
+ if (_cond) \
+ break; \
+ closure_sync(&cl); \
+ } \
+ closure_wake_up(waitlist); \
+ closure_sync(&cl); \
+} while (0)
+
+#define closure_wait_event(waitlist, _cond) \
+do { \
+ if (!(_cond)) \
+ __closure_wait_event(waitlist, _cond); \
+} while (0)
+
+#endif /* _LINUX_CLOSURE_H */
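
A minimal synchronous-wait sketch using the API above. demo_submit() is hypothetical; the only assumption is that its completion path calls closure_put() on the closure it was handed:

static void demo_submit(struct closure *cl);	/* hypothetical async op */

static void demo_wait_for_completion(void)
{
	struct closure cl;

	closure_init_stack(&cl);

	closure_get(&cl);	/* one ref per in-flight item */
	demo_submit(&cl);	/* completion side calls closure_put(&cl) */

	closure_sync(&cl);	/* sleep until only our ref remains */
}
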
diff --git a/include/linux/comedi/comedi_8254.h b/include/linux/comedi/comedi_8254.h
index d8264417e53c..d527f04400df 100644
--- a/include/linux/comedi/comedi_8254.h
+++ b/include/linux/comedi/comedi_8254.h
@@ -12,6 +12,8 @@
#define _COMEDI_8254_H
#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/err.h>
struct comedi_device;
struct comedi_insn;
@@ -57,10 +59,24 @@ struct comedi_subdevice;
/* counter maps zero to 0x10000 */
#define I8254_MAX_COUNT 0x10000
+struct comedi_8254;
+
+/**
+ * typedef comedi_8254_iocb_fn - call-back function type for 8254 register access
+ * @i8254: pointer to struct comedi_8254
+ * @dir: direction (0 = read, 1 = write)
+ * @reg: register number
+ * @val: value to write
+ *
+ * Return: Register value when reading, 0 when writing.
+ */
+typedef unsigned int comedi_8254_iocb_fn(struct comedi_8254 *i8254, int dir,
+ unsigned int reg, unsigned int val);
+
/**
* struct comedi_8254 - private data used by this module
- * @iobase: PIO base address of the registers (in/out)
- * @mmio: MMIO base address of the registers (read/write)
+ * @iocb: I/O call-back function for register access
+ * @context: context for register access (e.g. a base address)
* @iosize: I/O size used to access the registers (b/w/l)
* @regshift: register gap shift
* @osc_base: cascaded oscillator speed in ns
@@ -76,8 +92,8 @@ struct comedi_subdevice;
* @insn_config: driver specific (*insn_config) callback
*/
struct comedi_8254 {
- unsigned long iobase;
- void __iomem *mmio;
+ comedi_8254_iocb_fn *iocb;
+ unsigned long context;
unsigned int iosize;
unsigned int regshift;
unsigned int osc_base;
@@ -122,13 +138,24 @@ void comedi_8254_set_busy(struct comedi_8254 *i8254,
void comedi_8254_subdevice_init(struct comedi_subdevice *s,
struct comedi_8254 *i8254);
-struct comedi_8254 *comedi_8254_init(unsigned long iobase,
- unsigned int osc_base,
- unsigned int iosize,
- unsigned int regshift);
-struct comedi_8254 *comedi_8254_mm_init(void __iomem *mmio,
- unsigned int osc_base,
- unsigned int iosize,
- unsigned int regshift);
+#ifdef CONFIG_HAS_IOPORT
+struct comedi_8254 *comedi_8254_io_alloc(unsigned long iobase,
+ unsigned int osc_base,
+ unsigned int iosize,
+ unsigned int regshift);
+#else
+static inline struct comedi_8254 *comedi_8254_io_alloc(unsigned long iobase,
+ unsigned int osc_base,
+ unsigned int iosize,
+ unsigned int regshift)
+{
+ return ERR_PTR(-ENXIO);
+}
+#endif
+
+struct comedi_8254 *comedi_8254_mm_alloc(void __iomem *mmio,
+ unsigned int osc_base,
+ unsigned int iosize,
+ unsigned int regshift);
#endif /* _COMEDI_8254_H */
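
A sketch of a comedi_8254_iocb_fn matching the contract in the typedef's kerneldoc. Treating @context as an __iomem base address and assuming byte-wide registers with no regshift are illustration-only assumptions:

static unsigned int demo_8254_mmio_iocb(struct comedi_8254 *i8254, int dir,
					unsigned int reg, unsigned int val)
{
	void __iomem *base = (void __iomem *)i8254->context;

	if (dir) {
		writeb(val, base + reg);	/* write: return value is unused */
		return 0;
	}
	return readb(base + reg);		/* read: return the register value */
}
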
diff --git a/include/linux/comedi/comedi_8255.h b/include/linux/comedi/comedi_8255.h
index b2a5bc6b3a49..d24a69da389b 100644
--- a/include/linux/comedi/comedi_8255.h
+++ b/include/linux/comedi/comedi_8255.h
@@ -10,6 +10,8 @@
#ifndef _COMEDI_8255_H
#define _COMEDI_8255_H
+#include <linux/errno.h>
+
#define I8255_SIZE 0x04
#define I8255_DATA_A_REG 0x00
@@ -27,16 +29,26 @@
struct comedi_device;
struct comedi_subdevice;
-int subdev_8255_init(struct comedi_device *dev, struct comedi_subdevice *s,
- int (*io)(struct comedi_device *dev, int dir, int port,
- int data, unsigned long regbase),
- unsigned long regbase);
+#ifdef CONFIG_HAS_IOPORT
+int subdev_8255_io_init(struct comedi_device *dev, struct comedi_subdevice *s,
+ unsigned long regbase);
+#else
+static inline int subdev_8255_io_init(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned long regbase)
+{
+ return -ENXIO;
+}
+#endif
int subdev_8255_mm_init(struct comedi_device *dev, struct comedi_subdevice *s,
- int (*io)(struct comedi_device *dev, int dir, int port,
- int data, unsigned long regbase),
unsigned long regbase);
+int subdev_8255_cb_init(struct comedi_device *dev, struct comedi_subdevice *s,
+ int (*io)(struct comedi_device *dev, int dir, int port,
+ int data, unsigned long context),
+ unsigned long context);
+
unsigned long subdev_8255_regbase(struct comedi_subdevice *s);
#endif
diff --git a/include/linux/comedi/comedidev.h b/include/linux/comedi/comedidev.h
index 0a1150900ef3..c08416a7364b 100644
--- a/include/linux/comedi/comedidev.h
+++ b/include/linux/comedi/comedidev.h
@@ -633,7 +633,7 @@ extern const struct comedi_lrange range_unknown;
*/
struct comedi_lrange {
int length;
- struct comedi_krange range[];
+ struct comedi_krange range[] __counted_by(length);
};
/**
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index a6e512cfb670..e94776496049 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -89,89 +89,17 @@ extern enum compact_result try_to_compact_pages(gfp_t gfp_mask,
const struct alloc_context *ac, enum compact_priority prio,
struct page **page);
extern void reset_isolation_suitable(pg_data_t *pgdat);
-extern enum compact_result compaction_suitable(struct zone *zone, int order,
- unsigned int alloc_flags, int highest_zoneidx);
+extern bool compaction_suitable(struct zone *zone, int order,
+ int highest_zoneidx);
extern void compaction_defer_reset(struct zone *zone, int order,
bool alloc_success);
-/* Compaction has made some progress and retrying makes sense */
-static inline bool compaction_made_progress(enum compact_result result)
-{
- /*
- * Even though this might sound confusing this in fact tells us
- * that the compaction successfully isolated and migrated some
- * pageblocks.
- */
- if (result == COMPACT_SUCCESS)
- return true;
-
- return false;
-}
-
-/* Compaction has failed and it doesn't make much sense to keep retrying. */
-static inline bool compaction_failed(enum compact_result result)
-{
- /* All zones were scanned completely and still not result. */
- if (result == COMPACT_COMPLETE)
- return true;
-
- return false;
-}
-
-/* Compaction needs reclaim to be performed first, so it can continue. */
-static inline bool compaction_needs_reclaim(enum compact_result result)
-{
- /*
- * Compaction backed off due to watermark checks for order-0
- * so the regular reclaim has to try harder and reclaim something.
- */
- if (result == COMPACT_SKIPPED)
- return true;
-
- return false;
-}
-
-/*
- * Compaction has backed off for some reason after doing some work or none
- * at all. It might be throttling or lock contention. Retrying might be still
- * worthwhile, but with a higher priority if allowed.
- */
-static inline bool compaction_withdrawn(enum compact_result result)
-{
- /*
- * If compaction is deferred for high-order allocations, it is
- * because sync compaction recently failed. If this is the case
- * and the caller requested a THP allocation, we do not want
- * to heavily disrupt the system, so we fail the allocation
- * instead of entering direct reclaim.
- */
- if (result == COMPACT_DEFERRED)
- return true;
-
- /*
- * If compaction in async mode encounters contention or blocks higher
- * priority task we back off early rather than cause stalls.
- */
- if (result == COMPACT_CONTENDED)
- return true;
-
- /*
- * Page scanners have met but we haven't scanned full zones so this
- * is a back off in fact.
- */
- if (result == COMPACT_PARTIAL_SKIPPED)
- return true;
-
- return false;
-}
-
-
bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
int alloc_flags);
-extern void kcompactd_run(int nid);
-extern void kcompactd_stop(int nid);
+extern void __meminit kcompactd_run(int nid);
+extern void __meminit kcompactd_stop(int nid);
extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx);
#else
@@ -179,32 +107,12 @@ static inline void reset_isolation_suitable(pg_data_t *pgdat)
{
}
-static inline enum compact_result compaction_suitable(struct zone *zone, int order,
- int alloc_flags, int highest_zoneidx)
-{
- return COMPACT_SKIPPED;
-}
-
-static inline bool compaction_made_progress(enum compact_result result)
-{
- return false;
-}
-
-static inline bool compaction_failed(enum compact_result result)
-{
- return false;
-}
-
-static inline bool compaction_needs_reclaim(enum compact_result result)
+static inline bool compaction_suitable(struct zone *zone, int order,
+ int highest_zoneidx)
{
return false;
}
-static inline bool compaction_withdrawn(enum compact_result result)
-{
- return true;
-}
-
static inline void kcompactd_run(int nid)
{
}
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 44b1736c95b5..233f61ec8afc 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -581,11 +581,6 @@ asmlinkage long compat_sys_io_pgetevents_time64(compat_aio_context_t ctx_id,
struct io_event __user *events,
struct __kernel_timespec __user *timeout,
const struct __compat_aio_sigset __user *usig);
-
-/* fs/cookies.c */
-asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
-
-/* fs/eventpoll.c */
asmlinkage long compat_sys_epoll_pwait(int epfd,
struct epoll_event __user *events,
int maxevents, int timeout,
@@ -597,18 +592,12 @@ asmlinkage long compat_sys_epoll_pwait2(int epfd,
const struct __kernel_timespec __user *timeout,
const compat_sigset_t __user *sigmask,
compat_size_t sigsetsize);
-
-/* fs/fcntl.c */
asmlinkage long compat_sys_fcntl(unsigned int fd, unsigned int cmd,
compat_ulong_t arg);
asmlinkage long compat_sys_fcntl64(unsigned int fd, unsigned int cmd,
compat_ulong_t arg);
-
-/* fs/ioctl.c */
asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
compat_ulong_t arg);
-
-/* fs/open.c */
asmlinkage long compat_sys_statfs(const char __user *pathname,
struct compat_statfs __user *buf);
asmlinkage long compat_sys_statfs64(const char __user *pathname,
@@ -623,13 +612,9 @@ asmlinkage long compat_sys_ftruncate(unsigned int, compat_ulong_t);
/* No generic prototype for truncate64, ftruncate64, fallocate */
asmlinkage long compat_sys_openat(int dfd, const char __user *filename,
int flags, umode_t mode);
-
-/* fs/readdir.c */
asmlinkage long compat_sys_getdents(unsigned int fd,
struct compat_linux_dirent __user *dirent,
unsigned int count);
-
-/* fs/read_write.c */
asmlinkage long compat_sys_lseek(unsigned int, compat_off_t, unsigned int);
/* No generic prototype for pread64 and pwrite64 */
asmlinkage ssize_t compat_sys_preadv(compat_ulong_t fd,
@@ -649,14 +634,10 @@ asmlinkage long compat_sys_pwritev64(unsigned long fd,
const struct iovec __user *vec,
unsigned long vlen, loff_t pos);
#endif
-
-/* fs/sendfile.c */
asmlinkage long compat_sys_sendfile(int out_fd, int in_fd,
compat_off_t __user *offset, compat_size_t count);
asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd,
compat_loff_t __user *offset, compat_size_t count);
-
-/* fs/select.c */
asmlinkage long compat_sys_pselect6_time32(int n, compat_ulong_t __user *inp,
compat_ulong_t __user *outp,
compat_ulong_t __user *exp,
@@ -677,68 +658,45 @@ asmlinkage long compat_sys_ppoll_time64(struct pollfd __user *ufds,
struct __kernel_timespec __user *tsp,
const compat_sigset_t __user *sigmask,
compat_size_t sigsetsize);
-
-/* fs/signalfd.c */
asmlinkage long compat_sys_signalfd4(int ufd,
const compat_sigset_t __user *sigmask,
compat_size_t sigsetsize, int flags);
-
-/* fs/stat.c */
asmlinkage long compat_sys_newfstatat(unsigned int dfd,
const char __user *filename,
struct compat_stat __user *statbuf,
int flag);
asmlinkage long compat_sys_newfstat(unsigned int fd,
struct compat_stat __user *statbuf);
-
-/* fs/sync.c: No generic prototype for sync_file_range and sync_file_range2 */
-
-/* kernel/exit.c */
+/* No generic prototype for sync_file_range and sync_file_range2 */
asmlinkage long compat_sys_waitid(int, compat_pid_t,
struct compat_siginfo __user *, int,
struct compat_rusage __user *);
-
-
-
-/* kernel/futex.c */
asmlinkage long
compat_sys_set_robust_list(struct compat_robust_list_head __user *head,
compat_size_t len);
asmlinkage long
compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
compat_size_t __user *len_ptr);
-
-/* kernel/itimer.c */
asmlinkage long compat_sys_getitimer(int which,
struct old_itimerval32 __user *it);
asmlinkage long compat_sys_setitimer(int which,
struct old_itimerval32 __user *in,
struct old_itimerval32 __user *out);
-
-/* kernel/kexec.c */
asmlinkage long compat_sys_kexec_load(compat_ulong_t entry,
compat_ulong_t nr_segments,
struct compat_kexec_segment __user *,
compat_ulong_t flags);
-
-/* kernel/posix-timers.c */
asmlinkage long compat_sys_timer_create(clockid_t which_clock,
struct compat_sigevent __user *timer_event_spec,
timer_t __user *created_timer_id);
-
-/* kernel/ptrace.c */
asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
compat_long_t addr, compat_long_t data);
-
-/* kernel/sched/core.c */
asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid,
unsigned int len,
compat_ulong_t __user *user_mask_ptr);
asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid,
unsigned int len,
compat_ulong_t __user *user_mask_ptr);
-
-/* kernel/signal.c */
asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr,
compat_stack_t __user *uoss_ptr);
asmlinkage long compat_sys_rt_sigsuspend(compat_sigset_t __user *unewset,
@@ -763,25 +721,17 @@ asmlinkage long compat_sys_rt_sigtimedwait_time64(compat_sigset_t __user *uthese
asmlinkage long compat_sys_rt_sigqueueinfo(compat_pid_t pid, int sig,
struct compat_siginfo __user *uinfo);
/* No generic prototype for rt_sigreturn */
-
-/* kernel/sys.c */
asmlinkage long compat_sys_times(struct compat_tms __user *tbuf);
asmlinkage long compat_sys_getrlimit(unsigned int resource,
struct compat_rlimit __user *rlim);
asmlinkage long compat_sys_setrlimit(unsigned int resource,
struct compat_rlimit __user *rlim);
asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru);
-
-/* kernel/time.c */
asmlinkage long compat_sys_gettimeofday(struct old_timeval32 __user *tv,
struct timezone __user *tz);
asmlinkage long compat_sys_settimeofday(struct old_timeval32 __user *tv,
struct timezone __user *tz);
-
-/* kernel/timer.c */
asmlinkage long compat_sys_sysinfo(struct compat_sysinfo __user *info);
-
-/* ipc/mqueue.c */
asmlinkage long compat_sys_mq_open(const char __user *u_name,
int oflag, compat_mode_t mode,
struct compat_mq_attr __user *u_attr);
@@ -790,22 +740,14 @@ asmlinkage long compat_sys_mq_notify(mqd_t mqdes,
asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes,
const struct compat_mq_attr __user *u_mqstat,
struct compat_mq_attr __user *u_omqstat);
-
-/* ipc/msg.c */
asmlinkage long compat_sys_msgctl(int first, int second, void __user *uptr);
asmlinkage long compat_sys_msgrcv(int msqid, compat_uptr_t msgp,
compat_ssize_t msgsz, compat_long_t msgtyp, int msgflg);
asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
compat_ssize_t msgsz, int msgflg);
-
-/* ipc/sem.c */
asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
-
-/* ipc/shm.c */
asmlinkage long compat_sys_shmctl(int first, int second, void __user *uptr);
asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
-
-/* net/socket.c */
asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, compat_size_t len,
unsigned flags, struct sockaddr __user *addr,
int __user *addrlen);
@@ -813,20 +755,13 @@ asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg,
unsigned flags);
asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg,
unsigned int flags);
-
-/* mm/filemap.c: No generic prototype for readahead */
-
-/* security/keys/keyctl.c */
+/* No generic prototype for readahead */
asmlinkage long compat_sys_keyctl(u32 option,
u32 arg2, u32 arg3, u32 arg4, u32 arg5);
-
-/* arch/example/kernel/sys_example.c */
asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv,
const compat_uptr_t __user *envp);
-
-/* mm/fadvise.c: No generic prototype for fadvise64_64 */
-
-/* mm/, CONFIG_MMU only */
+/* No generic prototype for fadvise64_64 */
+/* CONFIG_MMU only */
asmlinkage long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid,
compat_pid_t pid, int sig,
struct compat_siginfo __user *uinfo);
@@ -896,18 +831,18 @@ asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
asmlinkage long compat_sys_recv(int fd, void __user *buf, compat_size_t len,
unsigned flags);
-/* obsolete: fs/readdir.c */
+/* obsolete */
asmlinkage long compat_sys_old_readdir(unsigned int fd,
struct compat_old_linux_dirent __user *,
unsigned int count);
-/* obsolete: fs/select.c */
+/* obsolete */
asmlinkage long compat_sys_old_select(struct compat_sel_arg_struct __user *arg);
-/* obsolete: ipc */
+/* obsolete */
asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
-/* obsolete: kernel/signal.c */
+/* obsolete */
#ifdef __ARCH_WANT_SYS_SIGPENDING
asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set);
#endif
@@ -922,7 +857,7 @@ asmlinkage long compat_sys_sigaction(int sig,
struct compat_old_sigaction __user *oact);
#endif
-/* obsolete: net/socket.c */
+/* obsolete */
asmlinkage long compat_sys_socketcall(int call, u32 __user *args);
#ifdef __ARCH_WANT_COMPAT_TRUNCATE64
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index 6cfd6902bd5b..ddab1ef22bee 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -5,10 +5,14 @@
/* Compiler specific definitions for Clang compiler */
-/* same as gcc, this was present in clang-2.6 so we can assume it works
- * with any version that can compile the kernel
+/*
+ * Clang prior to 17 is being silly and considers many __cleanup() variables
+ * as unused (because they are, their sole purpose is to go out of scope).
+ *
+ * https://reviews.llvm.org/D152180
*/
-#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+#undef __cleanup
+#define __cleanup(func) __maybe_unused __attribute__((__cleanup__(func)))
/* all clang versions usable with the kernel support KASAN ABI version 5 */
#define KASAN_ABI_VERSION 5
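
For readers unfamiliar with __cleanup(), a minimal sketch of the pattern that triggered the
warning; the unlock helper follows the kernel mutex API, but the example itself is
illustrative, not part of this patch:

static void mutex_unlock_p(struct mutex **m)
{
        mutex_unlock(*m);
}

static void demo(struct mutex *lock)
{
        mutex_lock(lock);
        /* 'guard' is never read again; clang < 17 flagged it as unused. */
        struct mutex *guard __cleanup(mutex_unlock_p) = lock;

        /* ... critical section; mutex_unlock_p(&guard) runs at end of scope. */
}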
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 7af9e34ec261..75bd1692d2e3 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -39,8 +39,6 @@
#define __noretpoline __attribute__((__indirect_branch__("keep")))
#endif
-#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
-
#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
#define __latent_entropy __attribute__((latent_entropy))
#endif
@@ -66,6 +64,26 @@
__builtin_unreachable(); \
} while (0)
+/*
+ * GCC 'asm goto' with outputs miscompiles certain code sequences:
+ *
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=113921
+ *
+ * Work around it via the same compiler barrier quirk that we used
+ * to use for the old 'asm goto' workaround.
+ *
+ * Also, always mark such 'asm goto' statements as volatile: all
+ * asm goto statements are supposed to be volatile as per the
+ * documentation, but some versions of gcc didn't actually do
+ * that for asms with outputs:
+ *
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98619
+ */
+#ifdef CONFIG_GCC_ASM_GOTO_OUTPUT_WORKAROUND
+#define asm_goto_output(x...) \
+ do { asm volatile goto(x); asm (""); } while (0)
+#endif
+
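
For illustration, a hedged sketch of the call shape this macro wraps; the instruction,
operands and function are made up (x86 syntax), only the macro itself comes from this patch:

static inline bool read_reg(unsigned long *val)
{
        asm_goto_output("mov $42, %0"   /* illustrative instruction */
                        : "=r" (*val)   /* output operand */
                        :               /* no inputs */
                        :               /* no clobbers */
                        : fail);        /* possible branch target */
        return true;
fail:
        return false;
}

With the workaround enabled, each such statement expands to a volatile 'asm goto' followed
by an empty asm barrier inside a do/while, as the comment above describes.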
#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP)
#define __HAVE_BUILTIN_BSWAP32__
#define __HAVE_BUILTIN_BSWAP64__
@@ -138,7 +156,7 @@
#endif
#define __diag_ignore_all(option, comment) \
- __diag_GCC(8, ignore, option)
+ __diag(__diag_GCC_ignore option)
/*
* Prior to 9.1, -Wno-alloc-size-larger-than (and therefore the "alloc_size"
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index d7779a18b24f..bb1339c7057b 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -177,10 +177,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
__asm__ ("" : "=r" (var) : "0" (var))
#endif
-/* Not-quite-unique ID. */
-#ifndef __UNIQUE_ID
-# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
-#endif
+#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
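
A quick illustration of why __COUNTER__ beats __LINE__ here (the counter values shown are
hypothetical):

int __UNIQUE_ID(foo);   /* expands to: int __UNIQUE_ID_foo0; */
int __UNIQUE_ID(foo);   /* expands to: int __UNIQUE_ID_foo1; no clash, even on
                         * the same line, where __LINE__ would have produced
                         * two identical identifiers. */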
/**
* data_race - mark an expression as containing intentional data races
@@ -231,6 +228,14 @@ static inline void *offset_to_ptr(const int *off)
#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
/*
+ * This returns a constant expression while determining if an argument is
+ * a constant expression, most importantly without evaluating the argument.
+ * Glory to Martin Uecker <Martin.Uecker@med.uni-goettingen.de>
+ */
+#define __is_constexpr(x) \
+ (sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))
+
+/*
* Whether 'type' is a signed type or an unsigned type. Supports scalar types,
* bool and also pointer types.
*/
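
Spelled out: when x is an integer constant expression, (void *)((long)(x) * 0l) is a null
pointer constant, the conditional operator yields type int *, and sizeof(*...) equals
sizeof(int); otherwise the conditional yields void * and sizeof(*...) is sizeof(void),
i.e. 1 under GNU C. A short sketch of the observable behaviour:

static void demo(void)
{
        int n = 4;                                 /* not a constant expression */

        BUILD_BUG_ON(!__is_constexpr(8));          /* literal: evaluates to 1 */
        BUILD_BUG_ON(!__is_constexpr(sizeof(n)));  /* sizeof: also constant */
        BUILD_BUG_ON(__is_constexpr(n));           /* variable: evaluates to 0 */
}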
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
index e659cb6fded3..28566624f008 100644
--- a/include/linux/compiler_attributes.h
+++ b/include/linux/compiler_attributes.h
@@ -70,6 +70,12 @@
#define __assume_aligned(a, ...) __attribute__((__assume_aligned__(a, ## __VA_ARGS__)))
/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-cleanup-variable-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#cleanup
+ */
+#define __cleanup(func) __attribute__((__cleanup__(func)))
+
+/*
* Note the long name.
*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-const-function-attribute
@@ -89,6 +95,19 @@
#endif
/*
+ * Optional: only supported since gcc >= 14
+ * Optional: only supported since clang >= 18
+ *
+ * gcc: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=108896
+ * clang: https://reviews.llvm.org/D148381
+ */
+#if __has_attribute(__counted_by__)
+# define __counted_by(member) __attribute__((__counted_by__(member)))
+#else
+# define __counted_by(member)
+#endif
+
+/*
* Optional: not supported by gcc
* Optional: only supported since clang >= 14.0
*
@@ -256,6 +275,18 @@
#define __noreturn __attribute__((__noreturn__))
/*
+ * Optional: only supported since GCC >= 11.1, clang >= 7.0.
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-no_005fstack_005fprotector-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#no-stack-protector-safebuffers
+ */
+#if __has_attribute(__no_stack_protector__)
+# define __no_stack_protector __attribute__((__no_stack_protector__))
+#else
+# define __no_stack_protector
+#endif
+
+/*
* Optional: not supported by gcc.
*
* clang: https://clang.llvm.org/docs/AttributeReference.html#overloadable
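
Typical use of the new __counted_by() annotation, sketched with an illustrative struct and
count 'n': the flexible array is tied to the member holding its element count, so FORTIFY
and sanitizers can bounds-check accesses:

struct packet {
        int length;                     /* element count for data[] */
        u8  data[] __counted_by(length);
};

/* Allocation keeps 'length' and the annotated array in sync. */
struct packet *p = kzalloc(struct_size(p, data, n), GFP_KERNEL);
if (p)
        p->length = n;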
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 547ea1ff806e..0caf354cb94b 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -2,6 +2,15 @@
#ifndef __LINUX_COMPILER_TYPES_H
#define __LINUX_COMPILER_TYPES_H
+/*
+ * __has_builtin is supported on gcc >= 10, clang >= 3 and icc >= 21.
+ * In the meantime, to support gcc < 10, we implement __has_builtin
+ * by hand.
+ */
+#ifndef __has_builtin
+#define __has_builtin(x) (0)
+#endif
+
#ifndef __ASSEMBLY__
/*
@@ -106,15 +115,32 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { }
#define __cold
#endif
-/* Builtins */
-
/*
- * __has_builtin is supported on gcc >= 10, clang >= 3 and icc >= 21.
- * In the meantime, to support gcc < 10, we implement __has_builtin
- * by hand.
+ * On x86-64 and arm64 targets, __preserve_most changes the calling convention
+ * of a function to make the code in the caller as unintrusive as possible. This
+ * convention behaves identically to the C calling convention on how arguments
+ * and return values are passed, but uses a different set of caller- and callee-
+ * saved registers.
+ *
+ * The purpose is to alleviate the burden of saving and recovering a large
+ * register set before and after the call in the caller. This is beneficial for
+ * rarely taken slow paths, such as error-reporting functions that may be called
+ * from hot paths.
+ *
+ * Note: This may conflict with instrumentation inserted on function entry which
+ * does not use __preserve_most or equivalent convention (if in assembly). Since
+ * function tracing assumes the normal C calling convention, where the attribute
+ * is supported, __preserve_most implies notrace. It is recommended to restrict
+ * use of the attribute to functions that should or already disable tracing.
+ *
+ * Optional: not supported by gcc.
+ *
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#preserve-most
*/
-#ifndef __has_builtin
-#define __has_builtin(x) (0)
+#if __has_attribute(__preserve_most__) && (defined(CONFIG_X86_64) || defined(CONFIG_ARM64))
+# define __preserve_most notrace __attribute__((__preserve_most__))
+#else
+# define __preserve_most
#endif
/* Compiler specific macros. */
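
A hedged sketch of the intended use: annotate a rarely taken slow path so hot-path callers
need not spill caller-saved registers around the call (the function and message are
illustrative):

/* Cold error path; notrace is implied where the attribute is supported. */
__preserve_most
void report_corruption(const char *what)
{
        pr_err("corruption detected: %s\n", what);
}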
@@ -324,8 +350,27 @@ struct ftrace_likely_data {
# define __realloc_size(x, ...)
#endif
-#ifndef asm_volatile_goto
-#define asm_volatile_goto(x...) asm goto(x)
+/*
+ * When the size of an allocated object is needed, use the best available
+ * mechanism to find it. (For cases where sizeof() cannot be used.)
+ */
+#if __has_builtin(__builtin_dynamic_object_size)
+#define __struct_size(p) __builtin_dynamic_object_size(p, 0)
+#define __member_size(p) __builtin_dynamic_object_size(p, 1)
+#else
+#define __struct_size(p) __builtin_object_size(p, 0)
+#define __member_size(p) __builtin_object_size(p, 1)
+#endif
+
+/*
+ * Some versions of gcc do not mark 'asm goto' volatile:
+ *
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=103979
+ *
+ * We do it here by hand, because it doesn't hurt.
+ */
+#ifndef asm_goto_output
+#define asm_goto_output(x...) asm volatile goto(x)
#endif
#ifdef CONFIG_CC_HAS_ASM_INLINE
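
A hedged sketch of the difference between the two helpers (struct and sizes illustrative):
__struct_size() covers the whole object, __member_size() only the addressed member, which
is what FORTIFY-style destination checks want:

struct msg {
        u32  hdr;
        char body[16];
};

static void demo(void)
{
        struct msg m = { 0 };

        size_t whole  = __struct_size(&m);       /* sizeof(m): 20 here */
        size_t member = __member_size(m.body);   /* 16: just the array */

        (void)whole;
        (void)member;
}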
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 62b32b19e0a8..fb2915676574 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -116,6 +116,7 @@ extern bool try_wait_for_completion(struct completion *x);
extern bool completion_done(struct completion *x);
extern void complete(struct completion *);
+extern void complete_on_current_cpu(struct completion *x);
extern void complete_all(struct completion *);
#endif
diff --git a/include/linux/connector.h b/include/linux/connector.h
index 487350bb19c3..70bc1160f3d8 100644
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -90,13 +90,18 @@ void cn_del_callback(const struct cb_id *id);
* If @group is not zero, then message will be delivered
* to the specified group.
* @gfp_mask: GFP mask.
+ * @filter: Filter function to be used at netlink layer.
+ * @filter_data: Filter data to be supplied to the filter function.
*
* It can be safely called from softirq context, but may silently
* fail under strong memory pressure.
*
* If there are no listeners for given group %-ESRCH can be returned.
*/
-int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 group, gfp_t gfp_mask);
+int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid,
+ u32 group, gfp_t gfp_mask,
+ netlink_filter_fn filter,
+ void *filter_data);
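
A hedged caller-side sketch of the new filter arguments. The filter signature follows
netlink_filter_fn; the return convention shown (0 == deliver to this socket) is an
assumption for illustration, as are the message variables:

static int my_filter(struct sock *dsk, struct sk_buff *skb, void *data)
{
        /* Assumed convention: return 0 to deliver to this socket. */
        return 0;
}

static int notify_group(struct cn_msg *msg, u16 len)
{
        return cn_netlink_send_mult(msg, len, 0 /* portid */, 1 /* group */,
                                    GFP_ATOMIC, my_filter, NULL);
}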
/**
* cn_netlink_send - Sends message to the specified groups.
diff --git a/include/linux/console.h b/include/linux/console.h
index d3195664baa5..779d388af8a0 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -101,6 +101,13 @@ extern const struct consw dummy_con; /* dummy console buffer */
extern const struct consw vga_con; /* VGA text console */
extern const struct consw newport_con; /* SGI Newport console */
+struct screen_info;
+#ifdef CONFIG_VGA_CONSOLE
+void vgacon_register_screen(struct screen_info *si);
+#else
+static inline void vgacon_register_screen(struct screen_info *si) { }
+#endif
+
int con_is_bound(const struct consw *csw);
int do_unregister_con_driver(const struct consw *csw);
int do_take_over_console(const struct consw *sw, int first, int last, int deflt);
@@ -154,6 +161,10 @@ static inline int con_debug_leave(void)
* receiving the printk spam for obvious reasons.
* @CON_EXTENDED: The console supports the extended output format of
* /dev/kmesg which requires a larger output buffer.
+ * @CON_SUSPENDED: Indicates if a console is suspended. If true, the
+ * printing callbacks must not be called.
+ * @CON_NBCON: Console can operate outside of the legacy style console_lock
+ * constraints.
*/
enum cons_flags {
CON_PRINTBUFFER = BIT(0),
@@ -163,6 +174,112 @@ enum cons_flags {
CON_ANYTIME = BIT(4),
CON_BRL = BIT(5),
CON_EXTENDED = BIT(6),
+ CON_SUSPENDED = BIT(7),
+ CON_NBCON = BIT(8),
+};
+
+/**
+ * struct nbcon_state - console state for nbcon consoles
+ * @atom: Compound of the state fields for atomic operations
+ *
+ * @req_prio: The priority of a handover request
+ * @prio: The priority of the current owner
+ * @unsafe: Console is busy in a non-takeover region
+ * @unsafe_takeover: A hostile takeover in an unsafe state happened in the
+ * past. The console cannot be safe until re-initialized.
+ * @cpu: The CPU on which the owner runs
+ *
+ * To be used for reading and preparing of the value stored in the nbcon
+ * state variable @console::nbcon_state.
+ *
+ * The @prio and @req_prio fields are particularly important to allow
+ * spin-waiting to time out and give up without the risk of a waiter being
+ * assigned the lock after giving up.
+ */
+struct nbcon_state {
+ union {
+ unsigned int atom;
+ struct {
+ unsigned int prio : 2;
+ unsigned int req_prio : 2;
+ unsigned int unsafe : 1;
+ unsigned int unsafe_takeover : 1;
+ unsigned int cpu : 24;
+ };
+ };
+};
+
+/*
+ * The nbcon_state struct is used to easily create and interpret values that
+ * are stored in the @console::nbcon_state variable. Ensure this struct stays
+ * within the size boundaries of the atomic variable's underlying type in
+ * order to avoid any accidental truncation.
+ */
+static_assert(sizeof(struct nbcon_state) <= sizeof(int));
+
+/**
+ * nbcon_prio - console owner priority for nbcon consoles
+ * @NBCON_PRIO_NONE: Unused
+ * @NBCON_PRIO_NORMAL: Normal (non-emergency) usage
+ * @NBCON_PRIO_EMERGENCY: Emergency output (WARN/OOPS...)
+ * @NBCON_PRIO_PANIC: Panic output
+ * @NBCON_PRIO_MAX: The number of priority levels
+ *
+ * A higher priority context can takeover the console when it is
+ * in the safe state. The final attempt to flush consoles in panic()
+ * can be allowed to do so even in an unsafe state (Hope and pray).
+ */
+enum nbcon_prio {
+ NBCON_PRIO_NONE = 0,
+ NBCON_PRIO_NORMAL,
+ NBCON_PRIO_EMERGENCY,
+ NBCON_PRIO_PANIC,
+ NBCON_PRIO_MAX,
+};
+
+struct console;
+struct printk_buffers;
+
+/**
+ * struct nbcon_context - Context for console acquire/release
+ * @console: The associated console
+ * @spinwait_max_us: Limit for spin-wait acquire
+ * @prio: Priority of the context
+ * @allow_unsafe_takeover: Allow performing takeover even if unsafe. Can
+ * be used only with NBCON_PRIO_PANIC @prio. It
+ * might cause a system freeze when the console
+ * is used later.
+ * @backlog: Ringbuffer has pending records
+ * @pbufs: Pointer to the text buffer for this context
+ * @seq: The sequence number to print for this context
+ */
+struct nbcon_context {
+ /* members set by caller */
+ struct console *console;
+ unsigned int spinwait_max_us;
+ enum nbcon_prio prio;
+ unsigned int allow_unsafe_takeover : 1;
+
+ /* members set by emit */
+ unsigned int backlog : 1;
+
+ /* members set by acquire */
+ struct printk_buffers *pbufs;
+ u64 seq;
+};
+
+/**
+ * struct nbcon_write_context - Context handed to the nbcon write callbacks
+ * @ctxt: The core console context
+ * @outbuf: Pointer to the text buffer for output
+ * @len: Length to write
+ * @unsafe_takeover: If a hostile takeover in an unsafe state has occurred
+ */
+struct nbcon_write_context {
+ struct nbcon_context __private ctxt;
+ char *outbuf;
+ unsigned int len;
+ bool unsafe_takeover;
};
/**
@@ -184,6 +301,11 @@ enum cons_flags {
* @dropped: Number of unreported dropped ringbuffer records
* @data: Driver private data
* @node: hlist node for the console list
+ *
+ * @write_atomic: Write callback for atomic context
+ * @nbcon_state: State for nbcon consoles
+ * @nbcon_seq: Sequence number of the next record for nbcon to print
+ * @pbufs: Pointer to nbcon private buffer
*/
struct console {
char name[16];
@@ -203,6 +325,13 @@ struct console {
unsigned long dropped;
void *data;
struct hlist_node node;
+
+ /* nbcon console specific members */
+ bool (*write_atomic)(struct console *con,
+ struct nbcon_write_context *wctxt);
+ atomic_t __private nbcon_state;
+ atomic_long_t __private nbcon_seq;
+ struct printk_buffers *pbufs;
};
#ifdef CONFIG_LOCKDEP
@@ -329,6 +458,16 @@ static inline bool console_is_registered(const struct console *con)
lockdep_assert_console_list_lock_held(); \
hlist_for_each_entry(con, &console_list, node)
+#ifdef CONFIG_PRINTK
+extern bool nbcon_can_proceed(struct nbcon_write_context *wctxt);
+extern bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt);
+extern bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt);
+#else
+static inline bool nbcon_can_proceed(struct nbcon_write_context *wctxt) { return false; }
+static inline bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt) { return false; }
+static inline bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt) { return false; }
+#endif
+
extern int console_set_on_cmdline;
extern struct console *early_console;
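
Tying the pieces together, a hedged sketch of an nbcon driver's write_atomic() callback;
hw_emit() is a placeholder for the driver's real output routine. The unsafe section
brackets hardware access so a higher-priority context can only take the console over at a
safe point, and both helpers report whether ownership survived:

static void hw_emit(const char *buf, unsigned int len);  /* placeholder */

static bool foo_write_atomic(struct console *con,
                             struct nbcon_write_context *wctxt)
{
        if (!nbcon_enter_unsafe(wctxt))
                return false;                   /* ownership was taken over */

        hw_emit(wctxt->outbuf, wctxt->len);

        return nbcon_exit_unsafe(wctxt);
}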
@@ -337,7 +476,7 @@ enum con_flush_mode {
CONSOLE_REPLAY_ALL,
};
-extern int add_preferred_console(char *name, int idx, char *options);
+extern int add_preferred_console(const char *name, const short idx, char *options);
extern void console_force_preferred_locked(struct console *con);
extern void register_console(struct console *);
extern int unregister_console(struct console *);
diff --git a/include/linux/const.h b/include/linux/const.h
index 435ddd72d2c4..81b8aae5a855 100644
--- a/include/linux/const.h
+++ b/include/linux/const.h
@@ -3,12 +3,4 @@
#include <vdso/const.h>
-/*
- * This returns a constant expression while determining if an argument is
- * a constant expression, most importantly without evaluating the argument.
- * Glory to Martin Uecker <Martin.Uecker@med.uni-goettingen.de>
- */
-#define __is_constexpr(x) \
- (sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))
-
#endif /* _LINUX_CONST_H */
diff --git a/include/linux/container.h b/include/linux/container.h
index 2566a1baa736..dd00cc918a92 100644
--- a/include/linux/container.h
+++ b/include/linux/container.h
@@ -12,7 +12,7 @@
#include <linux/device.h>
/* drivers/base/power/container.c */
-extern struct bus_type container_subsys;
+extern const struct bus_type container_subsys;
struct container_dev {
struct device dev;
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index d3cbb6c16bab..6e76b9dba00e 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -119,7 +119,7 @@ extern void ct_idle_exit(void);
*/
static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
{
- return !(arch_atomic_read(this_cpu_ptr(&context_tracking.state)) & RCU_DYNTICKS_IDX);
+ return !(raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & RCU_DYNTICKS_IDX);
}
/*
@@ -128,7 +128,7 @@ static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
*/
static __always_inline unsigned long ct_state_inc(int incby)
{
- return arch_atomic_add_return(incby, this_cpu_ptr(&context_tracking.state));
+ return raw_atomic_add_return(incby, this_cpu_ptr(&context_tracking.state));
}
static __always_inline bool warn_rcu_enter(void)
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index fdd537ea513f..bbff5f7f8803 100644
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -51,7 +51,7 @@ DECLARE_PER_CPU(struct context_tracking, context_tracking);
#ifdef CONFIG_CONTEXT_TRACKING_USER
static __always_inline int __ct_state(void)
{
- return arch_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_STATE_MASK;
+ return raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_STATE_MASK;
}
#endif
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index f19a47b9bb5a..a4cb7dd6ca23 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -6,6 +6,8 @@
#ifndef _LINUX_CORESIGHT_H
#define _LINUX_CORESIGHT_H
+#include <linux/amba/bus.h>
+#include <linux/clk.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/perf_event.h>
@@ -41,10 +43,11 @@ enum coresight_dev_type {
CORESIGHT_DEV_TYPE_LINKSINK,
CORESIGHT_DEV_TYPE_SOURCE,
CORESIGHT_DEV_TYPE_HELPER,
- CORESIGHT_DEV_TYPE_ECT,
+ CORESIGHT_DEV_TYPE_MAX
};
enum coresight_dev_subtype_sink {
+ CORESIGHT_DEV_SUBTYPE_SINK_DUMMY,
CORESIGHT_DEV_SUBTYPE_SINK_PORT,
CORESIGHT_DEV_SUBTYPE_SINK_BUFFER,
CORESIGHT_DEV_SUBTYPE_SINK_SYSMEM,
@@ -61,17 +64,13 @@ enum coresight_dev_subtype_source {
CORESIGHT_DEV_SUBTYPE_SOURCE_PROC,
CORESIGHT_DEV_SUBTYPE_SOURCE_BUS,
CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE,
+ CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM,
CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS,
};
enum coresight_dev_subtype_helper {
CORESIGHT_DEV_SUBTYPE_HELPER_CATU,
-};
-
-/* Embedded Cross Trigger (ECT) sub-types */
-enum coresight_dev_subtype_ect {
- CORESIGHT_DEV_SUBTYPE_ECT_NONE,
- CORESIGHT_DEV_SUBTYPE_ECT_CTI,
+ CORESIGHT_DEV_SUBTYPE_HELPER_ECT_CTI
};
/**
@@ -84,8 +83,6 @@ enum coresight_dev_subtype_ect {
* by @coresight_dev_subtype_source.
* @helper_subtype: type of helper this component is, as defined
* by @coresight_dev_subtype_helper.
- * @ect_subtype: type of cross trigger this component is, as
- * defined by @coresight_dev_subtype_ect
*/
union coresight_dev_subtype {
/* We have some devices which act as LINK and SINK */
@@ -95,21 +92,25 @@ union coresight_dev_subtype {
};
enum coresight_dev_subtype_source source_subtype;
enum coresight_dev_subtype_helper helper_subtype;
- enum coresight_dev_subtype_ect ect_subtype;
};
/**
* struct coresight_platform_data - data harvested from the firmware
* specification.
*
- * @nr_inport: Number of elements for the input connections.
- * @nr_outport: Number of elements for the output connections.
- * @conns: Sparse array of nr_outport connections from this component.
+ * @nr_inconns: Number of elements for the input connections.
+ * @nr_outconns: Number of elements for the output connections.
+ * @out_conns: Array of nr_outconns pointers to connections from this
+ * component.
+ * @in_conns: Sparse array of pointers to input connections. Sparse
+ * because the source device owns the connection, so when that device
+ * is unloaded the connection leaves an empty slot.
*/
struct coresight_platform_data {
- int nr_inport;
- int nr_outport;
- struct coresight_connection *conns;
+ int nr_inconns;
+ int nr_outconns;
+ struct coresight_connection **out_conns;
+ struct coresight_connection **in_conns;
};
/**
@@ -164,19 +165,42 @@ struct coresight_desc {
/**
* struct coresight_connection - representation of a single connection
- * @outport: a connection's output port number.
- * @child_port: remote component's port number @output is connected to.
- * @chid_fwnode: remote component's fwnode handle.
- * @child_dev: a @coresight_device representation of the component
- connected to @outport.
+ * @src_port: a connection's output port number.
+ * @dest_port: destination's input port number @src_port is connected to.
+ * @dest_fwnode: destination component's fwnode handle.
+ * @dest_dev: a @coresight_device representation of the component
+ *             connected to @src_port. NULL until the device is created.
* @link: Representation of the connection as a sysfs link.
+ *
+ * The full connection structure looks like this, where in_conns stores
+ * references to the same connection as the source device's out_conns.
+ *
+ * +-----------------------------+   +-----------------------------+
+ * |coresight_device             |   |coresight_connection         |
+ * |-----------------------------|   |-----------------------------|
+ * |                             |   |                             |
+ * |                             |   |                    dest_dev*|<--
+ * |pdata->out_conns[nr_outconns]|<->|src_dev*                     |  |
+ * |                             |   |                             |  |
+ * +-----------------------------+   +-----------------------------+  |
+ *                                                                    |
+ *           +-----------------------------+                         |
+ *           |coresight_device             |                         |
+ *           |-----------------------------|                         |
+ *           |                             |                         |
+ *           |  pdata->in_conns[nr_inconns]|<-------------------------
+ *           |                             |
+ *           +-----------------------------+
*/
struct coresight_connection {
- int outport;
- int child_port;
- struct fwnode_handle *child_fwnode;
- struct coresight_device *child_dev;
+ int src_port;
+ int dest_port;
+ struct fwnode_handle *dest_fwnode;
+ struct coresight_device *dest_dev;
struct coresight_sysfs_link *link;
+ struct coresight_device *src_dev;
+ atomic_t src_refcnt;
+ atomic_t dest_refcnt;
};
/**
@@ -211,8 +235,6 @@ struct coresight_sysfs_link {
* from source to that sink.
* @ea: Device attribute for sink representation under PMU directory.
* @def_sink: cached reference to default sink found for this device.
- * @ect_dev: Associated cross trigger device. Not part of the trace data
- * path or connections.
* @nr_links: number of sysfs links created to other components from this
* device. These will appear in the "connections" group.
* @has_conns_grp: Have added a "connections" group for sysfs links.
@@ -228,19 +250,16 @@ struct coresight_device {
const struct coresight_ops *ops;
struct csdev_access access;
struct device dev;
- atomic_t *refcnt;
+ atomic_t refcnt;
bool orphan;
bool enable; /* true only if configured as part of a path */
/* sink specific fields */
bool activated; /* true only if a sink is part of a path */
struct dev_ext_attribute *ea;
struct coresight_device *def_sink;
- /* cross trigger handling */
- struct coresight_device *ect_dev;
/* sysfs links between components */
int nr_links;
bool has_conns_grp;
- bool ect_enabled; /* true only if associated ect device is enabled */
/* system configuration and feature lists */
struct list_head feature_csdev_list;
struct list_head config_csdev_list;
@@ -272,6 +291,12 @@ static struct coresight_dev_list (var) = { \
#define to_coresight_device(d) container_of(d, struct coresight_device, dev)
+enum cs_mode {
+ CS_MODE_DISABLED,
+ CS_MODE_SYSFS,
+ CS_MODE_PERF,
+};
+
#define source_ops(csdev) csdev->ops->source_ops
#define sink_ops(csdev) csdev->ops->sink_ops
#define link_ops(csdev) csdev->ops->link_ops
@@ -288,7 +313,8 @@ static struct coresight_dev_list (var) = { \
* @update_buffer: update buffer pointers after a trace session.
*/
struct coresight_ops_sink {
- int (*enable)(struct coresight_device *csdev, u32 mode, void *data);
+ int (*enable)(struct coresight_device *csdev, enum cs_mode mode,
+ void *data);
int (*disable)(struct coresight_device *csdev);
void *(*alloc_buffer)(struct coresight_device *csdev,
struct perf_event *event, void **pages,
@@ -306,8 +332,12 @@ struct coresight_ops_sink {
* @disable: disables flow between iport and oport.
*/
struct coresight_ops_link {
- int (*enable)(struct coresight_device *csdev, int iport, int oport);
- void (*disable)(struct coresight_device *csdev, int iport, int oport);
+ int (*enable)(struct coresight_device *csdev,
+ struct coresight_connection *in,
+ struct coresight_connection *out);
+ void (*disable)(struct coresight_device *csdev,
+ struct coresight_connection *in,
+ struct coresight_connection *out);
};
/**
@@ -320,8 +350,8 @@ struct coresight_ops_link {
*/
struct coresight_ops_source {
int (*cpu_id)(struct coresight_device *csdev);
- int (*enable)(struct coresight_device *csdev,
- struct perf_event *event, u32 mode);
+ int (*enable)(struct coresight_device *csdev, struct perf_event *event,
+ enum cs_mode mode);
void (*disable)(struct coresight_device *csdev,
struct perf_event *event);
};
@@ -336,27 +366,16 @@ struct coresight_ops_source {
* @disable : Disable the device
*/
struct coresight_ops_helper {
- int (*enable)(struct coresight_device *csdev, void *data);
+ int (*enable)(struct coresight_device *csdev, enum cs_mode mode,
+ void *data);
int (*disable)(struct coresight_device *csdev, void *data);
};
-/**
- * struct coresight_ops_ect - Ops for an embedded cross trigger device
- *
- * @enable : Enable the device
- * @disable : Disable the device
- */
-struct coresight_ops_ect {
- int (*enable)(struct coresight_device *csdev);
- int (*disable)(struct coresight_device *csdev);
-};
-
struct coresight_ops {
const struct coresight_ops_sink *sink_ops;
const struct coresight_ops_link *link_ops;
const struct coresight_ops_source *source_ops;
const struct coresight_ops_helper *helper_ops;
- const struct coresight_ops_ect *ect_ops;
};
#if IS_ENABLED(CONFIG_CORESIGHT)
@@ -370,6 +389,63 @@ static inline u32 csdev_access_relaxed_read32(struct csdev_access *csa,
return csa->read(offset, true, false);
}
+#define CORESIGHT_CIDRn(i) (0xFF0 + ((i) * 4))
+
+static inline u32 coresight_get_cid(void __iomem *base)
+{
+ u32 i, cid = 0;
+
+ for (i = 0; i < 4; i++)
+ cid |= readl(base + CORESIGHT_CIDRn(i)) << (i * 8);
+
+ return cid;
+}
+
+static inline bool is_coresight_device(void __iomem *base)
+{
+ u32 cid = coresight_get_cid(base);
+
+ return cid == CORESIGHT_CID;
+}
+
+/*
+ * Attempt to find and enable "APB clock" for the given device
+ *
+ * Returns:
+ *
+ * clk   - clock is found and enabled
+ * NULL  - clock is not found
+ * ERROR - clock is found but failed to enable
+ */
+static inline struct clk *coresight_get_enable_apb_pclk(struct device *dev)
+{
+ struct clk *pclk;
+ int ret;
+
+ pclk = clk_get(dev, "apb_pclk");
+ if (IS_ERR(pclk))
+ return NULL;
+
+ ret = clk_prepare_enable(pclk);
+ if (ret) {
+ clk_put(pclk);
+ return ERR_PTR(ret);
+ }
+ return pclk;
+}
+
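
A hedged caller-side sketch of the three-way return contract above (the probe function is
illustrative):

static int foo_probe(struct device *dev)
{
        struct clk *pclk = coresight_get_enable_apb_pclk(dev);

        if (IS_ERR(pclk))
                return PTR_ERR(pclk);   /* found, but enabling failed */

        /* NULL simply means no "apb_pclk" exists for this device. */

        /* ... stash pclk so remove() can clk_disable_unprepare/clk_put ... */
        return 0;
}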
+#define CORESIGHT_PIDRn(i) (0xFE0 + ((i) * 4))
+
+static inline u32 coresight_get_pid(struct csdev_access *csa)
+{
+ u32 i, pid = 0;
+
+ for (i = 0; i < 4; i++)
+ pid |= csdev_access_relaxed_read32(csa, CORESIGHT_PIDRn(i)) << (i * 8);
+
+ return pid;
+}
+
static inline u64 csdev_access_relaxed_read_pair(struct csdev_access *csa,
u32 lo_offset, u32 hi_offset)
{
@@ -602,5 +678,18 @@ static inline void coresight_write64(struct coresight_device *csdev, u64 val, u3
extern int coresight_get_cpu(struct device *dev);
struct coresight_platform_data *coresight_get_platform_data(struct device *dev);
+struct coresight_connection *
+coresight_add_out_conn(struct device *dev,
+ struct coresight_platform_data *pdata,
+ const struct coresight_connection *new_conn);
+int coresight_add_in_conn(struct coresight_connection *conn);
+struct coresight_device *
+coresight_find_input_type(struct coresight_platform_data *pdata,
+ enum coresight_dev_type type,
+ union coresight_dev_subtype subtype);
+struct coresight_device *
+coresight_find_output_type(struct coresight_platform_data *pdata,
+ enum coresight_dev_type type,
+ union coresight_dev_subtype subtype);
#endif /* _LINUX_CORESIGHT_H */
diff --git a/include/linux/counter.h b/include/linux/counter.h
index b63746637de2..702e9108bbb4 100644
--- a/include/linux/counter.h
+++ b/include/linux/counter.h
@@ -399,7 +399,7 @@ struct counter_device {
struct mutex ops_exist_lock;
};
-void *counter_priv(const struct counter_device *const counter);
+void *counter_priv(const struct counter_device *const counter) __attribute_const__;
struct counter_device *counter_alloc(size_t sizeof_priv);
void counter_put(struct counter_device *const counter);
diff --git a/include/linux/cper.h b/include/linux/cper.h
index c1a7dc325121..265b0f8fc0b3 100644
--- a/include/linux/cper.h
+++ b/include/linux/cper.h
@@ -90,6 +90,29 @@ enum {
GUID_INIT(0x667DD791, 0xC6B3, 0x4c27, 0x8A, 0x6B, 0x0F, 0x8E, \
0x72, 0x2D, 0xEB, 0x41)
+/* CXL Event record UUIDs are formatted as GUIDs and reported in section type */
+/*
+ * General Media Event Record
+ * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
+ */
+#define CPER_SEC_CXL_GEN_MEDIA_GUID \
+ GUID_INIT(0xfbcd0a77, 0xc260, 0x417f, \
+ 0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6)
+/*
+ * DRAM Event Record
+ * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
+ */
+#define CPER_SEC_CXL_DRAM_GUID \
+ GUID_INIT(0x601dcbb3, 0x9c06, 0x4eab, \
+ 0xb8, 0xaf, 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24)
+/*
+ * Memory Module Event Record
+ * CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45
+ */
+#define CPER_SEC_CXL_MEM_MODULE_GUID \
+ GUID_INIT(0xfe927475, 0xdd59, 0x4339, \
+ 0xa5, 0x86, 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74)
+
/*
* Flags bits definitions for flags in struct cper_record_header
* If set, the error has been recovered
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 8582a7142623..dcb89c987164 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -18,6 +18,7 @@
#include <linux/compiler.h>
#include <linux/cpumask.h>
#include <linux/cpuhotplug.h>
+#include <linux/cpu_smt.h>
struct device;
struct device_node;
@@ -70,17 +71,28 @@ extern ssize_t cpu_show_mmio_stale_data(struct device *dev,
char *buf);
extern ssize_t cpu_show_retbleed(struct device *dev,
struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_spec_rstack_overflow(struct device *dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_gds(struct device *dev,
+ struct device_attribute *attr, char *buf);
extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
const struct attribute_group **groups,
const char *fmt, ...);
+extern bool arch_cpu_is_hotpluggable(int cpu);
+extern int arch_register_cpu(int cpu);
+extern void arch_unregister_cpu(int cpu);
#ifdef CONFIG_HOTPLUG_CPU
extern void unregister_cpu(struct cpu *cpu);
extern ssize_t arch_cpu_probe(const char *, size_t);
extern ssize_t arch_cpu_release(const char *, size_t);
#endif
+#ifdef CONFIG_GENERIC_CPU_DEVICES
+DECLARE_PER_CPU(struct cpu, cpu_devices);
+#endif
+
/*
* These states are not related to the core CPU hotplug mechanism. They are
* used by various (sub)architectures to track internal state
@@ -148,6 +160,8 @@ static inline int remove_cpu(unsigned int cpu) { return -EPERM; }
static inline void smp_shutdown_nonboot_cpus(unsigned int primary_cpu) { }
#endif /* !CONFIG_HOTPLUG_CPU */
+DEFINE_LOCK_GUARD_0(cpus_read_lock, cpus_read_lock(), cpus_read_unlock())
+
#ifdef CONFIG_PM_SLEEP_SMP
extern int freeze_secondary_cpus(int primary);
extern void thaw_secondary_cpus(void);
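
The new guard lets cleanup.h scope-based locking replace manual lock/unlock pairs; a
minimal sketch (the loop body is illustrative):

static void walk_online_cpus(void)
{
        unsigned int cpu;

        guard(cpus_read_lock)();        /* cpus_read_unlock() at end of scope */

        for_each_online_cpu(cpu)
                pr_info("cpu%u is online\n", cpu);
}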
@@ -184,9 +198,12 @@ void arch_cpu_idle_enter(void);
void arch_cpu_idle_exit(void);
void __noreturn arch_cpu_idle_dead(void);
-int cpu_report_state(int cpu);
-int cpu_check_up_prepare(int cpu);
-void cpu_set_state_online(int cpu);
+#ifdef CONFIG_ARCH_HAS_CPU_FINALIZE_INIT
+void arch_cpu_finalize_init(void);
+#else
+static inline void arch_cpu_finalize_init(void) { }
+#endif
+
void play_idle_precise(u64 duration_ns, u64 latency_ns);
static inline void play_idle(unsigned long duration_us)
@@ -195,37 +212,11 @@ static inline void play_idle(unsigned long duration_us)
}
#ifdef CONFIG_HOTPLUG_CPU
-bool cpu_wait_death(unsigned int cpu, int seconds);
-bool cpu_report_death(void);
void cpuhp_report_idle_dead(void);
#else
static inline void cpuhp_report_idle_dead(void) { }
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-enum cpuhp_smt_control {
- CPU_SMT_ENABLED,
- CPU_SMT_DISABLED,
- CPU_SMT_FORCE_DISABLED,
- CPU_SMT_NOT_SUPPORTED,
- CPU_SMT_NOT_IMPLEMENTED,
-};
-
-#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
-extern enum cpuhp_smt_control cpu_smt_control;
-extern void cpu_smt_disable(bool force);
-extern void cpu_smt_check_topology(void);
-extern bool cpu_smt_possible(void);
-extern int cpuhp_smt_enable(void);
-extern int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval);
-#else
-# define cpu_smt_control (CPU_SMT_NOT_IMPLEMENTED)
-static inline void cpu_smt_disable(bool force) { }
-static inline void cpu_smt_check_topology(void) { }
-static inline bool cpu_smt_possible(void) { return false; }
-static inline int cpuhp_smt_enable(void) { return 0; }
-static inline int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) { return 0; }
-#endif
-
extern bool cpu_mitigations_off(void);
extern bool cpu_mitigations_auto_nosmt(void);
diff --git a/include/linux/cpu_smt.h b/include/linux/cpu_smt.h
new file mode 100644
index 000000000000..0c1664294b57
--- /dev/null
+++ b/include/linux/cpu_smt.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CPU_SMT_H_
+#define _LINUX_CPU_SMT_H_
+
+enum cpuhp_smt_control {
+ CPU_SMT_ENABLED,
+ CPU_SMT_DISABLED,
+ CPU_SMT_FORCE_DISABLED,
+ CPU_SMT_NOT_SUPPORTED,
+ CPU_SMT_NOT_IMPLEMENTED,
+};
+
+#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
+extern enum cpuhp_smt_control cpu_smt_control;
+extern unsigned int cpu_smt_num_threads;
+extern void cpu_smt_disable(bool force);
+extern void cpu_smt_set_num_threads(unsigned int num_threads,
+ unsigned int max_threads);
+extern bool cpu_smt_possible(void);
+extern int cpuhp_smt_enable(void);
+extern int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval);
+#else
+# define cpu_smt_control (CPU_SMT_NOT_IMPLEMENTED)
+# define cpu_smt_num_threads 1
+static inline void cpu_smt_disable(bool force) { }
+static inline void cpu_smt_set_num_threads(unsigned int num_threads,
+ unsigned int max_threads) { }
+static inline bool cpu_smt_possible(void) { return false; }
+static inline int cpuhp_smt_enable(void) { return 0; }
+static inline int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) { return 0; }
+#endif
+
+#endif /* _LINUX_CPU_SMT_H_ */
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 26e2eb399484..afda5f24d3dd 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -19,6 +19,7 @@
#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/sysfs.h>
+#include <linux/minmax.h>
/*********************************************************************
* CPUFREQ INTERFACE *
@@ -140,6 +141,9 @@ struct cpufreq_policy {
*/
bool dvfs_possible_from_any_cpu;
+ /* Per policy boost enabled flag. */
+ bool boost_enabled;
+
/* Cached frequency lookup from cpufreq_driver_resolve_freq. */
unsigned int cached_target_freq;
unsigned int cached_resolved_idx;
@@ -340,7 +344,10 @@ struct cpufreq_driver {
/*
* ->fast_switch() replacement for drivers that use an internal
* representation of performance levels and can pass hints other than
- * the target performance level to the hardware.
+ * the target performance level to the hardware. This can only be set
+ * if ->fast_switch is set too, because in those cases (under specific
+ * conditions) scale invariance can be disabled, which causes the
+ * schedutil governor to fall back to the latter.
*/
void (*adjust_perf)(unsigned int cpu,
unsigned long min_perf,
@@ -367,7 +374,7 @@ struct cpufreq_driver {
int (*target_intermediate)(struct cpufreq_policy *policy,
unsigned int index);
- /* should be defined, if possible */
+ /* should be defined, if possible; returns 0 on error */
unsigned int (*get)(unsigned int cpu);
/* Called to update policy limits on firmware notifications. */
@@ -464,17 +471,8 @@ static inline void cpufreq_verify_within_limits(struct cpufreq_policy_data *poli
unsigned int min,
unsigned int max)
{
- if (policy->min < min)
- policy->min = min;
- if (policy->max < min)
- policy->max = min;
- if (policy->min > max)
- policy->min = max;
- if (policy->max > max)
- policy->max = max;
- if (policy->min > policy->max)
- policy->min = policy->max;
- return;
+ policy->max = clamp(policy->max, min, max);
+ policy->min = clamp(policy->min, min, policy->max);
}
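
A worked example showing the clamp() form preserves the old stepwise fixups (values
illustrative):

/*
 * Suppose min = 1000000, max = 2000000,
 * policy->min = 500000, policy->max = 3000000.
 *
 * policy->max = clamp(3000000, 1000000, 2000000) = 2000000
 * policy->min = clamp(500000,  1000000, 2000000) = 1000000
 *
 * Clamping ->max first means the second clamp's upper bound is the
 * already-valid ->max, so policy->min <= policy->max holds without
 * the old explicit final fixup.
 */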
static inline void
@@ -1195,14 +1193,6 @@ static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_
}
#endif
-#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
-void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
- struct cpufreq_governor *old_gov);
-#else
-static inline void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
- struct cpufreq_governor *old_gov) { }
-#endif
-
extern unsigned int arch_freq_get_on_cpu(int cpu);
#ifndef arch_set_freq_scale
@@ -1213,6 +1203,7 @@ void arch_set_freq_scale(const struct cpumask *cpus,
{
}
#endif
+
/* the following are really really optional */
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 3ceb9dfa0993..172d0a743e5d 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -48,7 +48,7 @@
* same section.
*
* If neither #1 nor #2 apply, please use the dynamic state space when
- * setting up a state by using CPUHP_PREPARE_DYN or CPUHP_PREPARE_ONLINE
+ * setting up a state by using CPUHP_BP_PREPARE_DYN or CPUHP_AP_ONLINE_DYN
* for the @state argument of the setup function.
*
* See Documentation/core-api/cpu_hotplug.rst for further information and
@@ -66,15 +66,12 @@ enum cpuhp_state {
CPUHP_PERF_POWER,
CPUHP_PERF_SUPERH,
CPUHP_X86_HPET_DEAD,
- CPUHP_X86_APB_DEAD,
CPUHP_X86_MCE_DEAD,
CPUHP_VIRT_NET_DEAD,
CPUHP_IBMVNIC_DEAD,
CPUHP_SLUB_DEAD,
CPUHP_DEBUG_OBJ_DEAD,
CPUHP_MM_WRITEBACK_DEAD,
- /* Must be after CPUHP_MM_VMSTAT_DEAD */
- CPUHP_MM_DEMOTION_DEAD,
CPUHP_MM_VMSTAT_DEAD,
CPUHP_SOFTIRQ_DEAD,
CPUHP_NET_MVNETA_DEAD,
@@ -90,14 +87,12 @@ enum cpuhp_state {
CPUHP_FS_BUFF_DEAD,
CPUHP_PRINTK_DEAD,
CPUHP_MM_MEMCQ_DEAD,
- CPUHP_XFS_DEAD,
CPUHP_PERCPU_CNT_DEAD,
CPUHP_RADIX_DEAD,
CPUHP_PAGE_ALLOC,
CPUHP_NET_DEV_DEAD,
CPUHP_PCI_XGENE_DEAD,
CPUHP_IOMMU_IOVA_DEAD,
- CPUHP_LUSTRE_CFS_DEAD,
CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
CPUHP_PADATA_DEAD,
CPUHP_AP_DTPM_CPU_DEAD,
@@ -109,7 +104,6 @@ enum cpuhp_state {
CPUHP_X2APIC_PREPARE,
CPUHP_SMPCFD_PREPARE,
CPUHP_RELAY_PREPARE,
- CPUHP_SLAB_PREPARE,
CPUHP_MD_RAID5_PREPARE,
CPUHP_RCUTREE_PREP,
CPUHP_CPUIDLE_COUPLED_PREPARE,
@@ -119,13 +113,11 @@ enum cpuhp_state {
CPUHP_XEN_EVTCHN_PREPARE,
CPUHP_ARM_SHMOBILE_SCU_PREPARE,
CPUHP_SH_SH3X_PREPARE,
- CPUHP_NET_FLOW_PREPARE,
CPUHP_TOPOLOGY_PREPARE,
CPUHP_NET_IUCV_PREPARE,
CPUHP_ARM_BL_PREPARE,
CPUHP_TRACE_RB_PREPARE,
CPUHP_MM_ZS_PREPARE,
- CPUHP_MM_ZSWP_MEM_PREPARE,
CPUHP_MM_ZSWP_POOL_PREPARE,
CPUHP_KVM_PPC_BOOK3S_PREPARE,
CPUHP_ZCOMP_PREPARE,
@@ -133,6 +125,7 @@ enum cpuhp_state {
CPUHP_MIPS_SOC_PREPARE,
CPUHP_BP_PREPARE_DYN,
CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20,
+ CPUHP_BP_KICK_AP,
CPUHP_BRINGUP_CPU,
/*
@@ -151,18 +144,14 @@ enum cpuhp_state {
CPUHP_AP_IRQ_ARMADA_XP_STARTING,
CPUHP_AP_IRQ_BCM2836_STARTING,
CPUHP_AP_IRQ_MIPS_GIC_STARTING,
- CPUHP_AP_IRQ_RISCV_STARTING,
CPUHP_AP_IRQ_LOONGARCH_STARTING,
CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
CPUHP_AP_ARM_MVEBU_COHERENCY,
- CPUHP_AP_MICROCODE_LOADER,
CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
CPUHP_AP_PERF_X86_STARTING,
CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
- CPUHP_AP_PERF_X86_CQM_STARTING,
CPUHP_AP_PERF_X86_CSTATE_STARTING,
CPUHP_AP_PERF_XTENSA_STARTING,
- CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
CPUHP_AP_ARM_VFP_STARTING,
CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,
@@ -172,13 +161,13 @@ enum cpuhp_state {
CPUHP_AP_ARM_L2X0_STARTING,
CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
CPUHP_AP_ARM_ARCH_TIMER_STARTING,
+ CPUHP_AP_ARM_ARCH_TIMER_EVTSTRM_STARTING,
CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
CPUHP_AP_JCORE_TIMER_STARTING,
CPUHP_AP_ARM_TWD_STARTING,
CPUHP_AP_QCOM_TIMER_STARTING,
CPUHP_AP_TEGRA_TIMER_STARTING,
CPUHP_AP_ARMADA_TIMER_STARTING,
- CPUHP_AP_MARCO_TIMER_STARTING,
CPUHP_AP_MIPS_GIC_TIMER_STARTING,
CPUHP_AP_ARC_TIMER_STARTING,
CPUHP_AP_RISCV_TIMER_STARTING,
@@ -189,10 +178,12 @@ enum cpuhp_state {
/* Must be the last timer callback */
CPUHP_AP_DUMMY_TIMER_STARTING,
CPUHP_AP_ARM_XEN_STARTING,
+ CPUHP_AP_ARM_XEN_RUNSTATE_STARTING,
CPUHP_AP_ARM_CORESIGHT_STARTING,
CPUHP_AP_ARM_CORESIGHT_CTI_STARTING,
CPUHP_AP_ARM64_ISNDEP_STARTING,
CPUHP_AP_SMPCFD_DYING,
+ CPUHP_AP_HRTIMERS_DYING,
CPUHP_AP_X86_TBOOT_DYING,
CPUHP_AP_ARM_CACHE_B15_RAC_DYING,
CPUHP_AP_ONLINE,
@@ -204,7 +195,6 @@ enum cpuhp_state {
CPUHP_AP_KVM_ONLINE,
CPUHP_AP_SCHED_WAIT_EMPTY,
CPUHP_AP_SMPBOOT_THREADS,
- CPUHP_AP_X86_VDSO_VMA_ONLINE,
CPUHP_AP_IRQ_AFFINITY_ONLINE,
CPUHP_AP_BLK_MQ_ONLINE,
CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS,
@@ -215,9 +205,7 @@ enum cpuhp_state {
CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
CPUHP_AP_PERF_X86_AMD_POWER_ONLINE,
CPUHP_AP_PERF_X86_RAPL_ONLINE,
- CPUHP_AP_PERF_X86_CQM_ONLINE,
CPUHP_AP_PERF_X86_CSTATE_ONLINE,
- CPUHP_AP_PERF_X86_IDXD_ONLINE,
CPUHP_AP_PERF_S390_CF_ONLINE,
CPUHP_AP_PERF_S390_SF_ONLINE,
CPUHP_AP_PERF_ARM_CCI_ONLINE,
@@ -249,9 +237,7 @@ enum cpuhp_state {
CPUHP_AP_RCUTREE_ONLINE,
CPUHP_AP_BASE_CACHEINFO_ONLINE,
CPUHP_AP_ONLINE_DYN,
- CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30,
- /* Must be after CPUHP_AP_ONLINE_DYN for node_states[N_CPU] update */
- CPUHP_AP_MM_DEMOTION_ONLINE,
+ CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 40,
CPUHP_AP_X86_HPET_ONLINE,
CPUHP_AP_X86_KVM_CLK_ONLINE,
CPUHP_AP_ACTIVE,
@@ -518,4 +504,20 @@ void cpuhp_online_idle(enum cpuhp_state state);
static inline void cpuhp_online_idle(enum cpuhp_state state) { }
#endif
+struct task_struct;
+
+void cpuhp_ap_sync_alive(void);
+void arch_cpuhp_sync_state_poll(void);
+void arch_cpuhp_cleanup_kick_cpu(unsigned int cpu);
+int arch_cpuhp_kick_ap_alive(unsigned int cpu, struct task_struct *tidle);
+bool arch_cpuhp_init_parallel_bringup(void);
+
+#ifdef CONFIG_HOTPLUG_CORE_SYNC_DEAD
+void cpuhp_ap_report_dead(void);
+void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu);
+#else
+static inline void cpuhp_ap_report_dead(void) { }
+static inline void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu) { }
+#endif
+
#endif
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index ca736b05ec7b..cfb545841a2c 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -4,7 +4,7 @@
/*
* Cpumasks provide a bitmap suitable for representing the
- * set of CPU's in a system, one bit position per CPU number. In general,
+ * set of CPUs in a system, one bit position per CPU number. In general,
* only nr_cpu_ids (<= NR_CPUS) bits are valid.
*/
#include <linux/kernel.h>
@@ -97,7 +97,7 @@ static inline void set_nr_cpu_ids(unsigned int nr)
*
* If !CONFIG_HOTPLUG_CPU, present == possible, and active == online.
*
- * The cpu_possible_mask is fixed at boot time, as the set of CPU id's
+ * The cpu_possible_mask is fixed at boot time, as the set of CPU IDs
* that it is possible might ever be plugged in at anytime during the
* life of that system boot. The cpu_present_mask is dynamic(*),
* representing which CPUs are currently plugged in. And
@@ -112,7 +112,7 @@ static inline void set_nr_cpu_ids(unsigned int nr)
* hotplug, it's a copy of cpu_possible_mask, hence fixed at boot.
*
* Subtleties:
- * 1) UP arch's (NR_CPUS == 1, CONFIG_SMP not defined) hardcode
+ * 1) UP ARCHes (NR_CPUS == 1, CONFIG_SMP not defined) hardcode
* assumption that their single CPU is online. The UP
* cpu_{online,possible,present}_masks are placebos. Changing them
* will have no useful affect on the following num_*_cpus()
@@ -155,7 +155,7 @@ static __always_inline unsigned int cpumask_check(unsigned int cpu)
* cpumask_first - get the first cpu in a cpumask
* @srcp: the cpumask pointer
*
- * Returns >= nr_cpu_ids if no cpus set.
+ * Return: >= nr_cpu_ids if no cpus set.
*/
static inline unsigned int cpumask_first(const struct cpumask *srcp)
{
@@ -166,7 +166,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
* cpumask_first_zero - get the first unset cpu in a cpumask
* @srcp: the cpumask pointer
*
- * Returns >= nr_cpu_ids if all cpus are set.
+ * Return: >= nr_cpu_ids if all cpus are set.
*/
static inline unsigned int cpumask_first_zero(const struct cpumask *srcp)
{
@@ -175,10 +175,10 @@ static inline unsigned int cpumask_first_zero(const struct cpumask *srcp)
/**
* cpumask_first_and - return the first cpu from *srcp1 & *srcp2
- * @src1p: the first input
- * @src2p: the second input
+ * @srcp1: the first input
+ * @srcp2: the second input
*
- * Returns >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and().
+ * Return: >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and().
*/
static inline
unsigned int cpumask_first_and(const struct cpumask *srcp1, const struct cpumask *srcp2)
@@ -190,7 +190,7 @@ unsigned int cpumask_first_and(const struct cpumask *srcp1, const struct cpumask
* cpumask_last - get the last CPU in a cpumask
* @srcp: the cpumask pointer
*
- * Returns >= nr_cpumask_bits if no CPUs set.
+ * Return: >= nr_cpumask_bits if no CPUs set.
*/
static inline unsigned int cpumask_last(const struct cpumask *srcp)
{
@@ -199,10 +199,10 @@ static inline unsigned int cpumask_last(const struct cpumask *srcp)
/**
* cpumask_next - get the next cpu in a cpumask
- * @n: the cpu prior to the place to search (ie. return will be > @n)
+ * @n: the cpu prior to the place to search (i.e. return will be > @n)
* @srcp: the cpumask pointer
*
- * Returns >= nr_cpu_ids if no further cpus set.
+ * Return: >= nr_cpu_ids if no further cpus set.
*/
static inline
unsigned int cpumask_next(int n, const struct cpumask *srcp)
@@ -215,10 +215,10 @@ unsigned int cpumask_next(int n, const struct cpumask *srcp)
/**
* cpumask_next_zero - get the next unset cpu in a cpumask
- * @n: the cpu prior to the place to search (ie. return will be > @n)
+ * @n: the cpu prior to the place to search (i.e. return will be > @n)
* @srcp: the cpumask pointer
*
- * Returns >= nr_cpu_ids if no further cpus unset.
+ * Return: >= nr_cpu_ids if no further cpus unset.
*/
static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
{
@@ -254,11 +254,11 @@ unsigned int cpumask_any_distribute(const struct cpumask *srcp);
/**
* cpumask_next_and - get the next cpu in *src1p & *src2p
- * @n: the cpu prior to the place to search (ie. return will be > @n)
+ * @n: the cpu prior to the place to search (i.e. return will be > @n)
* @src1p: the first cpumask pointer
* @src2p: the second cpumask pointer
*
- * Returns >= nr_cpu_ids if no further cpus set in both.
+ * Return: >= nr_cpu_ids if no further cpus set in both.
*/
static inline
unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
@@ -373,7 +373,7 @@ unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int sta
* @cpu: the cpu to ignore.
*
* Often used to find any cpu but smp_processor_id() in a mask.
- * Returns >= nr_cpu_ids if no cpus set.
+ * Return: >= nr_cpu_ids if no cpus set.
*/
static inline
unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
@@ -388,11 +388,11 @@ unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
}
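
The 'Return: >= nr_cpu_ids' convention means callers must treat that value as "none
found"; a hedged sketch (target_mask is illustrative):

static unsigned int pick_cpu(const struct cpumask *target_mask)
{
        unsigned int cpu = cpumask_first_and(cpu_online_mask, target_mask);

        if (cpu >= nr_cpu_ids)                  /* nothing in common */
                cpu = cpumask_first(cpu_online_mask);

        return cpu;
}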
/**
- * cpumask_nth - get the first cpu in a cpumask
+ * cpumask_nth - get the Nth cpu in a cpumask
* @srcp: the cpumask pointer
- * @cpu: the N'th cpu to find, starting from 0
+ * @cpu: the Nth cpu to find, starting from 0
*
- * Returns >= nr_cpu_ids if such cpu doesn't exist.
+ * Return: >= nr_cpu_ids if such cpu doesn't exist.
*/
static inline unsigned int cpumask_nth(unsigned int cpu, const struct cpumask *srcp)
{
@@ -400,12 +400,12 @@ static inline unsigned int cpumask_nth(unsigned int cpu, const struct cpumask *s
}
/**
- * cpumask_nth_and - get the first cpu in 2 cpumasks
+ * cpumask_nth_and - get the Nth cpu in 2 cpumasks
* @srcp1: the cpumask pointer
* @srcp2: the cpumask pointer
- * @cpu: the N'th cpu to find, starting from 0
+ * @cpu: the Nth cpu to find, starting from 0
*
- * Returns >= nr_cpu_ids if such cpu doesn't exist.
+ * Return: >= nr_cpu_ids if such cpu doesn't exist.
*/
static inline
unsigned int cpumask_nth_and(unsigned int cpu, const struct cpumask *srcp1,
@@ -416,12 +416,12 @@ unsigned int cpumask_nth_and(unsigned int cpu, const struct cpumask *srcp1,
}
/**
- * cpumask_nth_andnot - get the first cpu set in 1st cpumask, and clear in 2nd.
+ * cpumask_nth_andnot - get the Nth cpu set in 1st cpumask, and clear in 2nd.
* @srcp1: the cpumask pointer
* @srcp2: the cpumask pointer
- * @cpu: the N'th cpu to find, starting from 0
+ * @cpu: the Nth cpu to find, starting from 0
*
- * Returns >= nr_cpu_ids if such cpu doesn't exist.
+ * Return: >= nr_cpu_ids if such cpu doesn't exist.
*/
static inline
unsigned int cpumask_nth_andnot(unsigned int cpu, const struct cpumask *srcp1,
@@ -436,9 +436,9 @@ unsigned int cpumask_nth_andnot(unsigned int cpu, const struct cpumask *srcp1,
* @srcp1: the cpumask pointer
* @srcp2: the cpumask pointer
* @srcp3: the cpumask pointer
- * @cpu: the N'th cpu to find, starting from 0
+ * @cpu: the Nth cpu to find, starting from 0
*
- * Returns >= nr_cpu_ids if such cpu doesn't exist.
+ * Return: >= nr_cpu_ids if such cpu doesn't exist.
*/
static __always_inline
unsigned int cpumask_nth_and_andnot(unsigned int cpu, const struct cpumask *srcp1,
@@ -497,7 +497,7 @@ static __always_inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
* @cpu: cpu number (< nr_cpu_ids)
* @cpumask: the cpumask pointer
*
- * Returns true if @cpu is set in @cpumask, else returns false
+ * Return: true if @cpu is set in @cpumask, else returns false
*/
static __always_inline bool cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
{
@@ -509,9 +509,9 @@ static __always_inline bool cpumask_test_cpu(int cpu, const struct cpumask *cpum
* @cpu: cpu number (< nr_cpu_ids)
* @cpumask: the cpumask pointer
*
- * Returns true if @cpu is set in old bitmap of @cpumask, else returns false
- *
* test_and_set_bit wrapper for cpumasks.
+ *
+ * Return: true if @cpu is set in old bitmap of @cpumask, else returns false
*/
static __always_inline bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
{
@@ -523,9 +523,9 @@ static __always_inline bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cp
* @cpu: cpu number (< nr_cpu_ids)
* @cpumask: the cpumask pointer
*
- * Returns true if @cpu is set in old bitmap of @cpumask, else returns false
- *
* test_and_clear_bit wrapper for cpumasks.
+ *
+ * Return: true if @cpu is set in old bitmap of @cpumask, else returns false
*/
static __always_inline bool cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
{
@@ -560,7 +560,7 @@ static inline void cpumask_clear(struct cpumask *dstp)
* @src1p: the first input
* @src2p: the second input
*
- * If *@dstp is empty, returns false, else returns true
+ * Return: false if *@dstp is empty, else returns true
*/
static inline bool cpumask_and(struct cpumask *dstp,
const struct cpumask *src1p,
@@ -603,7 +603,7 @@ static inline void cpumask_xor(struct cpumask *dstp,
* @src1p: the first input
* @src2p: the second input
*
- * If *@dstp is empty, returns false, else returns true
+ * Return: false if *@dstp is empty, else returns true
*/
static inline bool cpumask_andnot(struct cpumask *dstp,
const struct cpumask *src1p,
@@ -617,6 +617,8 @@ static inline bool cpumask_andnot(struct cpumask *dstp,
* cpumask_equal - *src1p == *src2p
* @src1p: the first input
* @src2p: the second input
+ *
+ * Return: true if the cpumasks are equal, false if not
*/
static inline bool cpumask_equal(const struct cpumask *src1p,
const struct cpumask *src2p)
@@ -630,6 +632,9 @@ static inline bool cpumask_equal(const struct cpumask *src1p,
* @src1p: the first input
* @src2p: the second input
* @src3p: the third input
+ *
+ * Return: true if first cpumask ORed with second cpumask == third cpumask,
+ * otherwise false
*/
static inline bool cpumask_or_equal(const struct cpumask *src1p,
const struct cpumask *src2p,
@@ -643,6 +648,9 @@ static inline bool cpumask_or_equal(const struct cpumask *src1p,
* cpumask_intersects - (*src1p & *src2p) != 0
* @src1p: the first input
* @src2p: the second input
+ *
+ * Return: true if first cpumask ANDed with second cpumask is non-empty,
+ * otherwise false
*/
static inline bool cpumask_intersects(const struct cpumask *src1p,
const struct cpumask *src2p)
@@ -656,7 +664,7 @@ static inline bool cpumask_intersects(const struct cpumask *src1p,
* @src1p: the first input
* @src2p: the second input
*
- * Returns true if *@src1p is a subset of *@src2p, else returns false
+ * Return: true if *@src1p is a subset of *@src2p, else returns false
*/
static inline bool cpumask_subset(const struct cpumask *src1p,
const struct cpumask *src2p)
@@ -668,6 +676,8 @@ static inline bool cpumask_subset(const struct cpumask *src1p,
/**
* cpumask_empty - *srcp == 0
* @srcp: the cpumask to that all cpus < nr_cpu_ids are clear.
+ *
+ * Return: true if srcp is empty (has no bits set), else false
*/
static inline bool cpumask_empty(const struct cpumask *srcp)
{
@@ -677,6 +687,8 @@ static inline bool cpumask_empty(const struct cpumask *srcp)
/**
* cpumask_full - *srcp == 0xFFFFFFFF...
* @srcp: the cpumask to that all cpus < nr_cpu_ids are set.
+ *
+ * Return: true if srcp is full (has all bits set), else false
*/
static inline bool cpumask_full(const struct cpumask *srcp)
{
@@ -686,6 +698,8 @@ static inline bool cpumask_full(const struct cpumask *srcp)
/**
* cpumask_weight - Count of bits in *srcp
* @srcp: the cpumask to count bits (< nr_cpu_ids) in.
+ *
+ * Return: count of bits set in *srcp
*/
static inline unsigned int cpumask_weight(const struct cpumask *srcp)
{
@@ -696,6 +710,8 @@ static inline unsigned int cpumask_weight(const struct cpumask *srcp)
* cpumask_weight_and - Count of bits in (*srcp1 & *srcp2)
* @srcp1: the cpumask to count bits (< nr_cpu_ids) in.
* @srcp2: the cpumask to count bits (< nr_cpu_ids) in.
+ *
+ * Return: count of bits set in both *srcp1 and *srcp2
*/
static inline unsigned int cpumask_weight_and(const struct cpumask *srcp1,
const struct cpumask *srcp2)
@@ -744,7 +760,7 @@ static inline void cpumask_copy(struct cpumask *dstp,
* cpumask_any - pick a "random" cpu from *srcp
* @srcp: the input cpumask
*
- * Returns >= nr_cpu_ids if no cpus set.
+ * Return: >= nr_cpu_ids if no cpus set.
*/
#define cpumask_any(srcp) cpumask_first(srcp)
@@ -753,7 +769,7 @@ static inline void cpumask_copy(struct cpumask *dstp,
* @mask1: the first input cpumask
* @mask2: the second input cpumask
*
- * Returns >= nr_cpu_ids if no cpus set.
+ * Return: >= nr_cpu_ids if no cpus set.
*/
#define cpumask_any_and(mask1, mask2) cpumask_first_and((mask1), (mask2))
@@ -769,7 +785,7 @@ static inline void cpumask_copy(struct cpumask *dstp,
* @len: the length of the buffer
* @dstp: the cpumask to set.
*
- * Returns -errno, or 0 for success.
+ * Return: -errno, or 0 for success.
*/
static inline int cpumask_parse_user(const char __user *buf, int len,
struct cpumask *dstp)
@@ -783,7 +799,7 @@ static inline int cpumask_parse_user(const char __user *buf, int len,
* @len: the length of the buffer
* @dstp: the cpumask to set.
*
- * Returns -errno, or 0 for success.
+ * Return: -errno, or 0 for success.
*/
static inline int cpumask_parselist_user(const char __user *buf, int len,
struct cpumask *dstp)
@@ -797,7 +813,7 @@ static inline int cpumask_parselist_user(const char __user *buf, int len,
* @buf: the buffer to extract from
* @dstp: the cpumask to set.
*
- * Returns -errno, or 0 for success.
+ * Return: -errno, or 0 for success.
*/
static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
{
@@ -809,7 +825,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
* @buf: the buffer to extract from
* @dstp: the cpumask to set.
*
- * Returns -errno, or 0 for success.
+ * Return: -errno, or 0 for success.
*/
static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
{
@@ -817,7 +833,9 @@ static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
}
/**
- * cpumask_size - size to allocate for a 'struct cpumask' in bytes
+ * cpumask_size - calculate size to allocate for a 'struct cpumask' in bytes
+ *
+ * Return: size to allocate for a &struct cpumask in bytes
*/
static inline unsigned int cpumask_size(void)
{
@@ -831,7 +849,7 @@ static inline unsigned int cpumask_size(void)
* little more difficult, we typedef cpumask_var_t to an array or a
* pointer: doing &mask on an array is a noop, so it still works.
*
- * ie.
+ * i.e.
* cpumask_var_t tmpmask;
* if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
* return -ENOMEM;
@@ -887,6 +905,8 @@ bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
* a nop returning a constant 1 (in <linux/cpumask.h>).
*
* See alloc_cpumask_var_node.
+ *
+ * Return: %true if allocation succeeded, %false if not
*/
static inline
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
@@ -1025,7 +1045,7 @@ set_cpu_dying(unsigned int cpu, bool dying)
}
/**
- * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
+ * to_cpumask - convert a NR_CPUS bitmap to a struct cpumask *
* @bitmap: the bitmap
*
* There are a few places where cpumask_var_t isn't appropriate and
@@ -1068,10 +1088,12 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
* interface gives only a momentary snapshot and is not protected against
* concurrent CPU hotplug operations unless invoked from a cpuhp_lock held
* region.
+ *
+ * Return: momentary snapshot of the number of online CPUs
*/
static __always_inline unsigned int num_online_cpus(void)
{
- return arch_atomic_read(&__num_online_cpus);
+ return raw_atomic_read(&__num_online_cpus);
}
#define num_possible_cpus() cpumask_weight(cpu_possible_mask)
#define num_present_cpus() cpumask_weight(cpu_present_mask)
@@ -1160,7 +1182,7 @@ static inline bool cpu_dying(unsigned int cpu)
* @mask: the cpumask to copy
* @buf: the buffer to copy into
*
- * Returns the length of the (null-terminated) @buf string, zero if
+ * Return: the length of the (null-terminated) @buf string, zero if
* nothing is copied.
*/
static inline ssize_t
@@ -1183,7 +1205,7 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
* cpumask; Typically used by bin_attribute to export cpumask bitmask
* ABI.
*
- * Returns the length of how many bytes have been copied, excluding
+ * Return: the length of how many bytes have been copied, excluding
* terminating '\0'.
*/
static inline ssize_t
@@ -1197,9 +1219,16 @@ cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask,
/**
* cpumap_print_list_to_buf - copies the cpumask into the buffer as
* comma-separated list of cpus
+ * @buf: the buffer to copy into
+ * @mask: the cpumask to copy
+ * @off: offset into the string from which we are copying to @buf
+ * @count: the maximum number of bytes to print
*
* Everything is same with the above cpumap_print_bitmask_to_buf()
* except the print format.
+ *
+ * Return: the length of how many bytes have been copied, excluding
+ * terminating '\0'.
*/
static inline ssize_t
cpumap_print_list_to_buf(char *buf, const struct cpumask *mask,
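Editor's note: a hedged illustration of the cpumask_var_t allocation and
parsing flow documented in the hunks above — a minimal sketch, not part of
the patch; the CPU list string and function name are invented:

static int example_parse_cpus(void)
{
	cpumask_var_t mask;
	int err;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	err = cpulist_parse("0-3,8", mask);	/* hypothetical CPU list */
	if (!err)
		pr_info("parsed %u cpus\n", cpumask_weight(mask));

	free_cpumask_var(mask);
	return err;
}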
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 980b76a1237e..875d12598bd2 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -71,10 +71,13 @@ extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_wait_for_hotplug(void);
-extern void cpuset_read_lock(void);
-extern void cpuset_read_unlock(void);
+extern void inc_dl_tasks_cs(struct task_struct *task);
+extern void dec_dl_tasks_cs(struct task_struct *task);
+extern void cpuset_lock(void);
+extern void cpuset_unlock(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern bool cpuset_cpus_allowed_fallback(struct task_struct *p);
+extern bool cpuset_cpu_is_isolated(int cpu);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
@@ -189,8 +192,10 @@ static inline void cpuset_update_active_cpus(void)
static inline void cpuset_wait_for_hotplug(void) { }
-static inline void cpuset_read_lock(void) { }
-static inline void cpuset_read_unlock(void) { }
+static inline void inc_dl_tasks_cs(struct task_struct *task) { }
+static inline void dec_dl_tasks_cs(struct task_struct *task) { }
+static inline void cpuset_lock(void) { }
+static inline void cpuset_unlock(void) { }
static inline void cpuset_cpus_allowed(struct task_struct *p,
struct cpumask *mask)
@@ -203,6 +208,11 @@ static inline bool cpuset_cpus_allowed_fallback(struct task_struct *p)
return false;
}
+static inline bool cpuset_cpu_is_isolated(int cpu)
+{
+ return false;
+}
+
static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
return node_possible_map;
diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h
index de62a722431e..9eaeaafe0cad 100644
--- a/include/linux/crash_core.h
+++ b/include/linux/crash_core.h
@@ -5,6 +5,14 @@
#include <linux/linkage.h>
#include <linux/elfcore.h>
#include <linux/elf.h>
+#ifdef CONFIG_ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION
+#include <asm/crash_core.h>
+#endif
+
+/* Location of a reserved region to hold the crash kernel. */
+extern struct resource crashk_res;
+extern struct resource crashk_low_res;
#define CRASH_CORE_NOTE_NAME "CORE"
#define CRASH_CORE_NOTE_HEAD_BYTES ALIGN(sizeof(struct elf_note), 4)
@@ -28,6 +36,8 @@
VMCOREINFO_BYTES)
typedef u32 note_buf_t[CRASH_CORE_NOTE_BYTES/4];
+/* Per cpu memory for storing cpu states in case of system crash. */
+extern note_buf_t __percpu *crash_notes;
void crash_update_vmcoreinfo_safecopy(void *ptr);
void crash_save_vmcoreinfo(void);
@@ -78,10 +88,60 @@ Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type,
void final_note(Elf_Word *buf);
int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
- unsigned long long *crash_size, unsigned long long *crash_base);
-int parse_crashkernel_high(char *cmdline, unsigned long long system_ram,
- unsigned long long *crash_size, unsigned long long *crash_base);
-int parse_crashkernel_low(char *cmdline, unsigned long long system_ram,
- unsigned long long *crash_size, unsigned long long *crash_base);
+ unsigned long long *crash_size, unsigned long long *crash_base,
+ unsigned long long *low_size, bool *high);
+
+#ifdef CONFIG_ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION
+#ifndef DEFAULT_CRASH_KERNEL_LOW_SIZE
+#define DEFAULT_CRASH_KERNEL_LOW_SIZE (128UL << 20)
+#endif
+#ifndef CRASH_ALIGN
+#define CRASH_ALIGN SZ_2M
+#endif
+#ifndef CRASH_ADDR_LOW_MAX
+#define CRASH_ADDR_LOW_MAX SZ_4G
+#endif
+#ifndef CRASH_ADDR_HIGH_MAX
+#define CRASH_ADDR_HIGH_MAX memblock_end_of_DRAM()
+#endif
+
+void __init reserve_crashkernel_generic(char *cmdline,
+ unsigned long long crash_size,
+ unsigned long long crash_base,
+ unsigned long long crash_low_size,
+ bool high);
+#else
+static inline void __init reserve_crashkernel_generic(char *cmdline,
+ unsigned long long crash_size,
+ unsigned long long crash_base,
+ unsigned long long crash_low_size,
+ bool high)
+{}
+#endif
+
+/* Alignment required for elf header segment */
+#define ELF_CORE_HEADER_ALIGN 4096
+
+struct crash_mem {
+ unsigned int max_nr_ranges;
+ unsigned int nr_ranges;
+ struct range ranges[] __counted_by(max_nr_ranges);
+};
+
+extern int crash_exclude_mem_range(struct crash_mem *mem,
+ unsigned long long mstart,
+ unsigned long long mend);
+extern int crash_prepare_elf64_headers(struct crash_mem *mem, int need_kernel_map,
+ void **addr, unsigned long *sz);
+
+struct kimage;
+struct kexec_segment;
+
+#define KEXEC_CRASH_HP_NONE 0
+#define KEXEC_CRASH_HP_ADD_CPU 1
+#define KEXEC_CRASH_HP_REMOVE_CPU 2
+#define KEXEC_CRASH_HP_ADD_MEMORY 3
+#define KEXEC_CRASH_HP_REMOVE_MEMORY 4
+#define KEXEC_CRASH_HP_INVALID_CPU -1U
#endif /* LINUX_CRASH_CORE_H */
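Editor's note: a hedged allocation sketch for the new struct crash_mem
above. With the __counted_by(max_nr_ranges) annotation, the counter field
must agree with the struct_size() based allocation (function name is
hypothetical):

static struct crash_mem *example_alloc_crash_mem(unsigned int max_ranges)
{
	struct crash_mem *mem;

	mem = kzalloc(struct_size(mem, ranges, max_ranges), GFP_KERNEL);
	if (!mem)
		return NULL;
	/* must match the allocation for __counted_by bounds checking */
	mem->max_nr_ranges = max_ranges;
	return mem;
}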
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 0f3a656293b0..acc55626afdc 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -50,6 +50,7 @@ void vmcore_cleanup(void);
#define vmcore_elf64_check_arch(x) (elf_check_arch(x) || vmcore_elf_check_arch_cross(x))
#endif
+#ifndef is_kdump_kernel
/*
* is_kdump_kernel() checks whether this kernel is booting after a panic of
* previous kernel or not. This is determined by checking if previous kernel
@@ -64,6 +65,7 @@ static inline bool is_kdump_kernel(void)
{
return elfcorehdr_addr != ELFCORE_ADDR_MAX;
}
+#endif
/* is_vmcore_usable() checks if the kernel is booting after a panic and
* the vmcore region is usable.
@@ -75,7 +77,8 @@ static inline bool is_kdump_kernel(void)
static inline int is_vmcore_usable(void)
{
- return is_kdump_kernel() && elfcorehdr_addr != ELFCORE_ADDR_ERR ? 1 : 0;
+ return elfcorehdr_addr != ELFCORE_ADDR_ERR &&
+ elfcorehdr_addr != ELFCORE_ADDR_MAX ? 1 : 0;
}
/* vmcore_unusable() marks the vmcore as unusable,
@@ -84,8 +87,7 @@ static inline int is_vmcore_usable(void)
static inline void vmcore_unusable(void)
{
- if (is_kdump_kernel())
- elfcorehdr_addr = ELFCORE_ADDR_ERR;
+ elfcorehdr_addr = ELFCORE_ADDR_ERR;
}
/**
diff --git a/include/linux/crc-ccitt.h b/include/linux/crc-ccitt.h
index 72c92c396bb8..cd4f420231ba 100644
--- a/include/linux/crc-ccitt.h
+++ b/include/linux/crc-ccitt.h
@@ -5,19 +5,12 @@
#include <linux/types.h>
extern u16 const crc_ccitt_table[256];
-extern u16 const crc_ccitt_false_table[256];
extern u16 crc_ccitt(u16 crc, const u8 *buffer, size_t len);
-extern u16 crc_ccitt_false(u16 crc, const u8 *buffer, size_t len);
static inline u16 crc_ccitt_byte(u16 crc, const u8 c)
{
return (crc >> 8) ^ crc_ccitt_table[(crc ^ c) & 0xff];
}
-static inline u16 crc_ccitt_false_byte(u16 crc, const u8 c)
-{
- return (crc << 8) ^ crc_ccitt_false_table[(crc >> 8) ^ c];
-}
-
#endif /* _LINUX_CRC_CCITT_H */
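Editor's note: the retained crc_ccitt_byte() helper composes into a
whole-buffer CRC the same way the exported crc_ccitt() does; a hedged
sketch, assuming the conventional 0xffff CCITT seed:

static u16 example_crc_ccitt(const u8 *buf, size_t len)
{
	u16 crc = 0xffff;	/* conventional CCITT initial value */

	while (len--)
		crc = crc_ccitt_byte(crc, *buf++);
	return crc;
}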
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 9ed9232af934..2976f534a7a3 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/key.h>
#include <linux/atomic.h>
+#include <linux/refcount.h>
#include <linux/uidgid.h>
#include <linux/sched.h>
#include <linux/sched/user.h>
@@ -23,7 +24,7 @@ struct inode;
* COW Supplementary groups list
*/
struct group_info {
- atomic_t usage;
+ refcount_t usage;
int ngroups;
kgid_t gid[];
} __randomize_layout;
@@ -39,7 +40,7 @@ struct group_info {
*/
static inline struct group_info *get_group_info(struct group_info *gi)
{
- atomic_inc(&gi->usage);
+ refcount_inc(&gi->usage);
return gi;
}
@@ -49,7 +50,7 @@ static inline struct group_info *get_group_info(struct group_info *gi)
*/
#define put_group_info(group_info) \
do { \
- if (atomic_dec_and_test(&(group_info)->usage)) \
+ if (refcount_dec_and_test(&(group_info)->usage)) \
groups_free(group_info); \
} while (0)
@@ -108,14 +109,7 @@ static inline int groups_search(const struct group_info *group_info, kgid_t grp)
* same context as task->real_cred.
*/
struct cred {
- atomic_t usage;
-#ifdef CONFIG_DEBUG_CREDENTIALS
- atomic_t subscribers; /* number of processes subscribed */
- void *put_addr;
- unsigned magic;
-#define CRED_MAGIC 0x43736564
-#define CRED_MAGIC_DEAD 0x44656144
-#endif
+ atomic_long_t usage;
kuid_t uid; /* real UID of the task */
kgid_t gid; /* real GID of the task */
kuid_t suid; /* saved UID of the task */
@@ -164,7 +158,6 @@ extern void abort_creds(struct cred *);
extern const struct cred *override_creds(const struct cred *);
extern void revert_creds(const struct cred *);
extern struct cred *prepare_kernel_cred(struct task_struct *);
-extern int change_create_files_as(struct cred *, struct inode *);
extern int set_security_override(struct cred *, u32);
extern int set_security_override_from_ctx(struct cred *, const char *);
extern int set_create_files_as(struct cred *, struct inode *);
@@ -172,46 +165,6 @@ extern int cred_fscmp(const struct cred *, const struct cred *);
extern void __init cred_init(void);
extern int set_cred_ucounts(struct cred *);
-/*
- * check for validity of credentials
- */
-#ifdef CONFIG_DEBUG_CREDENTIALS
-extern void __noreturn __invalid_creds(const struct cred *, const char *, unsigned);
-extern void __validate_process_creds(struct task_struct *,
- const char *, unsigned);
-
-extern bool creds_are_invalid(const struct cred *cred);
-
-static inline void __validate_creds(const struct cred *cred,
- const char *file, unsigned line)
-{
- if (unlikely(creds_are_invalid(cred)))
- __invalid_creds(cred, file, line);
-}
-
-#define validate_creds(cred) \
-do { \
- __validate_creds((cred), __FILE__, __LINE__); \
-} while(0)
-
-#define validate_process_creds() \
-do { \
- __validate_process_creds(current, __FILE__, __LINE__); \
-} while(0)
-
-extern void validate_creds_for_do_exit(struct task_struct *);
-#else
-static inline void validate_creds(const struct cred *cred)
-{
-}
-static inline void validate_creds_for_do_exit(struct task_struct *tsk)
-{
-}
-static inline void validate_process_creds(void)
-{
-}
-#endif
-
static inline bool cap_ambient_invariant_ok(const struct cred *cred)
{
return cap_issubset(cred->cap_ambient,
@@ -220,6 +173,20 @@ static inline bool cap_ambient_invariant_ok(const struct cred *cred)
}
/**
+ * get_new_cred_many - Get references on a new set of credentials
+ * @cred: The new credentials to reference
+ * @nr: Number of references to acquire
+ *
+ * Get references on the specified set of new credentials. The caller must
+ * release all acquired references.
+ */
+static inline struct cred *get_new_cred_many(struct cred *cred, int nr)
+{
+ atomic_long_add(nr, &cred->usage);
+ return cred;
+}
+
+/**
* get_new_cred - Get a reference on a new set of credentials
* @cred: The new credentials to reference
*
@@ -228,16 +195,16 @@ static inline bool cap_ambient_invariant_ok(const struct cred *cred)
*/
static inline struct cred *get_new_cred(struct cred *cred)
{
- atomic_inc(&cred->usage);
- return cred;
+ return get_new_cred_many(cred, 1);
}
/**
- * get_cred - Get a reference on a set of credentials
+ * get_cred_many - Get references on a set of credentials
* @cred: The credentials to reference
+ * @nr: Number of references to acquire
*
- * Get a reference on the specified set of credentials. The caller must
- * release the reference. If %NULL is passed, it is returned with no action.
+ * Get references on the specified set of credentials. The caller must release
+ * all acquired references. If %NULL is passed, it is returned with no action.
*
* This is used to deal with a committed set of credentials. Although the
* pointer is const, this will temporarily discard the const and increment the
@@ -245,14 +212,27 @@ static inline struct cred *get_new_cred(struct cred *cred)
* accidental alteration of a set of credentials that should be considered
* immutable.
*/
-static inline const struct cred *get_cred(const struct cred *cred)
+static inline const struct cred *get_cred_many(const struct cred *cred, int nr)
{
struct cred *nonconst_cred = (struct cred *) cred;
if (!cred)
return cred;
- validate_creds(cred);
nonconst_cred->non_rcu = 0;
- return get_new_cred(nonconst_cred);
+ return get_new_cred_many(nonconst_cred, nr);
+}
+
+/*
+ * get_cred - Get a reference on a set of credentials
+ * @cred: The credentials to reference
+ *
+ * Get a reference on the specified set of credentials. The caller must
+ * release the reference. If %NULL is passed, it is returned with no action.
+ *
+ * This is used to deal with a committed set of credentials.
+ */
+static inline const struct cred *get_cred(const struct cred *cred)
+{
+ return get_cred_many(cred, 1);
}
static inline const struct cred *get_cred_rcu(const struct cred *cred)
@@ -260,9 +240,8 @@ static inline const struct cred *get_cred_rcu(const struct cred *cred)
struct cred *nonconst_cred = (struct cred *) cred;
if (!cred)
return NULL;
- if (!atomic_inc_not_zero(&nonconst_cred->usage))
+ if (!atomic_long_inc_not_zero(&nonconst_cred->usage))
return NULL;
- validate_creds(cred);
nonconst_cred->non_rcu = 0;
return cred;
}
@@ -270,6 +249,7 @@ static inline const struct cred *get_cred_rcu(const struct cred *cred)
/**
* put_cred - Release a reference to a set of credentials
* @cred: The credentials to release
+ * @nr: Number of references to release
*
* Release a reference to a set of credentials, deleting them when the last ref
* is released. If %NULL is passed, nothing is done.
@@ -278,17 +258,28 @@ static inline const struct cred *get_cred_rcu(const struct cred *cred)
* on task_struct are attached by const pointers to prevent accidental
* alteration of otherwise immutable credential sets.
*/
-static inline void put_cred(const struct cred *_cred)
+static inline void put_cred_many(const struct cred *_cred, int nr)
{
struct cred *cred = (struct cred *) _cred;
if (cred) {
- validate_creds(cred);
- if (atomic_dec_and_test(&(cred)->usage))
+ if (atomic_long_sub_and_test(nr, &cred->usage))
__put_cred(cred);
}
}
+/*
+ * put_cred - Release a reference to a set of credentials
+ * @cred: The credentials to release
+ *
+ * Release a reference to a set of credentials, deleting them when the last ref
+ * is released. If %NULL is passed, nothing is done.
+ */
+static inline void put_cred(const struct cred *cred)
+{
+ put_cred_many(cred, 1);
+}
+
/**
* current_cred - Access the current task's subjective credentials
*
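Editor's note: a hedged sketch of the batched credential helpers introduced
above; taking and dropping N references costs a single atomic operation
each. The batch size and its use are hypothetical (e.g. one reference per
cached file):

static void example_cred_batch(const struct cred *cred, int nr)
{
	cred = get_cred_many(cred, nr);	/* one atomic add for nr refs */
	/* ... hand out nr references ... */
	put_cred_many(cred, nr);	/* one atomic sub to drop them */
}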
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index fa310ac1db59..b164da5e129e 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -24,18 +24,18 @@
#define CRYPTO_ALG_TYPE_CIPHER 0x00000001
#define CRYPTO_ALG_TYPE_COMPRESS 0x00000002
#define CRYPTO_ALG_TYPE_AEAD 0x00000003
+#define CRYPTO_ALG_TYPE_LSKCIPHER 0x00000004
#define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005
+#define CRYPTO_ALG_TYPE_AKCIPHER 0x00000006
+#define CRYPTO_ALG_TYPE_SIG 0x00000007
#define CRYPTO_ALG_TYPE_KPP 0x00000008
#define CRYPTO_ALG_TYPE_ACOMPRESS 0x0000000a
#define CRYPTO_ALG_TYPE_SCOMPRESS 0x0000000b
#define CRYPTO_ALG_TYPE_RNG 0x0000000c
-#define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d
#define CRYPTO_ALG_TYPE_HASH 0x0000000e
#define CRYPTO_ALG_TYPE_SHASH 0x0000000e
#define CRYPTO_ALG_TYPE_AHASH 0x0000000f
-#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
-#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e
#define CRYPTO_ALG_TYPE_ACOMPRESS_MASK 0x0000000e
#define CRYPTO_ALG_LARVAL 0x00000010
@@ -110,7 +110,6 @@
* crypto_aead_walksize() (with the remainder going at the end), no chunk
* can cross a page boundary or a scatterlist element boundary.
* ahash:
- * - The result buffer must be aligned to the algorithm's alignmask.
* - crypto_ahash_finup() must not be used unless the algorithm implements
* ->finup() natively.
*/
@@ -278,18 +277,20 @@ struct compress_alg {
* @cra_ctxsize: Size of the operational context of the transformation. This
* value informs the kernel crypto API about the memory size
* needed to be allocated for the transformation context.
- * @cra_alignmask: Alignment mask for the input and output data buffer. The data
- * buffer containing the input data for the algorithm must be
- * aligned to this alignment mask. The data buffer for the
- * output data must be aligned to this alignment mask. Note that
- * the Crypto API will do the re-alignment in software, but
- * only under special conditions and there is a performance hit.
- * The re-alignment happens at these occasions for different
- * @cra_u types: cipher -- For both input data and output data
- * buffer; ahash -- For output hash destination buf; shash --
- * For output hash destination buf.
- * This is needed on hardware which is flawed by design and
- * cannot pick data from arbitrary addresses.
+ * @cra_alignmask: For cipher, skcipher, lskcipher, and aead algorithms this is
+ * 1 less than the alignment, in bytes, that the algorithm
+ * implementation requires for input and output buffers. When
+ * the crypto API is invoked with buffers that are not aligned
+ * to this alignment, the crypto API automatically utilizes
+ * appropriately aligned temporary buffers to comply with what
+ * the algorithm needs. (For scatterlists this happens only if
+ * the algorithm uses the skcipher_walk helper functions.) This
+ * misalignment handling carries a performance penalty, so it is
+ * preferred that algorithms do not set a nonzero alignmask.
+ * Also, crypto API users may wish to allocate buffers aligned
+ * to the alignmask of the algorithm being used, in order to
+ * avoid the API having to realign them. Note: the alignmask is
+ * not supported for hash algorithms and is always 0 for them.
* @cra_priority: Priority of this transformation implementation. In case
* multiple transformations with same @cra_name are available to
* the Crypto API, the kernel will use the one with highest
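Editor's note: a hedged sketch of what the rewritten @cra_alignmask text
implies for API users who want to avoid the re-alignment penalty (the
buffer name is hypothetical; raw_buf needs alignmask bytes of slack):

static u8 *example_align_for_tfm(struct crypto_skcipher *tfm, u8 *raw_buf)
{
	unsigned int mask = crypto_skcipher_alignmask(tfm);

	/* round the pointer up to the alignment the algorithm wants */
	return PTR_ALIGN(raw_buf, mask + 1);
}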
diff --git a/include/linux/cxl-event.h b/include/linux/cxl-event.h
new file mode 100644
index 000000000000..03fa6d50d46f
--- /dev/null
+++ b/include/linux/cxl-event.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2023 Intel Corporation. */
+#ifndef _LINUX_CXL_EVENT_H
+#define _LINUX_CXL_EVENT_H
+
+/*
+ * Common Event Record Format
+ * CXL rev 3.0 section 8.2.9.2.1; Table 8-42
+ */
+struct cxl_event_record_hdr {
+ u8 length;
+ u8 flags[3];
+ __le16 handle;
+ __le16 related_handle;
+ __le64 timestamp;
+ u8 maint_op_class;
+ u8 reserved[15];
+} __packed;
+
+#define CXL_EVENT_RECORD_DATA_LENGTH 0x50
+struct cxl_event_generic {
+ struct cxl_event_record_hdr hdr;
+ u8 data[CXL_EVENT_RECORD_DATA_LENGTH];
+} __packed;
+
+/*
+ * General Media Event Record
+ * CXL rev 3.0 section 8.2.9.2.1.1; Table 8-43
+ */
+#define CXL_EVENT_GEN_MED_COMP_ID_SIZE 0x10
+struct cxl_event_gen_media {
+ struct cxl_event_record_hdr hdr;
+ __le64 phys_addr;
+ u8 descriptor;
+ u8 type;
+ u8 transaction_type;
+ u8 validity_flags[2];
+ u8 channel;
+ u8 rank;
+ u8 device[3];
+ u8 component_id[CXL_EVENT_GEN_MED_COMP_ID_SIZE];
+ u8 reserved[46];
+} __packed;
+
+/*
+ * DRAM Event Record - DER
+ * CXL rev 3.0 section 8.2.9.2.1.2; Table 3-44
+ */
+#define CXL_EVENT_DER_CORRECTION_MASK_SIZE 0x20
+struct cxl_event_dram {
+ struct cxl_event_record_hdr hdr;
+ __le64 phys_addr;
+ u8 descriptor;
+ u8 type;
+ u8 transaction_type;
+ u8 validity_flags[2];
+ u8 channel;
+ u8 rank;
+ u8 nibble_mask[3];
+ u8 bank_group;
+ u8 bank;
+ u8 row[3];
+ u8 column[2];
+ u8 correction_mask[CXL_EVENT_DER_CORRECTION_MASK_SIZE];
+ u8 reserved[0x17];
+} __packed;
+
+/*
+ * Get Health Info Record
+ * CXL rev 3.0 section 8.2.9.8.3.1; Table 8-100
+ */
+struct cxl_get_health_info {
+ u8 health_status;
+ u8 media_status;
+ u8 add_status;
+ u8 life_used;
+ u8 device_temp[2];
+ u8 dirty_shutdown_cnt[4];
+ u8 cor_vol_err_cnt[4];
+ u8 cor_per_err_cnt[4];
+} __packed;
+
+/*
+ * Memory Module Event Record
+ * CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45
+ */
+struct cxl_event_mem_module {
+ struct cxl_event_record_hdr hdr;
+ u8 event_type;
+ struct cxl_get_health_info info;
+ u8 reserved[0x3d];
+} __packed;
+
+union cxl_event {
+ struct cxl_event_generic generic;
+ struct cxl_event_gen_media gen_media;
+ struct cxl_event_dram dram;
+ struct cxl_event_mem_module mem_module;
+} __packed;
+
+/*
+ * Common Event Record Format; in event logs
+ * CXL rev 3.0 section 8.2.9.2.1; Table 8-42
+ */
+struct cxl_event_record_raw {
+ uuid_t id;
+ union cxl_event event;
+} __packed;
+
+enum cxl_event_type {
+ CXL_CPER_EVENT_GENERIC,
+ CXL_CPER_EVENT_GEN_MEDIA,
+ CXL_CPER_EVENT_DRAM,
+ CXL_CPER_EVENT_MEM_MODULE,
+};
+
+#define CPER_CXL_DEVICE_ID_VALID BIT(0)
+#define CPER_CXL_DEVICE_SN_VALID BIT(1)
+#define CPER_CXL_COMP_EVENT_LOG_VALID BIT(2)
+struct cxl_cper_event_rec {
+ struct {
+ u32 length;
+ u64 validation_bits;
+ struct cper_cxl_event_devid {
+ u16 vendor_id;
+ u16 device_id;
+ u8 func_num;
+ u8 device_num;
+ u8 bus_num;
+ u16 segment_num;
+ u16 slot_num; /* bits 2:0 reserved */
+ u8 reserved;
+ } __packed device_id;
+ struct cper_cxl_event_sn {
+ u32 lower_dw;
+ u32 upper_dw;
+ } __packed dev_serial_num;
+ } __packed hdr;
+
+ union cxl_event event;
+} __packed;
+
+#endif /* _LINUX_CXL_EVENT_H */
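Editor's note: a hedged sketch of consuming the new header — routing a
CPER-wrapped CXL event record by its type; the handler functions are
hypothetical:

static void example_handle_event(enum cxl_event_type type,
				 struct cxl_cper_event_rec *rec)
{
	switch (type) {
	case CXL_CPER_EVENT_GEN_MEDIA:
		handle_gen_media(&rec->event.gen_media);	/* hypothetical */
		break;
	case CXL_CPER_EVENT_DRAM:
		handle_dram(&rec->event.dram);			/* hypothetical */
		break;
	case CXL_CPER_EVENT_MEM_MODULE:
		handle_mem_module(&rec->event.mem_module);	/* hypothetical */
		break;
	default:
		handle_generic(&rec->event.generic);		/* hypothetical */
		break;
	}
}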
diff --git a/include/linux/damon.h b/include/linux/damon.h
index d5d4d19928e0..5881e4ac30be 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -2,7 +2,7 @@
/*
* DAMON api
*
- * Author: SeongJae Park <sjpark@amazon.de>
+ * Author: SeongJae Park <sj@kernel.org>
*/
#ifndef _DAMON_H_
@@ -40,9 +40,24 @@ struct damon_addr_range {
* @ar: The address range of the region.
* @sampling_addr: Address of the sample for the next access check.
* @nr_accesses: Access frequency of this region.
+ * @nr_accesses_bp: @nr_accesses in basis points (0.01%), updated for
+ * each sampling interval.
* @list: List head for siblings.
* @age: Age of this region.
*
+ * @nr_accesses is reset to zero for every &damon_attrs->aggr_interval and is
+ * increased for every &damon_attrs->sample_interval if an access to the region
+ * during the last sampling interval is found. The update of this field should
+ * not be done with direct access but with the helper function,
+ * damon_update_region_access_rate().
+ *
+ * @nr_accesses_bp is another representation of @nr_accesses in basis point
+ * (1 in 10,000) that is updated for every &damon_attrs->sample_interval in a
+ * manner similar to a moving sum. By the algorithm, this value becomes
+ * @nr_accesses * 10000 for every &struct damon_attrs->aggr_interval. This can
+ * be used when the aggregation interval is too long to wait for before
+ * getting the access monitoring results.
+ *
* @age is initially zero, increased for each aggregation interval, and reset
* to zero again if the access frequency is significantly changed. If two
* regions are merged into a new region, both @nr_accesses and @age of the new
@@ -52,6 +67,7 @@ struct damon_region {
struct damon_addr_range ar;
unsigned long sampling_addr;
unsigned int nr_accesses;
+ unsigned int nr_accesses_bp;
struct list_head list;
unsigned int age;
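Editor's note: a worked example of the @nr_accesses_bp relationship above —
with sample_interval = 5 ms and aggr_interval = 100 ms there are 20 samples
per aggregation, so a region accessed in every sample converges to
nr_accesses = 20 and nr_accesses_bp = 200,000. A hedged helper
(damon_max_nr_accesses() is added later in this header):

/* access frequency of a region, in per-mille of the per-attrs maximum */
static unsigned int example_region_freq_permille(struct damon_region *r,
						 struct damon_attrs *attrs)
{
	/* nr_accesses_bp ~= nr_accesses * 10000 */
	return r->nr_accesses_bp / (damon_max_nr_accesses(attrs) * 10);
}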
@@ -120,6 +136,9 @@ enum damos_action {
* @weight_nr_accesses: Weight of the region's nr_accesses for prioritization.
* @weight_age: Weight of the region's age for prioritization.
*
+ * @get_score: Feedback function for self-tuning quota.
+ * @get_score_arg: Parameter for @get_score
+ *
* To avoid consuming too much CPU time or IO resources for applying the
* &struct damos->action to large memory, DAMON allows users to set time and/or
* size quotas. The quotas can be set by writing non-zero values to &ms and
@@ -137,6 +156,17 @@ enum damos_action {
* You could customize the prioritization logic by setting &weight_sz,
* &weight_nr_accesses, and &weight_age, because monitoring operations are
* encouraged to respect those.
+ *
+ * If @get_score function pointer is set, DAMON calls it back with
+ * @get_score_arg and gets its return value for every @reset_interval.
+ * Then, DAMON adjusts the effective quota using the return value as a feedback
+ * score to the current quota, using its internal feedback loop algorithm.
+ *
+ * The feedback loop algorithm assumes the quota input and the feedback score
+ * output are in a positive proportional relationship, and the goal of the
+ * tuning is getting a feedback score value of 10,000. If @ms and/or @sz are
+ * set together, those work as a hard limit quota. If neither @ms nor @sz are
+ * set, the mechanism starts from the quota of one byte.
*/
struct damos_quota {
unsigned long ms;
@@ -147,6 +177,9 @@ struct damos_quota {
unsigned int weight_nr_accesses;
unsigned int weight_age;
+ unsigned long (*get_score)(void *arg);
+ void *get_score_arg;
+
/* private: */
/* For throughput estimation */
unsigned long total_charged_sz;
@@ -163,6 +196,9 @@ struct damos_quota {
/* For prioritization */
unsigned long histogram[DAMOS_MAX_SCORE + 1];
unsigned int min_score;
+
+ /* For feedback loop */
+ unsigned long esz_bp;
};
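Editor's note: a hedged sketch of a @get_score feedback callback matching
the contract above — the return value should scale with the quota's effect,
with 10,000 meaning the goal is met (the metric reader and the argument are
hypothetical):

static unsigned long example_quota_score(void *arg)
{
	unsigned long *target_kb = arg;			/* hypothetical goal */
	unsigned long current_kb = example_read_metric();	/* hypothetical */

	/* 10,000 when on target; proportionally more/less otherwise */
	return current_kb * 10000 / *target_kb;
}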
/**
@@ -226,16 +262,26 @@ struct damos_stat {
* enum damos_filter_type - Type of memory for &struct damos_filter
* @DAMOS_FILTER_TYPE_ANON: Anonymous pages.
* @DAMOS_FILTER_TYPE_MEMCG: Specific memcg's pages.
+ * @DAMOS_FILTER_TYPE_ADDR: Address range.
+ * @DAMOS_FILTER_TYPE_TARGET: Data Access Monitoring target.
* @NR_DAMOS_FILTER_TYPES: Number of filter types.
*
- * The support of each filter type is up to running &struct damon_operations.
- * &enum DAMON_OPS_PADDR is supporting all filter types, while
- * &enum DAMON_OPS_VADDR and &enum DAMON_OPS_FVADDR are not supporting any
- * filter types.
+ * The anon pages type and memcg type filters are handled by underlying
+ * &struct damon_operations as a part of scheme action trying, and therefore
+ * accounted as 'tried'. In contrast, other types are handled by core layer
+ * before trying of the action and therefore not accounted as 'tried'.
+ *
+ * Support of the filters that are handled by &struct damon_operations depends
+ * on the running &struct damon_operations.
+ * &enum DAMON_OPS_PADDR supports both anon pages type and memcg type filters,
+ * while &enum DAMON_OPS_VADDR and &enum DAMON_OPS_FVADDR don't support any of
+ * the two types.
*/
enum damos_filter_type {
DAMOS_FILTER_TYPE_ANON,
DAMOS_FILTER_TYPE_MEMCG,
+ DAMOS_FILTER_TYPE_ADDR,
+ DAMOS_FILTER_TYPE_TARGET,
NR_DAMOS_FILTER_TYPES,
};
@@ -244,18 +290,24 @@ enum damos_filter_type {
* @type: Type of the page.
* @matching: If the matching page should filtered out or in.
* @memcg_id: Memcg id of the question if @type is DAMOS_FILTER_MEMCG.
+ * @addr_range: Address range if @type is DAMOS_FILTER_TYPE_ADDR.
+ * @target_idx: Index of the &struct damon_target of
+ * &damon_ctx->adaptive_targets if @type is
+ * DAMOS_FILTER_TYPE_TARGET.
* @list: List head for siblings.
*
* Before applying the &damos->action to a memory region, DAMOS checks if each
* page of the region matches to this and avoid applying the action if so.
- * Note that the check support is up to &struct damon_operations
- * implementation.
+ * Support of each filter type depends on the running &struct damon_operations
+ * and the type. Refer to &enum damos_filter_type for more detail.
*/
struct damos_filter {
enum damos_filter_type type;
bool matching;
union {
unsigned short memcg_id;
+ struct damon_addr_range addr_range;
+ int target_idx;
};
struct list_head list;
};
@@ -282,24 +334,24 @@ struct damos_access_pattern {
* struct damos - Represents a Data Access Monitoring-based Operation Scheme.
* @pattern: Access pattern of target regions.
* @action: &damo_action to be applied to the target regions.
+ * @apply_interval_us: The time between applying the @action.
* @quota: Control the aggressiveness of this scheme.
* @wmarks: Watermarks for automated (in)activation of this scheme.
* @filters: Additional set of &struct damos_filter for &action.
* @stat: Statistics of this scheme.
* @list: List head for siblings.
*
- * For each aggregation interval, DAMON finds regions which fit in the
+ * For each @apply_interval_us, DAMON finds regions which fit in the
* &pattern and applies &action to those. To avoid consuming too much
* CPU time or IO resources for the &action, &quota is used.
*
+ * If @apply_interval_us is zero, &damon_attrs->aggr_interval is used instead.
+ *
* To do the work only when needed, schemes can be activated for specific
* system situations using &wmarks. If all schemes that registered to the
* monitoring context are inactive, DAMON stops monitoring either, and just
* repeatedly checks the watermarks.
*
- * If all schemes that registered to a &struct damon_ctx are inactive, DAMON
- * stops monitoring and just repeatedly checks the watermarks.
- *
* Before applying the &action to a memory region, &struct damon_operations
* implementation could check pages of the region and skip &action to respect
* &filters
@@ -311,6 +363,14 @@ struct damos_access_pattern {
struct damos {
struct damos_access_pattern pattern;
enum damos_action action;
+ unsigned long apply_interval_us;
+/* private: internal use only */
+ /*
+ * number of sample intervals that should be passed before applying
+ * @action
+ */
+ unsigned long next_apply_sis;
+/* public: */
struct damos_quota quota;
struct damos_watermarks wmarks;
struct list_head filters;
@@ -456,13 +516,14 @@ struct damon_callback {
* regions.
*
* For each @sample_interval, DAMON checks whether each region is accessed or
- * not. It aggregates and keeps the access information (number of accesses to
- * each region) for @aggr_interval time. DAMON also checks whether the target
- * memory regions need update (e.g., by ``mmap()`` calls from the application,
- * in case of virtual memory monitoring) and applies the changes for each
- * @ops_update_interval. All time intervals are in micro-seconds.
- * Please refer to &struct damon_operations and &struct damon_callback for more
- * detail.
+ * not during the last @sample_interval. If such access is found, DAMON
+ * aggregates the information by increasing &damon_region->nr_accesses for
+ * @aggr_interval time. For each @aggr_interval, the count is reset. DAMON
+ * also checks whether the target memory regions need update (e.g., by
+ * ``mmap()`` calls from the application, in case of virtual memory monitoring)
+ * and applies the changes for each @ops_update_interval. All time intervals
+ * are in micro-seconds. Please refer to &struct damon_operations and &struct
+ * damon_callback for more detail.
*/
struct damon_attrs {
unsigned long sample_interval;
@@ -506,8 +567,20 @@ struct damon_ctx {
struct damon_attrs attrs;
/* private: internal use only */
- struct timespec64 last_aggregation;
- struct timespec64 last_ops_update;
+ /* number of sample intervals that passed since this context started */
+ unsigned long passed_sample_intervals;
+ /*
+ * number of sample intervals that should be passed before next
+ * aggregation
+ */
+ unsigned long next_aggregation_sis;
+ /*
+ * number of sample intervals that should be passed before next ops
+ * update
+ */
+ unsigned long next_ops_update_sis;
+ /* for waiting until the execution of the kdamond_fn is started */
+ struct completion kdamond_started;
/* public: */
struct task_struct *kdamond;
@@ -592,6 +665,8 @@ void damon_add_region(struct damon_region *r, struct damon_target *t);
void damon_destroy_region(struct damon_region *r, struct damon_target *t);
int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
unsigned int nr_ranges);
+void damon_update_region_access_rate(struct damon_region *r, bool accessed,
+ struct damon_attrs *attrs);
struct damos_filter *damos_new_filter(enum damos_filter_type type,
bool matching);
@@ -599,7 +674,9 @@ void damos_add_filter(struct damos *s, struct damos_filter *f);
void damos_destroy_filter(struct damos_filter *f);
struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
- enum damos_action action, struct damos_quota *quota,
+ enum damos_action action,
+ unsigned long apply_interval_us,
+ struct damos_quota *quota,
struct damos_watermarks *wmarks);
void damon_add_scheme(struct damon_ctx *ctx, struct damos *s);
void damon_destroy_scheme(struct damos *s);
@@ -626,6 +703,13 @@ static inline bool damon_target_has_pid(const struct damon_ctx *ctx)
return ctx->ops.id == DAMON_OPS_VADDR || ctx->ops.id == DAMON_OPS_FVADDR;
}
+static inline unsigned int damon_max_nr_accesses(const struct damon_attrs *attrs)
+{
+ /* {aggr,sample}_interval are unsigned long, hence could overflow */
+ return min(attrs->aggr_interval / attrs->sample_interval,
+ (unsigned long)UINT_MAX);
+}
+
int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive);
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs);
diff --git a/include/linux/dax.h b/include/linux/dax.h
index bf6258472e49..b463502b16e1 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -159,8 +159,8 @@ int dax_writeback_mapping_range(struct address_space *mapping,
struct page *dax_layout_busy_page(struct address_space *mapping);
struct page *dax_layout_busy_page_range(struct address_space *mapping, loff_t start, loff_t end);
-dax_entry_t dax_lock_page(struct page *page);
-void dax_unlock_page(struct page *page, dax_entry_t cookie);
+dax_entry_t dax_lock_folio(struct folio *folio);
+void dax_unlock_folio(struct folio *folio, dax_entry_t cookie);
dax_entry_t dax_lock_mapping_entry(struct address_space *mapping,
unsigned long index, struct page **page);
void dax_unlock_mapping_entry(struct address_space *mapping,
@@ -182,14 +182,14 @@ static inline int dax_writeback_mapping_range(struct address_space *mapping,
return -EOPNOTSUPP;
}
-static inline dax_entry_t dax_lock_page(struct page *page)
+static inline dax_entry_t dax_lock_folio(struct folio *folio)
{
- if (IS_DAX(page->mapping->host))
+ if (IS_DAX(folio->mapping->host))
return ~0UL;
return 0;
}
-static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
+static inline void dax_unlock_folio(struct folio *folio, dax_entry_t cookie)
{
}
@@ -241,10 +241,10 @@ void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops);
-vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
+vm_fault_t dax_iomap_fault(struct vm_fault *vmf, unsigned int order,
pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
- enum page_entry_size pe_size, pfn_t pfn);
+ unsigned int order, pfn_t pfn);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
pgoff_t index);
@@ -261,6 +261,19 @@ static inline bool dax_mapping(struct address_space *mapping)
return mapping->host && IS_DAX(mapping->host);
}
+/*
+ * Due to dax's memory and block duo personalities, hwpoison reporting
+ * takes into consideration which personality is presently visible.
+ * When dax acts like a block device, such as in block IO, an encounter of
+ * dax hwpoison is reported as -EIO.
+ * When dax acts like memory, such as in page fault, a detection of hwpoison
+ * is reported as -EHWPOISON which leads to VM_FAULT_HWPOISON.
+ */
+static inline int dax_mem2blk_err(int err)
+{
+ return (err == -EHWPOISON) ? -EIO : err;
+}
+
#ifdef CONFIG_DEV_DAX_HMEM_DEVICES
void hmem_register_resource(int target_nid, struct resource *r);
#else
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 6b351e009f59..d07cf2f1bb7d 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -68,12 +68,12 @@ extern const struct qstr dotdot_name;
* large memory footprint increase).
*/
#ifdef CONFIG_64BIT
-# define DNAME_INLINE_LEN 32 /* 192 bytes */
+# define DNAME_INLINE_LEN 40 /* 192 bytes */
#else
# ifdef CONFIG_SMP
-# define DNAME_INLINE_LEN 36 /* 128 bytes */
-# else
# define DNAME_INLINE_LEN 40 /* 128 bytes */
+# else
+# define DNAME_INLINE_LEN 44 /* 128 bytes */
# endif
#endif
@@ -101,8 +101,8 @@ struct dentry {
struct list_head d_lru; /* LRU list */
wait_queue_head_t *d_wait; /* in-lookup ones only */
};
- struct list_head d_child; /* child of parent list */
- struct list_head d_subdirs; /* our children */
+ struct hlist_node d_sib; /* child of parent list */
+ struct hlist_head d_children; /* our children */
/*
* d_alias and d_rcu can share memory
*/
@@ -111,7 +111,7 @@ struct dentry {
struct hlist_bl_node d_in_lookup_hash; /* only for in-lookup ones */
struct rcu_head d_rcu;
} d_u;
-} __randomize_layout;
+};
/*
* dentry->d_lock spinlock nesting subclasses:
@@ -151,13 +151,13 @@ struct dentry_operations {
*/
/* d_flags entries */
-#define DCACHE_OP_HASH 0x00000001
-#define DCACHE_OP_COMPARE 0x00000002
-#define DCACHE_OP_REVALIDATE 0x00000004
-#define DCACHE_OP_DELETE 0x00000008
-#define DCACHE_OP_PRUNE 0x00000010
+#define DCACHE_OP_HASH BIT(0)
+#define DCACHE_OP_COMPARE BIT(1)
+#define DCACHE_OP_REVALIDATE BIT(2)
+#define DCACHE_OP_DELETE BIT(3)
+#define DCACHE_OP_PRUNE BIT(4)
-#define DCACHE_DISCONNECTED 0x00000020
+#define DCACHE_DISCONNECTED BIT(5)
/* This dentry is possibly not currently connected to the dcache tree, in
* which case its parent will either be itself, or will have this flag as
* well. nfsd will not use a dentry with this bit set, but will first
@@ -168,50 +168,47 @@ struct dentry_operations {
* dentry into place and return that dentry rather than the passed one,
* typically using d_splice_alias. */
-#define DCACHE_REFERENCED 0x00000040 /* Recently used, don't discard. */
+#define DCACHE_REFERENCED BIT(6) /* Recently used, don't discard. */
-#define DCACHE_DONTCACHE 0x00000080 /* Purge from memory on final dput() */
+#define DCACHE_DONTCACHE BIT(7) /* Purge from memory on final dput() */
-#define DCACHE_CANT_MOUNT 0x00000100
-#define DCACHE_GENOCIDE 0x00000200
-#define DCACHE_SHRINK_LIST 0x00000400
+#define DCACHE_CANT_MOUNT BIT(8)
+#define DCACHE_GENOCIDE BIT(9)
+#define DCACHE_SHRINK_LIST BIT(10)
-#define DCACHE_OP_WEAK_REVALIDATE 0x00000800
+#define DCACHE_OP_WEAK_REVALIDATE BIT(11)
-#define DCACHE_NFSFS_RENAMED 0x00001000
+#define DCACHE_NFSFS_RENAMED BIT(12)
/* this dentry has been "silly renamed" and has to be deleted on the last
* dput() */
-#define DCACHE_COOKIE 0x00002000 /* For use by dcookie subsystem */
-#define DCACHE_FSNOTIFY_PARENT_WATCHED 0x00004000
+#define DCACHE_FSNOTIFY_PARENT_WATCHED BIT(14)
/* Parent inode is watched by some fsnotify listener */
-#define DCACHE_DENTRY_KILLED 0x00008000
+#define DCACHE_DENTRY_KILLED BIT(15)
-#define DCACHE_MOUNTED 0x00010000 /* is a mountpoint */
-#define DCACHE_NEED_AUTOMOUNT 0x00020000 /* handle automount on this dir */
-#define DCACHE_MANAGE_TRANSIT 0x00040000 /* manage transit from this dirent */
+#define DCACHE_MOUNTED BIT(16) /* is a mountpoint */
+#define DCACHE_NEED_AUTOMOUNT BIT(17) /* handle automount on this dir */
+#define DCACHE_MANAGE_TRANSIT BIT(18) /* manage transit from this dirent */
#define DCACHE_MANAGED_DENTRY \
(DCACHE_MOUNTED|DCACHE_NEED_AUTOMOUNT|DCACHE_MANAGE_TRANSIT)
-#define DCACHE_LRU_LIST 0x00080000
+#define DCACHE_LRU_LIST BIT(19)
-#define DCACHE_ENTRY_TYPE 0x00700000
-#define DCACHE_MISS_TYPE 0x00000000 /* Negative dentry (maybe fallthru to nowhere) */
-#define DCACHE_WHITEOUT_TYPE 0x00100000 /* Whiteout dentry (stop pathwalk) */
-#define DCACHE_DIRECTORY_TYPE 0x00200000 /* Normal directory */
-#define DCACHE_AUTODIR_TYPE 0x00300000 /* Lookupless directory (presumed automount) */
-#define DCACHE_REGULAR_TYPE 0x00400000 /* Regular file type (or fallthru to such) */
-#define DCACHE_SPECIAL_TYPE 0x00500000 /* Other file type (or fallthru to such) */
-#define DCACHE_SYMLINK_TYPE 0x00600000 /* Symlink (or fallthru to such) */
+#define DCACHE_ENTRY_TYPE (7 << 20) /* bits 20..22 are for storing type: */
+#define DCACHE_MISS_TYPE (0 << 20) /* Negative dentry */
+#define DCACHE_WHITEOUT_TYPE (1 << 20) /* Whiteout dentry (stop pathwalk) */
+#define DCACHE_DIRECTORY_TYPE (2 << 20) /* Normal directory */
+#define DCACHE_AUTODIR_TYPE (3 << 20) /* Lookupless directory (presumed automount) */
+#define DCACHE_REGULAR_TYPE (4 << 20) /* Regular file type */
+#define DCACHE_SPECIAL_TYPE (5 << 20) /* Other file type */
+#define DCACHE_SYMLINK_TYPE (6 << 20) /* Symlink */
-#define DCACHE_MAY_FREE 0x00800000
-#define DCACHE_FALLTHRU 0x01000000 /* Fall through to lower layer */
-#define DCACHE_NOKEY_NAME 0x02000000 /* Encrypted name encoded without key */
-#define DCACHE_OP_REAL 0x04000000
+#define DCACHE_NOKEY_NAME BIT(25) /* Encrypted name encoded without key */
+#define DCACHE_OP_REAL BIT(26)
-#define DCACHE_PAR_LOOKUP 0x10000000 /* being looked up (with parent locked shared) */
-#define DCACHE_DENTRY_CURSOR 0x20000000
-#define DCACHE_NORCU 0x40000000 /* No RCU delay for freeing */
+#define DCACHE_PAR_LOOKUP BIT(28) /* being looked up (with parent locked shared) */
+#define DCACHE_DENTRY_CURSOR BIT(29)
+#define DCACHE_NORCU BIT(30) /* No RCU delay for freeing */
extern seqlock_t rename_lock;
@@ -220,8 +217,6 @@ extern seqlock_t rename_lock;
*/
extern void d_instantiate(struct dentry *, struct inode *);
extern void d_instantiate_new(struct dentry *, struct inode *);
-extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
-extern struct dentry * d_instantiate_anon(struct dentry *, struct inode *);
extern void __d_drop(struct dentry *dentry);
extern void d_drop(struct dentry *dentry);
extern void d_delete(struct dentry *);
@@ -242,15 +237,12 @@ extern struct dentry * d_obtain_alias(struct inode *);
extern struct dentry * d_obtain_root(struct inode *);
extern void shrink_dcache_sb(struct super_block *);
extern void shrink_dcache_parent(struct dentry *);
-extern void shrink_dcache_for_umount(struct super_block *);
extern void d_invalidate(struct dentry *);
/* only used at mount-time */
extern struct dentry * d_make_root(struct inode *);
-/* <clickety>-<click> the ramfs-type tree */
-extern void d_genocide(struct dentry *);
-
+extern void d_mark_tmpfile(struct file *, struct inode *);
extern void d_tmpfile(struct file *, struct inode *);
extern struct dentry *d_find_alias(struct inode *);
@@ -273,12 +265,8 @@ extern void d_move(struct dentry *, struct dentry *);
extern void d_exchange(struct dentry *, struct dentry *);
extern struct dentry *d_ancestor(struct dentry *, struct dentry *);
-/* appendix may either be NULL or be used for transname suffixes */
extern struct dentry *d_lookup(const struct dentry *, const struct qstr *);
extern struct dentry *d_hash_and_lookup(struct dentry *, struct qstr *);
-extern struct dentry *__d_lookup(const struct dentry *, const struct qstr *);
-extern struct dentry *__d_lookup_rcu(const struct dentry *parent,
- const struct qstr *name, unsigned *seq);
static inline unsigned d_count(const struct dentry *dentry)
{
@@ -300,20 +288,40 @@ extern char *dentry_path(const struct dentry *, char *, int);
/* Allocation counts.. */
/**
- * dget, dget_dlock - get a reference to a dentry
- * @dentry: dentry to get a reference to
+ * dget_dlock - get a reference to a dentry
+ * @dentry: dentry to get a reference to
*
- * Given a dentry or %NULL pointer increment the reference count
- * if appropriate and return the dentry. A dentry will not be
- * destroyed when it has references.
+ * Given a live dentry, increment the reference count and return the dentry.
+ * Caller must hold @dentry->d_lock. Making sure that dentry is alive is
+ * caller's responsibility. There are many conditions sufficient to guarantee
+ * that; e.g. anything with non-negative refcount is alive, so's anything
+ * hashed, anything positive, anyone's parent, etc.
*/
static inline struct dentry *dget_dlock(struct dentry *dentry)
{
- if (dentry)
- dentry->d_lockref.count++;
+ dentry->d_lockref.count++;
return dentry;
}
+
+/**
+ * dget - get a reference to a dentry
+ * @dentry: dentry to get a reference to
+ *
+ * Given a dentry or %NULL pointer increment the reference count
+ * if appropriate and return the dentry. A dentry will not be
+ * destroyed when it has references. Conversely, a dentry with
+ * no references can disappear for any number of reasons, starting
+ * with memory pressure. In other words, that primitive is
+ * used to clone an existing reference; using it on something with
+ * zero refcount is a bug.
+ *
+ * NOTE: it will spin if @dentry->d_lock is held. From the deadlock
+ * avoidance point of view it is equivalent to spin_lock()/increment
+ * refcount/spin_unlock(), so calling it under @dentry->d_lock is
+ * always a bug; so's calling it under ->d_lock on any of its descendants.
+ *
+ */
static inline struct dentry *dget(struct dentry *dentry)
{
if (dentry)
@@ -324,12 +332,11 @@ static inline struct dentry *dget(struct dentry *dentry)
extern struct dentry *dget_parent(struct dentry *dentry);
/**
- * d_unhashed - is dentry hashed
- * @dentry: entry to check
+ * d_unhashed - is dentry hashed
+ * @dentry: entry to check
*
- * Returns true if the dentry passed is not currently hashed.
+ * Returns true if the dentry passed is not currently hashed.
*/
-
static inline int d_unhashed(const struct dentry *dentry)
{
return hlist_bl_unhashed(&dentry->d_hash);
@@ -489,14 +496,6 @@ static inline int simple_positive(const struct dentry *dentry)
return d_really_is_positive(dentry) && !d_unhashed(dentry);
}
-extern void d_set_fallthru(struct dentry *dentry);
-
-static inline bool d_is_fallthru(const struct dentry *dentry)
-{
- return dentry->d_flags & DCACHE_FALLTHRU;
-}
-
-
extern int sysctl_vfs_cache_pressure;
static inline unsigned long vfs_pressure_ratio(unsigned long val)
@@ -546,21 +545,6 @@ static inline struct inode *d_backing_inode(const struct dentry *upper)
}
/**
- * d_backing_dentry - Get upper or lower dentry we should be using
- * @upper: The upper layer
- *
- * This is the helper that should be used to get the dentry of the inode that
- * will be used if this dentry were opened as a file. It may be the upper
- * dentry or it may be a lower dentry pinned by the upper.
- *
- * Normal filesystems should not use this to access their own dentries.
- */
-static inline struct dentry *d_backing_dentry(struct dentry *upper)
-{
- return upper;
-}
-
-/**
* d_real - Return the real dentry
* @dentry: the dentry to query
* @inode: inode to select the dentry from multiple layers (can be NULL)
@@ -599,4 +583,14 @@ struct name_snapshot {
void take_dentry_name_snapshot(struct name_snapshot *, struct dentry *);
void release_dentry_name_snapshot(struct name_snapshot *);
+static inline struct dentry *d_first_child(const struct dentry *dentry)
+{
+ return hlist_entry_safe(dentry->d_children.first, struct dentry, d_sib);
+}
+
+static inline struct dentry *d_next_sibling(const struct dentry *dentry)
+{
+ return hlist_entry_safe(dentry->d_sib.next, struct dentry, d_sib);
+}
+
#endif /* __LINUX_DCACHE_H */
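Editor's note: a hedged sketch of walking a parent's children with the new
d_first_child()/d_next_sibling() helpers (locking elided; real callers need
the usual dcache protection). hlist_entry_safe() yields NULL at the end of
the list, which terminates the loop:

static void example_walk_children(struct dentry *parent)
{
	struct dentry *child;

	for (child = d_first_child(parent); child;
	     child = d_next_sibling(child))
		pr_info("child: %pd\n", child);
}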
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index ea2d919fd9c7..c9c65b132c0f 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -171,6 +171,25 @@ ssize_t debugfs_write_file_bool(struct file *file, const char __user *user_buf,
ssize_t debugfs_read_file_str(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos);
+/**
+ * struct debugfs_cancellation - cancellation data
+ * @list: internal, for keeping track
+ * @cancel: callback to call
+ * @cancel_data: extra data for the callback to call
+ */
+struct debugfs_cancellation {
+ struct list_head list;
+ void (*cancel)(struct dentry *, void *);
+ void *cancel_data;
+};
+
+void __acquires(cancellation)
+debugfs_enter_cancellation(struct file *file,
+ struct debugfs_cancellation *cancellation);
+void __releases(cancellation)
+debugfs_leave_cancellation(struct file *file,
+ struct debugfs_cancellation *cancellation);
+
#else
#include <linux/err.h>
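Editor's note: a hedged usage sketch for the cancellation API above — a
handler brackets its blocking section so a pending cancellation can
interrupt it; the completion-based wiring here is invented for the example:

static void example_cancel(struct dentry *dentry, void *data)
{
	complete(data);		/* hypothetical: kick the blocked waiter */
}

static ssize_t example_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct debugfs_cancellation c = {
		.cancel = example_cancel,
		.cancel_data = &done,
	};

	debugfs_enter_cancellation(file, &c);
	wait_for_completion(&done);	/* may block until cancelled */
	debugfs_leave_cancellation(file, &c);
	return 0;
}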
diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
index 9192986b1a73..ac862422df15 100644
--- a/include/linux/decompress/mm.h
+++ b/include/linux/decompress/mm.h
@@ -48,7 +48,7 @@ MALLOC_VISIBLE void *malloc(int size)
if (!malloc_ptr)
malloc_ptr = free_mem_ptr;
- malloc_ptr = (malloc_ptr + 3) & ~3; /* Align */
+ malloc_ptr = (malloc_ptr + 7) & ~7; /* Align */
p = (void *)malloc_ptr;
malloc_ptr += size;
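Editor's note on the hunk above: (x + 7) & ~7 is the standard
round-up-to-a-multiple-of-8 idiom for power-of-two alignments (13 becomes
16; 16 stays 16); the patch widens the decompressor's allocation alignment
from 4 to 8 bytes. A hedged generic form:

static unsigned long example_align8(unsigned long v)
{
	return (v + 7) & ~7UL;	/* round v up to the next multiple of 8 */
}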
diff --git a/include/linux/delay.h b/include/linux/delay.h
index 039e7e0c7378..ff9cda975e30 100644
--- a/include/linux/delay.h
+++ b/include/linux/delay.h
@@ -56,6 +56,7 @@ static inline void ndelay(unsigned long x)
extern unsigned long lpj_fine;
void calibrate_delay(void);
+unsigned long calibrate_delay_is_known(void);
void __attribute__((weak)) calibration_delay_done(void);
void msleep(unsigned int msecs);
unsigned long msleep_interruptible(unsigned int msecs);
diff --git a/include/linux/dev_printk.h b/include/linux/dev_printk.h
index 8904063d4c9f..6bfe70decc9f 100644
--- a/include/linux/dev_printk.h
+++ b/include/linux/dev_printk.h
@@ -274,4 +274,6 @@ do { \
WARN_ONCE(condition, "%s %s: " format, \
dev_driver_string(dev), dev_name(dev), ## arg)
+__printf(3, 4) int dev_err_probe(const struct device *dev, int err, const char *fmt, ...);
+
#endif /* _DEVICE_PRINTK_H_ */
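Editor's note: dev_err_probe() logs @err and returns it, treating
-EPROBE_DEFER specially (the message is recorded as the deferral reason
rather than printed as an error), which enables the common probe idiom
sketched below with hypothetical driver names:

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;

	clk = devm_clk_get(&pdev->dev, NULL);	/* hypothetical resource */
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "failed to get clock\n");
	return 0;
}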
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index 7fd704bb8f3d..d312ffbac4dd 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -108,7 +108,6 @@ struct devfreq_dev_profile {
unsigned long initial_freq;
unsigned int polling_ms;
enum devfreq_timer timer;
- bool is_cooling_device;
int (*target)(struct device *dev, unsigned long *freq, u32 flags);
int (*get_dev_status)(struct device *dev,
@@ -118,6 +117,8 @@ struct devfreq_dev_profile {
unsigned long *freq_table;
unsigned int max_state;
+
+ bool is_cooling_device;
};
/**
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index a52d2b9a6846..772ab4d74d94 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -165,18 +165,17 @@ void dm_error(const char *message);
struct dm_dev {
struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
struct dax_device *dax_dev;
- fmode_t mode;
+ blk_mode_t mode;
char name[16];
};
-dev_t dm_get_dev_t(const char *path);
-
/*
* Constructors should call these functions to ensure destination devices
* are opened/closed correctly.
*/
-int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
+int dm_get_device(struct dm_target *ti, const char *path, blk_mode_t mode,
struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);
@@ -545,7 +544,7 @@ int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);
/*
* First create an empty table.
*/
-int dm_table_create(struct dm_table **result, fmode_t mode,
+int dm_table_create(struct dm_table **result, blk_mode_t mode,
unsigned int num_targets, struct mapped_device *md);
/*
@@ -588,7 +587,7 @@ void dm_sync_table(struct mapped_device *md);
* Queries
*/
sector_t dm_table_get_size(struct dm_table *t);
-fmode_t dm_table_get_mode(struct dm_table *t);
+blk_mode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);
const char *dm_table_device_name(struct dm_table *t);
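Under the new signatures a target constructor passes a blk_mode_t (for example BLK_OPEN_READ | BLK_OPEN_WRITE) where it used to pass an fmode_t. A hedged sketch of a constructor/destructor pair (hypothetical target, argument checking trimmed):

/* Hypothetical dm target ctr acquiring its backing device. */
static int demo_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_dev *dev;
	int r;

	r = dm_get_device(ti, argv[0], BLK_OPEN_READ | BLK_OPEN_WRITE, &dev);
	if (r) {
		ti->error = "Device lookup failed";
		return r;
	}

	ti->private = dev;
	return 0;
}

static void demo_dtr(struct dm_target *ti)
{
	dm_put_device(ti, ti->private);
}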
diff --git a/include/linux/device.h b/include/linux/device.h
index 472dd24d4823..97c4b046c09d 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -30,6 +30,7 @@
#include <linux/device/bus.h>
#include <linux/device/class.h>
#include <linux/device/driver.h>
+#include <linux/cleanup.h>
#include <asm/device.h>
struct device;
@@ -41,7 +42,6 @@ struct class;
struct subsys_private;
struct device_node;
struct fwnode_handle;
-struct iommu_ops;
struct iommu_group;
struct dev_pin_info;
struct dev_iommu;
@@ -62,7 +62,7 @@ struct msi_device_data;
*/
struct subsys_interface {
const char *name;
- struct bus_type *subsys;
+ const struct bus_type *subsys;
struct list_head node;
int (*add_dev)(struct device *dev, struct subsys_interface *sif);
void (*remove_dev)(struct device *dev, struct subsys_interface *sif);
@@ -71,9 +71,9 @@ struct subsys_interface {
int subsys_interface_register(struct subsys_interface *sif);
void subsys_interface_unregister(struct subsys_interface *sif);
-int subsys_system_register(struct bus_type *subsys,
+int subsys_system_register(const struct bus_type *subsys,
const struct attribute_group **groups);
-int subsys_virtual_register(struct bus_type *subsys,
+int subsys_virtual_register(const struct bus_type *subsys,
const struct attribute_group **groups);
/*
@@ -96,7 +96,12 @@ struct device_type {
const struct dev_pm_ops *pm;
};
-/* interface for exporting device attributes */
+/**
+ * struct device_attribute - Interface for exporting device attributes.
+ * @attr: sysfs attribute definition.
+ * @show: Show handler.
+ * @store: Store handler.
+ */
struct device_attribute {
struct attribute attr;
ssize_t (*show)(struct device *dev, struct device_attribute *attr,
@@ -105,6 +110,11 @@ struct device_attribute {
const char *buf, size_t count);
};
+/**
+ * struct dev_ext_attribute - Exported device attribute with extra context.
+ * @attr: Exported device attribute.
+ * @var: Pointer to context.
+ */
struct dev_ext_attribute {
struct device_attribute attr;
void *var;
@@ -123,30 +133,124 @@ ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count);
+/**
+ * DEVICE_ATTR - Define a device attribute.
+ * @_name: Attribute name.
+ * @_mode: File mode.
+ * @_show: Show handler. Optional, but mandatory if attribute is readable.
+ * @_store: Store handler. Optional, but mandatory if attribute is writable.
+ *
+ * Convenience macro for defining a struct device_attribute.
+ *
+ * For example, ``DEVICE_ATTR(foo, 0644, foo_show, foo_store);`` expands to:
+ *
+ * .. code-block:: c
+ *
+ * struct device_attribute dev_attr_foo = {
+ * .attr = { .name = "foo", .mode = 0644 },
+ * .show = foo_show,
+ * .store = foo_store,
+ * };
+ */
#define DEVICE_ATTR(_name, _mode, _show, _store) \
struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
+
+/**
+ * DEVICE_ATTR_PREALLOC - Define a preallocated device attribute.
+ * @_name: Attribute name.
+ * @_mode: File mode.
+ * @_show: Show handler. Optional, but mandatory if attribute is readable.
+ * @_store: Store handler. Optional, but mandatory if attribute is writable.
+ *
+ * Like DEVICE_ATTR(), but ``SYSFS_PREALLOC`` is set on @_mode.
+ */
#define DEVICE_ATTR_PREALLOC(_name, _mode, _show, _store) \
struct device_attribute dev_attr_##_name = \
__ATTR_PREALLOC(_name, _mode, _show, _store)
+
+/**
+ * DEVICE_ATTR_RW - Define a read-write device attribute.
+ * @_name: Attribute name.
+ *
+ * Like DEVICE_ATTR(), but @_mode is 0644, @_show is <_name>_show,
+ * and @_store is <_name>_store.
+ */
#define DEVICE_ATTR_RW(_name) \
struct device_attribute dev_attr_##_name = __ATTR_RW(_name)
+
+/**
+ * DEVICE_ATTR_ADMIN_RW - Define an admin-only read-write device attribute.
+ * @_name: Attribute name.
+ *
+ * Like DEVICE_ATTR_RW(), but @_mode is 0600.
+ */
#define DEVICE_ATTR_ADMIN_RW(_name) \
struct device_attribute dev_attr_##_name = __ATTR_RW_MODE(_name, 0600)
+
+/**
+ * DEVICE_ATTR_RO - Define a readable device attribute.
+ * @_name: Attribute name.
+ *
+ * Like DEVICE_ATTR(), but @_mode is 0444 and @_show is <_name>_show.
+ */
#define DEVICE_ATTR_RO(_name) \
struct device_attribute dev_attr_##_name = __ATTR_RO(_name)
+
+/**
+ * DEVICE_ATTR_ADMIN_RO - Define an admin-only readable device attribute.
+ * @_name: Attribute name.
+ *
+ * Like DEVICE_ATTR_RO(), but @_mode is 0400.
+ */
#define DEVICE_ATTR_ADMIN_RO(_name) \
struct device_attribute dev_attr_##_name = __ATTR_RO_MODE(_name, 0400)
+
+/**
+ * DEVICE_ATTR_WO - Define an admin-only writable device attribute.
+ * @_name: Attribute name.
+ *
+ * Like DEVICE_ATTR(), but @_mode is 0200 and @_store is <_name>_store.
+ */
#define DEVICE_ATTR_WO(_name) \
struct device_attribute dev_attr_##_name = __ATTR_WO(_name)
+
+/**
+ * DEVICE_ULONG_ATTR - Define a device attribute backed by an unsigned long.
+ * @_name: Attribute name.
+ * @_mode: File mode.
+ * @_var: Identifier of unsigned long.
+ *
+ * Like DEVICE_ATTR(), but @_show and @_store are automatically provided
+ * such that reads and writes to the attribute from userspace affect @_var.
+ */
#define DEVICE_ULONG_ATTR(_name, _mode, _var) \
struct dev_ext_attribute dev_attr_##_name = \
{ __ATTR(_name, _mode, device_show_ulong, device_store_ulong), &(_var) }
+
+/**
+ * DEVICE_INT_ATTR - Define a device attribute backed by an int.
+ * @_name: Attribute name.
+ * @_mode: File mode.
+ * @_var: Identifier of int.
+ *
+ * Like DEVICE_ULONG_ATTR(), but @_var is an int.
+ */
#define DEVICE_INT_ATTR(_name, _mode, _var) \
struct dev_ext_attribute dev_attr_##_name = \
{ __ATTR(_name, _mode, device_show_int, device_store_int), &(_var) }
+
+/**
+ * DEVICE_BOOL_ATTR - Define a device attribute backed by a bool.
+ * @_name: Attribute name.
+ * @_mode: File mode.
+ * @_var: Identifier of bool.
+ *
+ * Like DEVICE_ULONG_ATTR(), but @_var is a bool.
+ */
#define DEVICE_BOOL_ATTR(_name, _mode, _var) \
struct dev_ext_attribute dev_attr_##_name = \
{ __ATTR(_name, _mode, device_show_bool, device_store_bool), &(_var) }
+
#define DEVICE_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
struct device_attribute dev_attr_##_name = \
__ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
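For instance, a read-write attribute wired up with DEVICE_ATTR_RW() only needs <name>_show/<name>_store handlers; a sketch follows (the driver-private struct and drvdata layout are assumptions):

/* Hypothetical device attribute using the _RW convenience macro. */
static ssize_t delay_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct demo_priv *p = dev_get_drvdata(dev);	/* assumed */

	return sysfs_emit(buf, "%u\n", p->delay_ms);
}

static ssize_t delay_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct demo_priv *p = dev_get_drvdata(dev);
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 0, &val);
	if (ret)
		return ret;

	p->delay_ms = val;
	return count;
}
static DEVICE_ATTR_RW(delay);	/* defines dev_attr_delay, mode 0644 */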
@@ -223,6 +327,17 @@ static inline void *devm_kcalloc(struct device *dev,
{
return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
}
+static inline __realloc_size(3, 4) void * __must_check
+devm_krealloc_array(struct device *dev, void *p, size_t new_n, size_t new_size, gfp_t flags)
+{
+ size_t bytes;
+
+ if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
+ return NULL;
+
+ return devm_krealloc(dev, p, bytes, flags);
+}
+
void devm_kfree(struct device *dev, const void *p);
char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc;
const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp);
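devm_krealloc_array() mirrors krealloc_array(): it checks new_n * new_size for overflow before delegating to devm_krealloc(). A hedged usage sketch (hypothetical helper growing a managed array):

/* Hypothetical: grow a devres-managed array, failing on overflow/ENOMEM. */
static int demo_grow(struct device *dev, u32 **items, size_t *cap)
{
	size_t new_cap = *cap ? *cap * 2 : 16;
	u32 *tmp;

	tmp = devm_krealloc_array(dev, *items, new_cap, sizeof(**items),
				  GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	*items = tmp;
	*cap = new_cap;
	return 0;
}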
@@ -233,6 +348,7 @@ unsigned long devm_get_free_pages(struct device *dev,
gfp_t gfp_mask, unsigned int order);
void devm_free_pages(struct device *dev, unsigned long addr);
+#ifdef CONFIG_HAS_IOMEM
void __iomem *devm_ioremap_resource(struct device *dev,
const struct resource *res);
void __iomem *devm_ioremap_resource_wc(struct device *dev,
@@ -241,14 +357,39 @@ void __iomem *devm_ioremap_resource_wc(struct device *dev,
void __iomem *devm_of_iomap(struct device *dev,
struct device_node *node, int index,
resource_size_t *size);
+#else
+
+static inline
+void __iomem *devm_ioremap_resource(struct device *dev,
+ const struct resource *res)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline
+void __iomem *devm_ioremap_resource_wc(struct device *dev,
+ const struct resource *res)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline
+void __iomem *devm_of_iomap(struct device *dev,
+ struct device_node *node, int index,
+ resource_size_t *size)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+#endif
/* allows to add/remove a custom action to devres stack */
void devm_remove_action(struct device *dev, void (*action)(void *), void *data);
void devm_release_action(struct device *dev, void (*action)(void *), void *data);
int __devm_add_action(struct device *dev, void (*action)(void *), void *data, const char *name);
-#define devm_add_action(release, action, data) \
- __devm_add_action(release, action, data, #action)
+#define devm_add_action(dev, action, data) \
+ __devm_add_action(dev, action, data, #action)
static inline int __devm_add_action_or_reset(struct device *dev, void (*action)(void *),
void *data, const char *name)
@@ -261,8 +402,8 @@ static inline int __devm_add_action_or_reset(struct device *dev, void (*action)(
return ret;
}
-#define devm_add_action_or_reset(release, action, data) \
- __devm_add_action_or_reset(release, action, data, #action)
+#define devm_add_action_or_reset(dev, action, data) \
+ __devm_add_action_or_reset(dev, action, data, #action)
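The rename of the first macro parameter from "release" to "dev" matches what callers actually pass. The typical pattern looks like this sketch (the regulator usage is only an example; demo names are assumptions):

#include <linux/regulator/consumer.h>

/* Hypothetical: register an undo action tied to the device's lifetime. */
static void demo_disable(void *data)
{
	struct regulator *reg = data;

	regulator_disable(reg);
}

static int demo_enable_managed(struct device *dev, struct regulator *reg)
{
	int ret = regulator_enable(reg);

	if (ret)
		return ret;

	/* On failure this runs demo_disable() before returning the error. */
	return devm_add_action_or_reset(dev, demo_disable, reg);
}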
/**
* devm_alloc_percpu - Resource-managed alloc_percpu
@@ -509,7 +650,10 @@ struct device_physical_location {
* @dma_pools: Dma pools (if dma'ble device).
* @dma_mem: Internal for coherent mem override.
* @cma_area: Contiguous memory area for dma allocations
- * @dma_io_tlb_mem: Pointer to the swiotlb pool used. Not for driver use.
+ * @dma_io_tlb_mem: Software IO TLB allocator. Not for driver use.
+ * @dma_io_tlb_pools: List of transient swiotlb memory pools.
+ * @dma_io_tlb_lock: Protects changes to the list of active pools.
+ * @dma_uses_io_tlb: %true if device has used the software IO TLB.
* @archdata: For arch-specific additions.
* @of_node: Associated device tree node.
* @fwnode: Associated device node supplied by platform firmware.
@@ -517,7 +661,6 @@ struct device_physical_location {
* @id: device instance
* @devres_lock: Spinlock to protect the resource of the device.
* @devres_head: The resources list of the device.
- * @knode_class: The node used to add the device to the class list.
* @class: The class of the device.
* @groups: Optional attribute groups.
* @release: Callback to free the device after all references have
@@ -616,6 +759,11 @@ struct device {
#ifdef CONFIG_SWIOTLB
struct io_tlb_mem *dma_io_tlb_mem;
#endif
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+ struct list_head dma_io_tlb_pools;
+ spinlock_t dma_io_tlb_lock;
+ bool dma_uses_io_tlb;
+#endif
/* arch specific additions */
struct dev_archdata archdata;
@@ -700,6 +848,11 @@ static inline bool device_iommu_mapped(struct device *dev)
/* Get the wakeup routines, which depend on struct device */
#include <linux/pm_wakeup.h>
+/**
+ * dev_name - Return a device's name.
+ * @dev: Device with name to get.
+ * Return: The kobject name of the device, or its initial name if unavailable.
+ */
static inline const char *dev_name(const struct device *dev)
{
/* Use the init name until the kobject becomes available */
@@ -852,6 +1005,8 @@ static inline void device_unlock(struct device *dev)
mutex_unlock(&dev->mutex);
}
+DEFINE_GUARD(device, struct device *, device_lock(_T), device_unlock(_T))
+
static inline void device_lock_assert(struct device *dev)
{
lockdep_assert_held(&dev->mutex);
@@ -899,6 +1054,9 @@ void device_unregister(struct device *dev);
void device_initialize(struct device *dev);
int __must_check device_add(struct device *dev);
void device_del(struct device *dev);
+
+DEFINE_FREE(device_del, struct device *, if (_T) device_del(_T))
+
int device_for_each_child(struct device *dev, void *data,
int (*fn)(struct device *dev, void *data));
int device_for_each_child_reverse(struct device *dev, void *data,
@@ -913,7 +1071,6 @@ int device_rename(struct device *dev, const char *new_name);
int device_move(struct device *dev, struct device *new_parent,
enum dpm_order dpm_order);
int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid);
-int device_is_dependent(struct device *dev, void *target);
static inline bool device_supports_offline(struct device *dev)
{
@@ -1066,6 +1223,9 @@ extern int (*platform_notify_remove)(struct device *dev);
*/
struct device *get_device(struct device *dev);
void put_device(struct device *dev);
+
+DEFINE_FREE(put_device, struct device *, if (_T) put_device(_T))
+
bool kill_device(struct device *dev);
#ifdef CONFIG_DEVTMPFS
@@ -1088,8 +1248,6 @@ void device_link_remove(void *consumer, struct device *supplier);
void device_links_supplier_sync_state_pause(void);
void device_links_supplier_sync_state_resume(void);
-__printf(3, 4) int dev_err_probe(const struct device *dev, int err, const char *fmt, ...);
-
/* Create alias, so I can be autoloaded. */
#define MODULE_ALIAS_CHARDEV(major,minor) \
MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor))
diff --git a/include/linux/device/bus.h b/include/linux/device/bus.h
index ae10c4322754..5ef4ec1c36c3 100644
--- a/include/linux/device/bus.h
+++ b/include/linux/device/bus.h
@@ -62,9 +62,6 @@ struct fwnode_handle;
* this bus.
* @pm: Power management operations of this bus, callback the specific
* device driver's pm-ops.
- * @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU
- * driver implementations to a bus and allow the driver to do
- * bus-specific setup
* @need_parent_lock: When probing or removing a device on this bus, the
* device core should lock the device's parent.
*
@@ -104,8 +101,6 @@ struct bus_type {
const struct dev_pm_ops *pm;
- const struct iommu_ops *iommu_ops;
-
bool need_parent_lock;
};
@@ -232,7 +227,7 @@ bus_find_device_by_acpi_dev(const struct bus_type *bus, const void *adev)
int bus_for_each_drv(const struct bus_type *bus, struct device_driver *start,
void *data, int (*fn)(struct device_driver *, void *));
-void bus_sort_breadthfirst(struct bus_type *bus,
+void bus_sort_breadthfirst(const struct bus_type *bus,
int (*compare)(const struct device *a,
const struct device *b));
/*
diff --git a/include/linux/device/class.h b/include/linux/device/class.h
index abf3d3bfb6fe..c576b49c55c2 100644
--- a/include/linux/device/class.h
+++ b/include/linux/device/class.h
@@ -40,8 +40,6 @@ struct fwnode_handle;
* for the devices belonging to the class. Usually tied to
* device's namespace.
* @pm: The default device power management operations of this class.
- * @p: The private data of the driver core, no one other than the
- * driver core can touch this.
*
* A class is a higher-level view of a device that abstracts out low-level
* implementation details. Drivers may see a SCSI disk or an ATA disk, but,
diff --git a/include/linux/device/driver.h b/include/linux/device/driver.h
index c244267a6744..7738f458995f 100644
--- a/include/linux/device/driver.h
+++ b/include/linux/device/driver.h
@@ -126,7 +126,7 @@ int __must_check driver_register(struct device_driver *drv);
void driver_unregister(struct device_driver *drv);
struct device_driver *driver_find(const char *name, const struct bus_type *bus);
-int driver_probe_done(void);
+bool __init driver_probe_done(void);
void wait_for_device_probe(void);
void __init wait_for_init_devices_probe(void);
diff --git a/include/linux/dlm_plock.h b/include/linux/dlm_plock.h
index e6d76e8715a6..15fc856d198c 100644
--- a/include/linux/dlm_plock.h
+++ b/include/linux/dlm_plock.h
@@ -11,6 +11,8 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
int cmd, struct file_lock *fl);
int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
struct file_lock *fl);
+int dlm_posix_cancel(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+ struct file_lock *fl);
int dlm_posix_get(dlm_lockspace_t *lockspace, u64 number, struct file *file,
struct file_lock *fl);
#endif
diff --git a/include/linux/dm-bufio.h b/include/linux/dm-bufio.h
index 681656a1c03d..75e7d8cbb532 100644
--- a/include/linux/dm-bufio.h
+++ b/include/linux/dm-bufio.h
@@ -38,6 +38,8 @@ dm_bufio_client_create(struct block_device *bdev, unsigned int block_size,
*/
void dm_bufio_client_destroy(struct dm_bufio_client *c);
+void dm_bufio_client_reset(struct dm_bufio_client *c);
+
/*
* Set the sector range.
* When this function is called, there must be no I/O in progress on the bufio
diff --git a/include/linux/dm-verity-loadpin.h b/include/linux/dm-verity-loadpin.h
index 552b817ab102..3ac6dbaeaa37 100644
--- a/include/linux/dm-verity-loadpin.h
+++ b/include/linux/dm-verity-loadpin.h
@@ -12,7 +12,7 @@ extern struct list_head dm_verity_loadpin_trusted_root_digests;
struct dm_verity_loadpin_trusted_root_digest {
struct list_head node;
unsigned int len;
- u8 data[];
+ u8 data[] __counted_by(len);
};
#if IS_ENABLED(CONFIG_SECURITY_LOADPIN_VERITY)
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 3f31baa3293f..8ff4add71f88 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -343,16 +343,19 @@ struct dma_buf {
/**
* @exp_name:
*
- * Name of the exporter; useful for debugging. See the
- * DMA_BUF_SET_NAME IOCTL.
+ * Name of the exporter; useful for debugging. Must not be NULL
*/
const char *exp_name;
/**
* @name:
*
- * Userspace-provided name; useful for accounting and debugging,
- * protected by dma_resv_lock() on @resv and @name_lock for read access.
+ * Userspace-provided name. Default value is NULL. If not NULL,
+ * length cannot be longer than DMA_BUF_NAME_LEN, including NIL
+ * char. Useful for accounting and debugging. Read/Write accesses
+ * are protected by @name_lock
+ *
+ * See the IOCTLs DMA_BUF_SET_NAME or DMA_BUF_SET_NAME_A/B
*/
const char *name;
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index 18aade195884..3eb3589ff43e 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -21,7 +21,6 @@ struct bus_dma_region {
phys_addr_t cpu_start;
dma_addr_t dma_start;
u64 size;
- u64 offset;
};
static inline dma_addr_t translate_phys_to_dma(struct device *dev,
@@ -29,9 +28,12 @@ static inline dma_addr_t translate_phys_to_dma(struct device *dev,
{
const struct bus_dma_region *m;
- for (m = dev->dma_range_map; m->size; m++)
- if (paddr >= m->cpu_start && paddr - m->cpu_start < m->size)
- return (dma_addr_t)paddr - m->offset;
+ for (m = dev->dma_range_map; m->size; m++) {
+ u64 offset = paddr - m->cpu_start;
+
+ if (paddr >= m->cpu_start && offset < m->size)
+ return m->dma_start + offset;
+ }
/* make sure dma_capable fails when no translation is available */
return DMA_MAPPING_ERROR;
@@ -42,9 +44,12 @@ static inline phys_addr_t translate_dma_to_phys(struct device *dev,
{
const struct bus_dma_region *m;
- for (m = dev->dma_range_map; m->size; m++)
- if (dma_addr >= m->dma_start && dma_addr - m->dma_start < m->size)
- return (phys_addr_t)dma_addr + m->offset;
+ for (m = dev->dma_range_map; m->size; m++) {
+ u64 offset = dma_addr - m->dma_start;
+
+ if (dma_addr >= m->dma_start && offset < m->size)
+ return m->cpu_start + offset;
+ }
return (phys_addr_t)-1;
}
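The rewritten walk computes the offset within each region on the fly instead of carrying a precomputed per-region offset field. A worked example of the same translation over a one-entry map (standalone C mirror of translate_phys_to_dma(); the addresses are illustrative):

#include <stdint.h>
#include <stdio.h>

struct bus_dma_region {
	uint64_t cpu_start;
	uint64_t dma_start;
	uint64_t size;
};

/* Same logic as translate_phys_to_dma(); the table ends at size == 0. */
static uint64_t phys_to_dma(const struct bus_dma_region *m, uint64_t paddr)
{
	for (; m->size; m++) {
		uint64_t offset = paddr - m->cpu_start;

		if (paddr >= m->cpu_start && offset < m->size)
			return m->dma_start + offset;
	}
	return UINT64_MAX;	/* stands in for DMA_MAPPING_ERROR */
}

int main(void)
{
	struct bus_dma_region map[] = {
		{ .cpu_start = 0x80000000, .dma_start = 0x0, .size = 0x40000000 },
		{ 0 },
	};

	/* CPU 0x80001000 falls in the region, so DMA address is 0x1000. */
	printf("0x%llx\n", (unsigned long long)phys_to_dma(map, 0x80001000));
	return 0;
}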
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index d54b595a0fe0..e06bad467f55 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -21,6 +21,7 @@
#include <linux/sched.h>
#include <linux/printk.h>
#include <linux/rcupdate.h>
+#include <linux/timekeeping.h>
struct dma_fence;
struct dma_fence_ops;
@@ -499,6 +500,21 @@ static inline bool dma_fence_is_later(struct dma_fence *f1,
}
/**
+ * dma_fence_is_later_or_same - return true if f1 is later or same as f2
+ * @f1: the first fence from the same context
+ * @f2: the second fence from the same context
+ *
+ * Returns true if f1 is chronologically later than f2 or the same fence. Both
+ * fences must be from the same context, since a seqno is not re-used across
+ * contexts.
+ */
+static inline bool dma_fence_is_later_or_same(struct dma_fence *f1,
+ struct dma_fence *f2)
+{
+ return f1 == f2 || dma_fence_is_later(f1, f2);
+}
+
+/**
* dma_fence_later - return the chronologically later fence
* @f1: the first fence from the same context
* @f2: the second fence from the same context
@@ -568,6 +584,25 @@ static inline void dma_fence_set_error(struct dma_fence *fence,
fence->error = error;
}
+/**
+ * dma_fence_timestamp - helper to get the completion timestamp of a fence
+ * @fence: fence to get the timestamp from.
+ *
+ * After a fence is signaled the timestamp is updated with the signaling time,
+ * but setting the timestamp can race with tasks waiting for the signaling. This
+ * helper busy waits for the correct timestamp to appear.
+ */
+static inline ktime_t dma_fence_timestamp(struct dma_fence *fence)
+{
+ if (WARN_ON(!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)))
+ return ktime_get();
+
+ while (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
+ cpu_relax();
+
+ return fence->timestamp;
+}
+
signed long dma_fence_wait_timeout(struct dma_fence *,
bool intr, signed long timeout);
signed long dma_fence_wait_any_timeout(struct dma_fence **fences,
@@ -606,7 +641,7 @@ static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr)
void dma_fence_set_deadline(struct dma_fence *fence, ktime_t deadline);
struct dma_fence *dma_fence_get_stub(void);
-struct dma_fence *dma_fence_allocate_private_stub(void);
+struct dma_fence *dma_fence_allocate_private_stub(ktime_t timestamp);
u64 dma_fence_context_alloc(unsigned num);
extern const struct dma_fence_ops dma_fence_array_ops;
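Callers that previously read fence->timestamp directly should go through the helper so they never observe a pre-signal value. A small sketch (hypothetical helper; assumes the fence is already signaled, as the helper's WARN_ON requires):

/* Hypothetical: how long ago did a signaled fence complete? */
static s64 demo_fence_age_ns(struct dma_fence *fence)
{
	ktime_t stamp = dma_fence_timestamp(fence);

	return ktime_to_ns(ktime_sub(ktime_get(), stamp));
}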
diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
index 31f114f486c4..4abc60f04209 100644
--- a/include/linux/dma-map-ops.h
+++ b/include/linux/dma-map-ops.h
@@ -8,8 +8,10 @@
#include <linux/dma-mapping.h>
#include <linux/pgtable.h>
+#include <linux/slab.h>
struct cma;
+struct iommu_ops;
/*
* Values for struct dma_map_ops.flags:
@@ -168,12 +170,6 @@ static inline void dma_free_contiguous(struct device *dev, struct page *page,
}
#endif /* CONFIG_DMA_CMA*/
-#ifdef CONFIG_DMA_PERNUMA_CMA
-void dma_pernuma_cma_reserve(void);
-#else
-static inline void dma_pernuma_cma_reserve(void) { }
-#endif /* CONFIG_DMA_PERNUMA_CMA */
-
#ifdef CONFIG_DMA_DECLARE_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
dma_addr_t device_addr, size_t size);
@@ -277,11 +273,77 @@ static inline bool dev_is_dma_coherent(struct device *dev)
}
#endif /* CONFIG_ARCH_HAS_DMA_COHERENCE_H */
+/*
+ * Check whether potential kmalloc() buffers are safe for non-coherent DMA.
+ */
+static inline bool dma_kmalloc_safe(struct device *dev,
+ enum dma_data_direction dir)
+{
+ /*
+ * If DMA bouncing of kmalloc() buffers is disabled, the kmalloc()
+ * caches have already been aligned to a DMA-safe size.
+ */
+ if (!IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC))
+ return true;
+
+ /*
+ * kmalloc() buffers are DMA-safe irrespective of size if the device
+ * is coherent or the direction is DMA_TO_DEVICE (non-destructive
+ * is coherent or the direction is DMA_TO_DEVICE (non-destructive
+ * cache maintenance and benign cache line evictions).
+ */
+ if (dev_is_dma_coherent(dev) || dir == DMA_TO_DEVICE)
+ return true;
+
+ return false;
+}
+
+/*
+ * Check whether the given size, assuming it is for a kmalloc()'ed buffer, is
+ * sufficiently aligned for non-coherent DMA.
+ */
+static inline bool dma_kmalloc_size_aligned(size_t size)
+{
+ /*
+ * Larger kmalloc() sizes are guaranteed to be aligned to
+ * ARCH_DMA_MINALIGN.
+ */
+ if (size >= 2 * ARCH_DMA_MINALIGN ||
+ IS_ALIGNED(kmalloc_size_roundup(size), dma_get_cache_alignment()))
+ return true;
+
+ return false;
+}
+
+/*
+ * Check whether the given object size may have originated from a kmalloc()
+ * buffer with a slab alignment below the DMA-safe alignment and needs
+ * bouncing for non-coherent DMA. The pointer alignment is not considered and
+ * in-structure DMA-safe offsets are the responsibility of the caller. Such
+ * code should use the static ARCH_DMA_MINALIGN for compiler annotations.
+ *
+ * The heuristics can have false positives, bouncing unnecessarily, though the
+ * buffers would be small. False negatives are theoretically possible if, for
+ * example, multiple small kmalloc() buffers are coalesced into a larger
+ * buffer that passes the alignment check. There are no such known constructs
+ * in the kernel.
+ */
+static inline bool dma_kmalloc_needs_bounce(struct device *dev, size_t size,
+ enum dma_data_direction dir)
+{
+ return !dma_kmalloc_safe(dev, dir) && !dma_kmalloc_size_aligned(size);
+}
+
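Taken together these helpers mean a buffer is bounced only when the device is non-coherent, the direction can be destructive to cache lines, and the size is not already DMA-aligned. A hedged sketch of the decision point (hypothetical code in the spirit of a dma-mapping core path; demo_map_bounced() is an assumed helper, not a real API):

/* Hypothetical map path choosing between direct map and swiotlb bounce. */
static dma_addr_t demo_map(struct device *dev, void *buf, size_t size,
			   enum dma_data_direction dir)
{
	if (dma_kmalloc_needs_bounce(dev, size, dir))
		return demo_map_bounced(dev, buf, size, dir);	/* assumed */

	return dma_map_single(dev, buf, size, dir);
}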
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs);
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs);
+#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
+void arch_dma_set_mask(struct device *dev, u64 mask);
+#else
+#define arch_dma_set_mask(dev, mask) do { } while (0)
+#endif
+
#ifdef CONFIG_MMU
/*
* Page protection so that devices that can't snoop CPU caches can use the
@@ -365,10 +427,10 @@ bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
- const struct iommu_ops *iommu, bool coherent);
+ bool coherent);
#else
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
- u64 size, const struct iommu_ops *iommu, bool coherent)
+ u64 size, bool coherent)
{
}
#endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */
@@ -382,10 +444,10 @@ static inline void arch_teardown_dma_ops(struct device *dev)
#endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */
#ifdef CONFIG_DMA_API_DEBUG
-void dma_debug_add_bus(struct bus_type *bus);
+void dma_debug_add_bus(const struct bus_type *bus);
void debug_dma_dump_mappings(struct device *dev);
#else
-static inline void dma_debug_add_bus(struct bus_type *bus)
+static inline void dma_debug_add_bus(const struct bus_type *bus)
{
}
static inline void debug_dma_dump_mappings(struct device *dev)
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 0ee20b764000..4a658de44ee9 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H
+#include <linux/cache.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
@@ -143,6 +144,7 @@ bool dma_pci_p2pdma_supported(struct device *dev);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
+bool dma_addressing_limited(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
size_t dma_opt_mapping_size(struct device *dev);
bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
@@ -263,6 +265,10 @@ static inline u64 dma_get_required_mask(struct device *dev)
{
return 0;
}
+static inline bool dma_addressing_limited(struct device *dev)
+{
+ return false;
+}
static inline size_t dma_max_mapping_size(struct device *dev)
{
return 0;
@@ -417,6 +423,8 @@ static inline void dma_sync_sgtable_for_device(struct device *dev,
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
+bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size);
+
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp)
{
@@ -462,20 +470,6 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
return dma_set_mask_and_coherent(dev, mask);
}
-/**
- * dma_addressing_limited - return if the device is addressing limited
- * @dev: device to check
- *
- * Return %true if the devices DMA mask is too small to address all memory in
- * the system, else %false. Lack of addressing bits is the prime reason for
- * bounce buffering, but might not be the only one.
- */
-static inline bool dma_addressing_limited(struct device *dev)
-{
- return min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
- dma_get_required_mask(dev);
-}
-
static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
if (dev->dma_parms && dev->dma_parms->max_segment_size)
@@ -543,13 +537,15 @@ static inline int dma_set_min_align_mask(struct device *dev,
return 0;
}
+#ifndef dma_get_cache_alignment
static inline int dma_get_cache_alignment(void)
{
-#ifdef ARCH_DMA_MINALIGN
+#ifdef ARCH_HAS_DMA_MINALIGN
return ARCH_DMA_MINALIGN;
#endif
return 1;
}
+#endif
static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp)
diff --git a/include/linux/dma/edma.h b/include/linux/dma/edma.h
index d2638d9259dc..3080747689f6 100644
--- a/include/linux/dma/edma.h
+++ b/include/linux/dma/edma.h
@@ -40,7 +40,7 @@ struct dw_edma_region {
* iATU windows. That will be done by the controller
* automatically.
*/
-struct dw_edma_core_ops {
+struct dw_edma_plat_ops {
int (*irq_vector)(struct device *dev, unsigned int nr);
u64 (*pci_address)(struct device *dev, phys_addr_t cpu_addr);
};
@@ -48,7 +48,8 @@ struct dw_edma_core_ops {
enum dw_edma_map_format {
EDMA_MF_EDMA_LEGACY = 0x0,
EDMA_MF_EDMA_UNROLL = 0x1,
- EDMA_MF_HDMA_COMPAT = 0x5
+ EDMA_MF_HDMA_COMPAT = 0x5,
+ EDMA_MF_HDMA_NATIVE = 0x7,
};
/**
@@ -80,7 +81,7 @@ enum dw_edma_chip_flags {
struct dw_edma_chip {
struct device *dev;
int nr_irqs;
- const struct dw_edma_core_ops *ops;
+ const struct dw_edma_plat_ops *ops;
u32 flags;
void __iomem *reg_base;
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index c3656e590213..752dbde4cec1 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -517,8 +517,6 @@ static inline const char *dma_chan_name(struct dma_chan *chan)
return dev_name(&chan->dev->device);
}
-void dma_chan_cleanup(struct kref *kref);
-
/**
* typedef dma_filter_fn - callback filter for dma_request_channel
* @chan: channel to be reviewed
@@ -955,7 +953,8 @@ static inline int dmaengine_slave_config(struct dma_chan *chan,
static inline bool is_slave_direction(enum dma_transfer_direction direction)
{
- return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
+ return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM) ||
+ (direction == DMA_DEV_TO_DEV);
}
static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 725d5e6acec0..e34b601b71fd 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -106,8 +106,6 @@ static inline bool dmar_rcu_check(void)
extern int dmar_table_init(void);
extern int dmar_dev_scope_init(void);
extern void dmar_register_bus_notifier(void);
-extern int dmar_parse_dev_scope(void *start, void *end, int *cnt,
- struct dmar_dev_scope **devices, u16 segment);
extern void *dmar_alloc_dev_scope(void *start, void *end, int *cnt);
extern void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt);
extern int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
@@ -202,67 +200,74 @@ static inline void detect_intel_iommu(void)
struct irte {
union {
- /* Shared between remapped and posted mode*/
struct {
- __u64 present : 1, /* 0 */
- fpd : 1, /* 1 */
- __res0 : 6, /* 2 - 6 */
- avail : 4, /* 8 - 11 */
- __res1 : 3, /* 12 - 14 */
- pst : 1, /* 15 */
- vector : 8, /* 16 - 23 */
- __res2 : 40; /* 24 - 63 */
+ union {
+ /* Shared between remapped and posted mode*/
+ struct {
+ __u64 present : 1, /* 0 */
+ fpd : 1, /* 1 */
+ __res0 : 6, /* 2 - 6 */
+ avail : 4, /* 8 - 11 */
+ __res1 : 3, /* 12 - 14 */
+ pst : 1, /* 15 */
+ vector : 8, /* 16 - 23 */
+ __res2 : 40; /* 24 - 63 */
+ };
+
+ /* Remapped mode */
+ struct {
+ __u64 r_present : 1, /* 0 */
+ r_fpd : 1, /* 1 */
+ dst_mode : 1, /* 2 */
+ redir_hint : 1, /* 3 */
+ trigger_mode : 1, /* 4 */
+ dlvry_mode : 3, /* 5 - 7 */
+ r_avail : 4, /* 8 - 11 */
+ r_res0 : 4, /* 12 - 15 */
+ r_vector : 8, /* 16 - 23 */
+ r_res1 : 8, /* 24 - 31 */
+ dest_id : 32; /* 32 - 63 */
+ };
+
+ /* Posted mode */
+ struct {
+ __u64 p_present : 1, /* 0 */
+ p_fpd : 1, /* 1 */
+ p_res0 : 6, /* 2 - 7 */
+ p_avail : 4, /* 8 - 11 */
+ p_res1 : 2, /* 12 - 13 */
+ p_urgent : 1, /* 14 */
+ p_pst : 1, /* 15 */
+ p_vector : 8, /* 16 - 23 */
+ p_res2 : 14, /* 24 - 37 */
+ pda_l : 26; /* 38 - 63 */
+ };
+ __u64 low;
+ };
+
+ union {
+ /* Shared between remapped and posted mode*/
+ struct {
+ __u64 sid : 16, /* 64 - 79 */
+ sq : 2, /* 80 - 81 */
+ svt : 2, /* 82 - 83 */
+ __res3 : 44; /* 84 - 127 */
+ };
+
+ /* Posted mode*/
+ struct {
+ __u64 p_sid : 16, /* 64 - 79 */
+ p_sq : 2, /* 80 - 81 */
+ p_svt : 2, /* 82 - 83 */
+ p_res3 : 12, /* 84 - 95 */
+ pda_h : 32; /* 96 - 127 */
+ };
+ __u64 high;
+ };
};
-
- /* Remapped mode */
- struct {
- __u64 r_present : 1, /* 0 */
- r_fpd : 1, /* 1 */
- dst_mode : 1, /* 2 */
- redir_hint : 1, /* 3 */
- trigger_mode : 1, /* 4 */
- dlvry_mode : 3, /* 5 - 7 */
- r_avail : 4, /* 8 - 11 */
- r_res0 : 4, /* 12 - 15 */
- r_vector : 8, /* 16 - 23 */
- r_res1 : 8, /* 24 - 31 */
- dest_id : 32; /* 32 - 63 */
- };
-
- /* Posted mode */
- struct {
- __u64 p_present : 1, /* 0 */
- p_fpd : 1, /* 1 */
- p_res0 : 6, /* 2 - 7 */
- p_avail : 4, /* 8 - 11 */
- p_res1 : 2, /* 12 - 13 */
- p_urgent : 1, /* 14 */
- p_pst : 1, /* 15 */
- p_vector : 8, /* 16 - 23 */
- p_res2 : 14, /* 24 - 37 */
- pda_l : 26; /* 38 - 63 */
- };
- __u64 low;
- };
-
- union {
- /* Shared between remapped and posted mode*/
- struct {
- __u64 sid : 16, /* 64 - 79 */
- sq : 2, /* 80 - 81 */
- svt : 2, /* 82 - 83 */
- __res3 : 44; /* 84 - 127 */
- };
-
- /* Posted mode*/
- struct {
- __u64 p_sid : 16, /* 64 - 79 */
- p_sq : 2, /* 80 - 81 */
- p_svt : 2, /* 82 - 83 */
- p_res3 : 12, /* 84 - 95 */
- pda_h : 32; /* 96 - 127 */
- };
- __u64 high;
+#ifdef CONFIG_IRQ_REMAP
+ __u128 irte;
+#endif
};
};
diff --git a/include/linux/dnotify.h b/include/linux/dnotify.h
index b1d26f9f1c9f..9f183a679277 100644
--- a/include/linux/dnotify.h
+++ b/include/linux/dnotify.h
@@ -30,7 +30,7 @@ struct dnotify_struct {
FS_MOVED_FROM | FS_MOVED_TO)
extern void dnotify_flush(struct file *, fl_owner_t);
-extern int fcntl_dirnotify(int, struct file *, unsigned long);
+extern int fcntl_dirnotify(int, struct file *, unsigned int);
#else
@@ -38,7 +38,7 @@ static inline void dnotify_flush(struct file *filp, fl_owner_t id)
{
}
-static inline int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
+static inline int fcntl_dirnotify(int fd, struct file *filp, unsigned int arg)
{
return -EINVAL;
}
diff --git a/include/linux/dpll.h b/include/linux/dpll.h
new file mode 100644
index 000000000000..e37344f6a231
--- /dev/null
+++ b/include/linux/dpll.h
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023 Meta Platforms, Inc. and affiliates
+ * Copyright (c) 2023 Intel and affiliates
+ */
+
+#ifndef __DPLL_H__
+#define __DPLL_H__
+
+#include <uapi/linux/dpll.h>
+#include <linux/device.h>
+#include <linux/netlink.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+
+struct dpll_device;
+struct dpll_pin;
+
+struct dpll_device_ops {
+ int (*mode_get)(const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_mode *mode, struct netlink_ext_ack *extack);
+ int (*lock_status_get)(const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_lock_status *status,
+ struct netlink_ext_ack *extack);
+ int (*temp_get)(const struct dpll_device *dpll, void *dpll_priv,
+ s32 *temp, struct netlink_ext_ack *extack);
+};
+
+struct dpll_pin_ops {
+ int (*frequency_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ const u64 frequency,
+ struct netlink_ext_ack *extack);
+ int (*frequency_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 *frequency, struct netlink_ext_ack *extack);
+ int (*direction_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ const enum dpll_pin_direction direction,
+ struct netlink_ext_ack *extack);
+ int (*direction_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_direction *direction,
+ struct netlink_ext_ack *extack);
+ int (*state_on_pin_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_pin *parent_pin,
+ void *parent_pin_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack);
+ int (*state_on_dpll_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv, enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack);
+ int (*state_on_pin_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_pin *parent_pin,
+ void *parent_pin_priv,
+ const enum dpll_pin_state state,
+ struct netlink_ext_ack *extack);
+ int (*state_on_dpll_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ const enum dpll_pin_state state,
+ struct netlink_ext_ack *extack);
+ int (*prio_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u32 *prio, struct netlink_ext_ack *extack);
+ int (*prio_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ const u32 prio, struct netlink_ext_ack *extack);
+ int (*phase_offset_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ s64 *phase_offset,
+ struct netlink_ext_ack *extack);
+ int (*phase_adjust_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ s32 *phase_adjust,
+ struct netlink_ext_ack *extack);
+ int (*phase_adjust_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ const s32 phase_adjust,
+ struct netlink_ext_ack *extack);
+ int (*ffo_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ s64 *ffo, struct netlink_ext_ack *extack);
+};
+
+struct dpll_pin_frequency {
+ u64 min;
+ u64 max;
+};
+
+#define DPLL_PIN_FREQUENCY_RANGE(_min, _max) \
+ { \
+ .min = _min, \
+ .max = _max, \
+ }
+
+#define DPLL_PIN_FREQUENCY(_val) DPLL_PIN_FREQUENCY_RANGE(_val, _val)
+#define DPLL_PIN_FREQUENCY_1PPS \
+ DPLL_PIN_FREQUENCY(DPLL_PIN_FREQUENCY_1_HZ)
+#define DPLL_PIN_FREQUENCY_10MHZ \
+ DPLL_PIN_FREQUENCY(DPLL_PIN_FREQUENCY_10_MHZ)
+#define DPLL_PIN_FREQUENCY_IRIG_B \
+ DPLL_PIN_FREQUENCY(DPLL_PIN_FREQUENCY_10_KHZ)
+#define DPLL_PIN_FREQUENCY_DCF77 \
+ DPLL_PIN_FREQUENCY(DPLL_PIN_FREQUENCY_77_5_KHZ)
+
+struct dpll_pin_phase_adjust_range {
+ s32 min;
+ s32 max;
+};
+
+struct dpll_pin_properties {
+ const char *board_label;
+ const char *panel_label;
+ const char *package_label;
+ enum dpll_pin_type type;
+ unsigned long capabilities;
+ u32 freq_supported_num;
+ struct dpll_pin_frequency *freq_supported;
+ struct dpll_pin_phase_adjust_range phase_range;
+};
+
+#if IS_ENABLED(CONFIG_DPLL)
+void dpll_netdev_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin);
+void dpll_netdev_pin_clear(struct net_device *dev);
+
+size_t dpll_netdev_pin_handle_size(const struct net_device *dev);
+int dpll_netdev_add_pin_handle(struct sk_buff *msg,
+ const struct net_device *dev);
+#else
+static inline void
+dpll_netdev_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin) { }
+static inline void dpll_netdev_pin_clear(struct net_device *dev) { }
+
+static inline size_t dpll_netdev_pin_handle_size(const struct net_device *dev)
+{
+ return 0;
+}
+
+static inline int
+dpll_netdev_add_pin_handle(struct sk_buff *msg, const struct net_device *dev)
+{
+ return 0;
+}
+#endif
+
+struct dpll_device *
+dpll_device_get(u64 clock_id, u32 dev_driver_id, struct module *module);
+
+void dpll_device_put(struct dpll_device *dpll);
+
+int dpll_device_register(struct dpll_device *dpll, enum dpll_type type,
+ const struct dpll_device_ops *ops, void *priv);
+
+void dpll_device_unregister(struct dpll_device *dpll,
+ const struct dpll_device_ops *ops, void *priv);
+
+struct dpll_pin *
+dpll_pin_get(u64 clock_id, u32 dev_driver_id, struct module *module,
+ const struct dpll_pin_properties *prop);
+
+int dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
+ const struct dpll_pin_ops *ops, void *priv);
+
+void dpll_pin_unregister(struct dpll_device *dpll, struct dpll_pin *pin,
+ const struct dpll_pin_ops *ops, void *priv);
+
+void dpll_pin_put(struct dpll_pin *pin);
+
+int dpll_pin_on_pin_register(struct dpll_pin *parent, struct dpll_pin *pin,
+ const struct dpll_pin_ops *ops, void *priv);
+
+void dpll_pin_on_pin_unregister(struct dpll_pin *parent, struct dpll_pin *pin,
+ const struct dpll_pin_ops *ops, void *priv);
+
+int dpll_device_change_ntf(struct dpll_device *dpll);
+
+int dpll_pin_change_ntf(struct dpll_pin *pin);
+
+#endif
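Registration follows a get/register pairing: obtain a device handle by clock id, then register it with ops and a private pointer; pins are attached the same way. A condensed sketch (hypothetical driver; struct demo_priv, the callbacks, and the clock id are assumptions):

/* Hypothetical DPLL device registration skeleton. */
static const struct dpll_device_ops demo_dpll_ops = {
	.mode_get	 = demo_mode_get,		/* assumed callback */
	.lock_status_get = demo_lock_status_get,	/* assumed callback */
};

static int demo_dpll_init(struct demo_priv *priv)
{
	int ret;

	priv->dpll = dpll_device_get(priv->clock_id, 0, THIS_MODULE);
	if (IS_ERR(priv->dpll))
		return PTR_ERR(priv->dpll);

	ret = dpll_device_register(priv->dpll, DPLL_TYPE_EEC,
				   &demo_dpll_ops, priv);
	if (ret)
		dpll_device_put(priv->dpll);

	return ret;
}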
diff --git a/include/linux/dsa/sja1105.h b/include/linux/dsa/sja1105.h
index 159e43171ccc..b9dd35d4b8f5 100644
--- a/include/linux/dsa/sja1105.h
+++ b/include/linux/dsa/sja1105.h
@@ -28,7 +28,7 @@
/* Source and Destination MAC of follow-up meta frames.
* Whereas the choice of SMAC only affects the unique identification of the
* switch as sender of meta frames, the DMAC must be an address that is present
- * in the DSA master port's multicast MAC filter.
+ * in the DSA conduit port's multicast MAC filter.
* 01-80-C2-00-00-0E is a good choice for this, as all profiles of IEEE 1588
* over L2 use this address for some purpose already.
*/
@@ -48,13 +48,9 @@ struct sja1105_deferred_xmit_work {
/* Global tagger data */
struct sja1105_tagger_data {
- /* Tagger to switch */
void (*xmit_work_fn)(struct kthread_work *work);
void (*meta_tstamp_handler)(struct dsa_switch *ds, int port, u8 ts_id,
enum sja1110_meta_tstamp dir, u64 tstamp);
- /* Switch to tagger */
- bool (*rxtstamp_get_state)(struct dsa_switch *ds);
- void (*rxtstamp_set_state)(struct dsa_switch *ds, bool on);
};
struct sja1105_skb_cb {
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index 061dd84d09f3..4fcbf4d4fd0a 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -37,10 +37,12 @@ struct _ddebug {
#define _DPRINTK_FLAGS_INCL_FUNCNAME (1<<2)
#define _DPRINTK_FLAGS_INCL_LINENO (1<<3)
#define _DPRINTK_FLAGS_INCL_TID (1<<4)
+#define _DPRINTK_FLAGS_INCL_SOURCENAME (1<<5)
#define _DPRINTK_FLAGS_INCL_ANY \
(_DPRINTK_FLAGS_INCL_MODNAME | _DPRINTK_FLAGS_INCL_FUNCNAME |\
- _DPRINTK_FLAGS_INCL_LINENO | _DPRINTK_FLAGS_INCL_TID)
+ _DPRINTK_FLAGS_INCL_LINENO | _DPRINTK_FLAGS_INCL_TID |\
+ _DPRINTK_FLAGS_INCL_SOURCENAME)
#if defined DEBUG
#define _DPRINTK_FLAGS_DEFAULT _DPRINTK_FLAGS_PRINT
diff --git a/include/linux/edac.h b/include/linux/edac.h
index fa4bda2a70f6..b4ee8961e623 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -30,7 +30,7 @@ struct device;
extern int edac_op_state;
-struct bus_type *edac_get_sysfs_subsys(void);
+const struct bus_type *edac_get_sysfs_subsys(void);
static inline void opstate_init(void)
{
@@ -187,6 +187,7 @@ static inline char *mc_event_error_type(const unsigned int err_type)
* @MEM_NVDIMM: Non-volatile RAM
* @MEM_WIO2: Wide I/O 2.
* @MEM_HBM2: High bandwidth Memory Gen 2.
+ * @MEM_HBM3: High bandwidth Memory Gen 3.
*/
enum mem_type {
MEM_EMPTY = 0,
@@ -218,6 +219,7 @@ enum mem_type {
MEM_NVDIMM,
MEM_WIO2,
MEM_HBM2,
+ MEM_HBM3,
};
#define MEM_FLAG_EMPTY BIT(MEM_EMPTY)
@@ -248,6 +250,7 @@ enum mem_type {
#define MEM_FLAG_NVDIMM BIT(MEM_NVDIMM)
#define MEM_FLAG_WIO2 BIT(MEM_WIO2)
#define MEM_FLAG_HBM2 BIT(MEM_HBM2)
+#define MEM_FLAG_HBM3 BIT(MEM_HBM3)
/**
* enum edac_type - Error Detection and Correction capabilities and mode
@@ -492,7 +495,7 @@ struct edac_raw_error_desc {
*/
struct mem_ctl_info {
struct device dev;
- struct bus_type *bus;
+ const struct bus_type *bus;
struct list_head link; /* for global list of mem_ctl_info structs */
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 571d1a6e1b74..c74f47711f0b 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -24,10 +24,11 @@
#include <linux/range.h>
#include <linux/reboot.h>
#include <linux/uuid.h>
-#include <linux/screen_info.h>
#include <asm/page.h>
+struct screen_info;
+
#define EFI_SUCCESS 0
#define EFI_LOAD_ERROR ( 1 | (1UL << (BITS_PER_LONG-1)))
#define EFI_INVALID_PARAMETER ( 2 | (1UL << (BITS_PER_LONG-1)))
@@ -39,6 +40,7 @@
#define EFI_WRITE_PROTECTED ( 8 | (1UL << (BITS_PER_LONG-1)))
#define EFI_OUT_OF_RESOURCES ( 9 | (1UL << (BITS_PER_LONG-1)))
#define EFI_NOT_FOUND (14 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_ACCESS_DENIED (15 | (1UL << (BITS_PER_LONG-1)))
#define EFI_TIMEOUT (18 | (1UL << (BITS_PER_LONG-1)))
#define EFI_ABORTED (21 | (1UL << (BITS_PER_LONG-1)))
#define EFI_SECURITY_VIOLATION (26 | (1UL << (BITS_PER_LONG-1)))
@@ -108,7 +110,8 @@ typedef struct {
#define EFI_MEMORY_MAPPED_IO_PORT_SPACE 12
#define EFI_PAL_CODE 13
#define EFI_PERSISTENT_MEMORY 14
-#define EFI_MAX_MEMORY_TYPE 15
+#define EFI_UNACCEPTED_MEMORY 15
+#define EFI_MAX_MEMORY_TYPE 16
/* Attribute values: */
#define EFI_MEMORY_UC ((u64)0x0000000000000001ULL) /* uncached */
@@ -356,13 +359,10 @@ void efi_native_runtime_setup(void);
* where the UEFI SPEC breaks the line.
*/
#define NULL_GUID EFI_GUID(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00)
-#define MPS_TABLE_GUID EFI_GUID(0xeb9d2d2f, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
#define ACPI_TABLE_GUID EFI_GUID(0xeb9d2d30, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
#define ACPI_20_TABLE_GUID EFI_GUID(0x8868e871, 0xe4f1, 0x11d3, 0xbc, 0x22, 0x00, 0x80, 0xc7, 0x3c, 0x88, 0x81)
#define SMBIOS_TABLE_GUID EFI_GUID(0xeb9d2d31, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
#define SMBIOS3_TABLE_GUID EFI_GUID(0xf2fd1544, 0x9794, 0x4a2c, 0x99, 0x2e, 0xe5, 0xbb, 0xcf, 0x20, 0xe3, 0x94)
-#define SAL_SYSTEM_TABLE_GUID EFI_GUID(0xeb9d2d32, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
-#define HCDP_TABLE_GUID EFI_GUID(0xf951938d, 0x620b, 0x42ef, 0x82, 0x79, 0xa8, 0x4b, 0x79, 0x61, 0x78, 0x98)
#define UGA_IO_PROTOCOL_GUID EFI_GUID(0x61a4d49e, 0x6f68, 0x4f1b, 0xb9, 0x22, 0xa8, 0x6e, 0xed, 0x0b, 0x07, 0xa2)
#define EFI_GLOBAL_VARIABLE_GUID EFI_GUID(0x8be4df61, 0x93ca, 0x11d2, 0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c)
#define UV_SYSTEM_TABLE_GUID EFI_GUID(0x3b13a7d4, 0x633e, 0x11dd, 0x93, 0xec, 0xda, 0x25, 0x56, 0xd8, 0x95, 0x93)
@@ -417,6 +417,7 @@ void efi_native_runtime_setup(void);
#define LINUX_EFI_MOK_VARIABLE_TABLE_GUID EFI_GUID(0xc451ed2b, 0x9694, 0x45d3, 0xba, 0xba, 0xed, 0x9f, 0x89, 0x88, 0xa3, 0x89)
#define LINUX_EFI_COCO_SECRET_AREA_GUID EFI_GUID(0xadf956ad, 0xe98c, 0x484c, 0xae, 0x11, 0xb5, 0x1c, 0x7d, 0x33, 0x64, 0x47)
#define LINUX_EFI_BOOT_MEMMAP_GUID EFI_GUID(0x800f683f, 0xd08b, 0x423a, 0xa2, 0x93, 0x96, 0x5c, 0x3c, 0x6f, 0xe2, 0xb4)
+#define LINUX_EFI_UNACCEPTED_MEM_TABLE_GUID EFI_GUID(0xd5d1de3c, 0x105c, 0x44f9, 0x9e, 0xa9, 0xbc, 0xef, 0x98, 0x12, 0x00, 0x31)
#define RISCV_EFI_BOOT_PROTOCOL_GUID EFI_GUID(0xccd15fec, 0x6f73, 0x4eec, 0x83, 0x95, 0x3e, 0x69, 0xe4, 0xb9, 0x40, 0xbf)
@@ -435,6 +436,9 @@ void efi_native_runtime_setup(void);
#define DELLEMC_EFI_RCI2_TABLE_GUID EFI_GUID(0x2d9f28a2, 0xa886, 0x456a, 0x97, 0xa8, 0xf1, 0x1e, 0xf2, 0x4f, 0xf4, 0x55)
#define AMD_SEV_MEM_ENCRYPT_GUID EFI_GUID(0x0cf29b71, 0x9e51, 0x433a, 0xa3, 0xb7, 0x81, 0xf3, 0xab, 0x16, 0xb8, 0x75)
+/* OVMF protocol GUIDs */
+#define OVMF_SEV_MEMORY_ACCEPTANCE_PROTOCOL_GUID EFI_GUID(0xc5a010fe, 0x38a7, 0x4531, 0x8a, 0x4a, 0x05, 0x00, 0xd2, 0xfd, 0x16, 0x49)
+
typedef struct {
efi_guid_t guid;
u64 table;
@@ -534,6 +538,14 @@ struct efi_boot_memmap {
efi_memory_desc_t map[];
};
+struct efi_unaccepted_memory {
+ u32 version;
+ u32 unit_size;
+ u64 phys_base;
+ u64 size;
+ unsigned long bitmap[];
+};
+
/*
* Architecture independent structure for describing a memory map for the
* benefit of efi_memmap_init_early(), and for passing context between
@@ -636,6 +648,7 @@ extern struct efi {
unsigned long tpm_final_log; /* TPM2 Final Events Log table */
unsigned long mokvar_table; /* MOK variable config table */
unsigned long coco_secret; /* Confidential computing secret table */
+ unsigned long unaccepted; /* Unaccepted memory table */
efi_get_time_t *get_time;
efi_set_time_t *set_time;
@@ -712,7 +725,6 @@ static inline efi_status_t efi_query_variable_store(u32 attributes,
return EFI_SUCCESS;
}
#endif
-extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
extern int __init __efi_memmap_init(struct efi_memory_map_data *data);
extern int __init efi_memmap_init_early(struct efi_memory_map_data *data);
@@ -837,10 +849,6 @@ static inline int efi_range_is_wc(unsigned long start, unsigned long len)
return 1;
}
-#ifdef CONFIG_EFI_PCDP
-extern int __init efi_setup_pcdp_console(char *);
-#endif
-
/*
* We play games with efi_enabled so that the compiler will, if
* possible, remove EFI-related code altogether.
@@ -1042,6 +1050,7 @@ struct efivar_operations {
efi_set_variable_t *set_variable;
efi_set_variable_t *set_variable_nonblocking;
efi_query_variable_store_t *query_variable_store;
+ efi_query_variable_info_t *query_variable_info;
};
struct efivars {
@@ -1049,6 +1058,12 @@ struct efivars {
const struct efivar_operations *ops;
};
+#ifdef CONFIG_X86
+u64 __attribute_const__ efivar_reserved_space(void);
+#else
+static inline u64 efivar_reserved_space(void) { return 0; }
+#endif
+
/*
* The maximum size of VariableName + Data = 1024
* Therefore, it's reasonable to save that much
@@ -1087,6 +1102,10 @@ efi_status_t efivar_set_variable_locked(efi_char16_t *name, efi_guid_t *vendor,
efi_status_t efivar_set_variable(efi_char16_t *name, efi_guid_t *vendor,
u32 attr, unsigned long data_size, void *data);
+efi_status_t efivar_query_variable_info(u32 attr, u64 *storage_space,
+ u64 *remaining_space,
+ u64 *max_variable_size);
+
#if IS_ENABLED(CONFIG_EFI_CAPSULE_LOADER)
extern bool efi_capsule_pending(int *reset_type);
@@ -1105,7 +1124,7 @@ extern bool efi_runtime_disabled(void);
static inline bool efi_runtime_disabled(void) { return true; }
#endif
-extern void efi_call_virt_check_flags(unsigned long flags, const char *call);
+extern void efi_call_virt_check_flags(unsigned long flags, const void *caller);
extern unsigned long efi_call_virt_save_flags(void);
enum efi_secureboot_mode {
@@ -1146,8 +1165,7 @@ static inline void efi_check_for_embedded_firmwares(void) { }
#define arch_efi_call_virt(p, f, args...) ((p)->f(args))
/*
- * Arch code can implement the following three template macros, avoiding
- * reptition for the void/non-void return cases of {__,}efi_call_virt():
+ * Arch code must implement the following three routines:
*
* * arch_efi_call_virt_setup()
*
@@ -1156,9 +1174,8 @@ static inline void efi_check_for_embedded_firmwares(void) { }
*
* * arch_efi_call_virt()
*
- * Performs the call. The last expression in the macro must be the call
- * itself, allowing the logic to be shared by the void and non-void
- * cases.
+ * Performs the call. This routine takes a variable number of arguments so
+ * it must be implemented as a variadic preprocessor macro.
*
* * arch_efi_call_virt_teardown()
*
@@ -1167,33 +1184,20 @@ static inline void efi_check_for_embedded_firmwares(void) { }
#define efi_call_virt_pointer(p, f, args...) \
({ \
- efi_status_t __s; \
+ typeof((p)->f(args)) __s; \
unsigned long __flags; \
\
arch_efi_call_virt_setup(); \
\
__flags = efi_call_virt_save_flags(); \
__s = arch_efi_call_virt(p, f, args); \
- efi_call_virt_check_flags(__flags, __stringify(f)); \
+ efi_call_virt_check_flags(__flags, NULL); \
\
arch_efi_call_virt_teardown(); \
\
__s; \
})
-#define __efi_call_virt_pointer(p, f, args...) \
-({ \
- unsigned long __flags; \
- \
- arch_efi_call_virt_setup(); \
- \
- __flags = efi_call_virt_save_flags(); \
- arch_efi_call_virt(p, f, args); \
- efi_call_virt_check_flags(__flags, __stringify(f)); \
- \
- arch_efi_call_virt_teardown(); \
-})
-
#define EFI_RANDOM_SEED_SIZE 32U // BLAKE2S_HASH_SIZE
struct linux_efi_random_seed {
@@ -1219,6 +1223,10 @@ extern int efi_tpm_final_log_size;
extern unsigned long rci2_table_phys;
+efi_status_t
+efi_call_acpi_prm_handler(efi_status_t (__efiapi *handler_addr)(u64, void *),
+ u64 param_buffer_addr, void *context);
+
/*
* efi_runtime_service() function identifiers.
* "NONE" is used by efi_recover_from_page_fault() to check if the page
@@ -1238,25 +1246,26 @@ enum efi_rts_ids {
EFI_RESET_SYSTEM,
EFI_UPDATE_CAPSULE,
EFI_QUERY_CAPSULE_CAPS,
+ EFI_ACPI_PRM_HANDLER,
};
+union efi_rts_args;
+
/*
* efi_runtime_work: Details of EFI Runtime Service work
- * @arg<1-5>: EFI Runtime Service function arguments
+ * @args: Pointer to union describing the arguments
* @status: Status of executing EFI Runtime Service
* @efi_rts_id: EFI Runtime Service function identifier
* @efi_rts_comp: Struct used for handling completions
+ * @caller: The caller of the runtime service
*/
struct efi_runtime_work {
- void *arg1;
- void *arg2;
- void *arg3;
- void *arg4;
- void *arg5;
- efi_status_t status;
- struct work_struct work;
- enum efi_rts_ids efi_rts_id;
- struct completion efi_rts_comp;
+ union efi_rts_args *args;
+ efi_status_t status;
+ struct work_struct work;
+ enum efi_rts_ids efi_rts_id;
+ struct completion efi_rts_comp;
+ const void *caller;
};
extern struct efi_runtime_work efi_rts_work;
@@ -1340,4 +1349,15 @@ bool efi_config_table_is_usable(const efi_guid_t *guid, unsigned long table)
umode_t efi_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n);
+/*
+ * efivar ops event type
+ */
+#define EFIVAR_OPS_RDONLY 0
+#define EFIVAR_OPS_RDWR 1
+
+extern struct blocking_notifier_head efivar_ops_nh;
+
+void efivars_generic_ops_register(void);
+void efivars_generic_ops_unregister(void);
+
#endif /* _LINUX_EFI_H */
diff --git a/include/linux/elf-fdpic.h b/include/linux/elf-fdpic.h
index 3bea95a1af53..e533f4513194 100644
--- a/include/linux/elf-fdpic.h
+++ b/include/linux/elf-fdpic.h
@@ -10,13 +10,25 @@
#include <uapi/linux/elf-fdpic.h>
+#if ELF_CLASS == ELFCLASS32
+#define Elf_Sword Elf32_Sword
+#define elf_fdpic_loadseg elf32_fdpic_loadseg
+#define elf_fdpic_loadmap elf32_fdpic_loadmap
+#define ELF_FDPIC_LOADMAP_VERSION ELF32_FDPIC_LOADMAP_VERSION
+#else
+#define Elf_Sword Elf64_Sxword
+#define elf_fdpic_loadmap elf64_fdpic_loadmap
+#define elf_fdpic_loadseg elf64_fdpic_loadseg
+#define ELF_FDPIC_LOADMAP_VERSION ELF64_FDPIC_LOADMAP_VERSION
+#endif
+
/*
* binfmt binary parameters structure
*/
struct elf_fdpic_params {
struct elfhdr hdr; /* ref copy of ELF header */
struct elf_phdr *phdrs; /* ref copy of PT_PHDR table */
- struct elf32_fdpic_loadmap *loadmap; /* loadmap to be passed to userspace */
+ struct elf_fdpic_loadmap *loadmap; /* loadmap to be passed to userspace */
unsigned long elfhdr_addr; /* mapped ELF header user address */
unsigned long ph_addr; /* mapped PT_PHDR user address */
unsigned long map_addr; /* mapped loadmap user address */
diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h
index b9caa01dfac4..88d91e087471 100644
--- a/include/linux/energy_model.h
+++ b/include/linux/energy_model.h
@@ -224,7 +224,7 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
unsigned long max_util, unsigned long sum_util,
unsigned long allowed_cpu_cap)
{
- unsigned long freq, scale_cpu;
+ unsigned long freq, ref_freq, scale_cpu;
struct em_perf_state *ps;
int cpu;
@@ -241,11 +241,10 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
*/
cpu = cpumask_first(to_cpumask(pd->cpus));
scale_cpu = arch_scale_cpu_capacity(cpu);
- ps = &pd->table[pd->nr_perf_states - 1];
+ ref_freq = arch_scale_freq_ref(cpu);
- max_util = map_util_perf(max_util);
max_util = min(max_util, allowed_cpu_cap);
- freq = map_util_freq(max_util, ps->frequency, scale_cpu);
+ freq = map_util_freq(max_util, ref_freq, scale_cpu);
/*
* Find the lowest performance state of the Energy Model above the
diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
index d95ab85f96ba..b0fb775a600d 100644
--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h
@@ -7,6 +7,11 @@
#include <linux/syscalls.h>
#include <linux/seccomp.h>
#include <linux/sched.h>
+#include <linux/context_tracking.h>
+#include <linux/livepatch.h>
+#include <linux/resume_user_mode.h>
+#include <linux/tick.h>
+#include <linux/kmsan.h>
#include <asm/entry-common.h>
@@ -98,7 +103,19 @@ static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs) {}
* done between establishing state and enabling interrupts. The caller must
* enable interrupts before invoking syscall_enter_from_user_mode_work().
*/
-void enter_from_user_mode(struct pt_regs *regs);
+static __always_inline void enter_from_user_mode(struct pt_regs *regs)
+{
+ arch_enter_from_user_mode(regs);
+ lockdep_hardirqs_off(CALLER_ADDR0);
+
+ CT_WARN_ON(__ct_state() != CONTEXT_USER);
+ user_exit_irqoff();
+
+ instrumentation_begin();
+ kmsan_unpoison_entry_regs(regs);
+ trace_hardirqs_off_finish();
+ instrumentation_end();
+}
/**
* syscall_enter_from_user_mode_prepare - Establish state and enable interrupts
@@ -117,6 +134,9 @@ void enter_from_user_mode(struct pt_regs *regs);
*/
void syscall_enter_from_user_mode_prepare(struct pt_regs *regs);
+long syscall_trace_enter(struct pt_regs *regs, long syscall,
+ unsigned long work);
+
/**
* syscall_enter_from_user_mode_work - Check and handle work before invoking
* a syscall
@@ -140,7 +160,15 @@ void syscall_enter_from_user_mode_prepare(struct pt_regs *regs);
* ptrace_report_syscall_entry(), __secure_computing(), trace_sys_enter()
* 2) Invocation of audit_syscall_entry()
*/
-long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall);
+static __always_inline long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall)
+{
+ unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
+
+ if (work & SYSCALL_WORK_ENTER)
+ syscall = syscall_trace_enter(regs, syscall, work);
+
+ return syscall;
+}
/**
* syscall_enter_from_user_mode - Establish state and check and handle work
@@ -159,7 +187,19 @@ long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall);
* Returns: The original or a modified syscall number. See
* syscall_enter_from_user_mode_work() for further explanation.
*/
-long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall);
+static __always_inline long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall)
+{
+ long ret;
+
+ enter_from_user_mode(regs);
+
+ instrumentation_begin();
+ local_irq_enable();
+ ret = syscall_enter_from_user_mode_work(regs, syscall);
+ instrumentation_end();
+
+ return ret;
+}
/**
* local_irq_enable_exit_to_user - Exit to user variant of local_irq_enable()
@@ -259,6 +299,43 @@ static __always_inline void arch_exit_to_user_mode(void) { }
void arch_do_signal_or_restart(struct pt_regs *regs);
/**
+ * exit_to_user_mode_loop - do any pending work before leaving to user space
+ */
+unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
+ unsigned long ti_work);
+
+/**
+ * exit_to_user_mode_prepare - call exit_to_user_mode_loop() if required
+ * @regs: Pointer to pt_regs on entry stack
+ *
+ * 1) check that interrupts are disabled
+ * 2) call tick_nohz_user_enter_prepare()
+ * 3) call exit_to_user_mode_loop() if any flags from
+ * EXIT_TO_USER_MODE_WORK are set
+ * 4) check that interrupts are still disabled
+ */
+static __always_inline void exit_to_user_mode_prepare(struct pt_regs *regs)
+{
+ unsigned long ti_work;
+
+ lockdep_assert_irqs_disabled();
+
+ /* Flush pending rcuog wakeup before the last need_resched() check */
+ tick_nohz_user_enter_prepare();
+
+ ti_work = read_thread_flags();
+ if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
+ ti_work = exit_to_user_mode_loop(regs, ti_work);
+
+ arch_exit_to_user_mode_prepare(regs, ti_work);
+
+ /* Ensure that kernel state is sane for a return to userspace */
+ kmap_assert_nomap();
+ lockdep_assert_irqs_disabled();
+ lockdep_sys_exit();
+}
+
+/**
* exit_to_user_mode - Fixup state when exiting to user mode
*
* Syscall/interrupt exit enables interrupts, but the kernel state is
@@ -276,7 +353,17 @@ void arch_do_signal_or_restart(struct pt_regs *regs);
* non-instrumentable.
* The caller has to invoke syscall_exit_to_user_mode_work() before this.
*/
-void exit_to_user_mode(void);
+static __always_inline void exit_to_user_mode(void)
+{
+ instrumentation_begin();
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare();
+ instrumentation_end();
+
+ user_enter_irqoff();
+ arch_exit_to_user_mode();
+ lockdep_hardirqs_on(CALLER_ADDR0);
+}
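A hedged sketch of how an architecture's C-level syscall glue might chain the now-inlined helpers; the register names, dispatch table and handler signature are x86-flavoured placeholders, not an actual port:

	static void hypothetical_do_syscall(struct pt_regs *regs)
	{
		long nr = syscall_enter_from_user_mode(regs, regs->orig_ax);

		if (nr >= 0 && nr < NR_syscalls)
			regs->ax = sys_call_table[nr](regs);	/* invented dispatch */

		syscall_exit_to_user_mode(regs);
	}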
/**
* syscall_exit_to_user_mode_work - Handle work before returning to user mode
diff --git a/include/linux/err.h b/include/linux/err.h
index a139c64aef2a..b5d9bb2a2349 100644
--- a/include/linux/err.h
+++ b/include/linux/err.h
@@ -19,23 +19,54 @@
#ifndef __ASSEMBLY__
+/**
+ * IS_ERR_VALUE - Detect an error pointer.
+ * @x: The pointer to check.
+ *
+ * Like IS_ERR(), but does not generate a compiler warning if the result is unused.
+ */
#define IS_ERR_VALUE(x) unlikely((unsigned long)(void *)(x) >= (unsigned long)-MAX_ERRNO)
+/**
+ * ERR_PTR - Create an error pointer.
+ * @error: A negative error code.
+ *
+ * Encodes @error into a pointer value. Users should consider the result
+ * opaque and not assume anything about how the error is encoded.
+ *
+ * Return: A pointer with @error encoded within its value.
+ */
static inline void * __must_check ERR_PTR(long error)
{
return (void *) error;
}
+/**
+ * PTR_ERR - Extract the error code from an error pointer.
+ * @ptr: An error pointer.
+ * Return: The error code within @ptr.
+ */
static inline long __must_check PTR_ERR(__force const void *ptr)
{
return (long) ptr;
}
+/**
+ * IS_ERR - Detect an error pointer.
+ * @ptr: The pointer to check.
+ * Return: true if @ptr is an error pointer, false otherwise.
+ */
static inline bool __must_check IS_ERR(__force const void *ptr)
{
return IS_ERR_VALUE((unsigned long)ptr);
}
+/**
+ * IS_ERR_OR_NULL - Detect an error pointer or a null pointer.
+ * @ptr: The pointer to check.
+ *
+ * Like IS_ERR(), but also returns true for a null pointer.
+ */
static inline bool __must_check IS_ERR_OR_NULL(__force const void *ptr)
{
return unlikely(!ptr) || IS_ERR_VALUE((unsigned long)ptr);
@@ -54,6 +85,23 @@ static inline void * __must_check ERR_CAST(__force const void *ptr)
return (void *) ptr;
}
+/**
+ * PTR_ERR_OR_ZERO - Extract the error code from a pointer if it has one.
+ * @ptr: A potential error pointer.
+ *
+ * Convenience function that can be used inside a function that returns
+ * an error code to propagate errors received as error pointers.
+ * For example, ``return PTR_ERR_OR_ZERO(ptr);`` replaces:
+ *
+ * .. code-block:: c
+ *
+ * if (IS_ERR(ptr))
+ * return PTR_ERR(ptr);
+ * else
+ * return 0;
+ *
+ * Return: The error code within @ptr if it is an error pointer; 0 otherwise.
+ */
static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
{
if (IS_ERR(ptr))
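A short usage sketch of the helpers documented above; the device lookup and table are invented for illustration:

	static struct device *hypothetical_dev_get(int id)
	{
		if (id < 0 || id >= MAX_DEVS)		/* MAX_DEVS: made up */
			return ERR_PTR(-EINVAL);	/* encode errno in the pointer */
		return &hypothetical_devs[id];
	}

	static int hypothetical_probe(int id)
	{
		struct device *dev = hypothetical_dev_get(id);

		/* 0 on success, -EINVAL propagated otherwise */
		return PTR_ERR_OR_ZERO(dev);
	}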
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 62b61527bcc4..325e0778e937 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -95,6 +95,7 @@ struct kernel_ethtool_ringparam {
* @ETHTOOL_RING_USE_TX_PUSH: capture for setting tx_push
* @ETHTOOL_RING_USE_RX_PUSH: capture for setting rx_push
* @ETHTOOL_RING_USE_TX_PUSH_BUF_LEN: capture for setting tx_push_buf_len
+ * @ETHTOOL_RING_USE_TCP_DATA_SPLIT: capture for setting tcp_data_split
*/
enum ethtool_supported_ring_param {
ETHTOOL_RING_USE_RX_BUF_LEN = BIT(0),
@@ -102,6 +103,7 @@ enum ethtool_supported_ring_param {
ETHTOOL_RING_USE_TX_PUSH = BIT(2),
ETHTOOL_RING_USE_RX_PUSH = BIT(3),
ETHTOOL_RING_USE_TX_PUSH_BUF_LEN = BIT(4),
+ ETHTOOL_RING_USE_TCP_DATA_SPLIT = BIT(5),
};
#define __ETH_RSS_HASH_BIT(bit) ((u32)1 << (bit))
@@ -409,8 +411,10 @@ struct ethtool_pause_stats {
* not entire FEC data blocks. This is a non-standard statistic.
* Reported to user space as %ETHTOOL_A_FEC_STAT_CORR_BITS.
*
- * @lane: per-lane/PCS-instance counts as defined by the standard
- * @total: error counts for the entire port, for drivers incapable of reporting
+ * For each of the above fields, the two substructure members are:
+ *
+ * - @lanes: per-lane/PCS-instance counts as defined by the standard
+ * - @total: error counts for the entire port, for drivers incapable of reporting
* per-lane stats
*
* Drivers should fill in either only total or per-lane statistics, core
@@ -595,9 +599,46 @@ struct ethtool_mm_stats {
};
/**
+ * struct ethtool_rxfh_param - RXFH (RSS) parameters
+ * @hfunc: Defines the RSS hash function currently used by the HW (or the one to be set).
+ * Valid values are one of the %ETH_RSS_HASH_*.
+ * @indir_size: On SET, the array size of the user buffer for the
+ * indirection table, which may be zero, or
+ * %ETH_RXFH_INDIR_NO_CHANGE. On GET (read from the driver),
+ * the array size of the hardware indirection table.
+ * @indir: The indirection table of size @indir_size entries.
+ * @key_size: On SET, the array size of the user buffer for the hash key,
+ * which may be zero. On GET (read from the driver), the size of the
+ * hardware hash key.
+ * @key: The hash key of size @key_size bytes.
+ * @rss_context: RSS context identifier. Context 0 is the default for normal
+ * traffic; other contexts can be referenced as the destination for RX flow
+ * classification rules. On SET, %ETH_RXFH_CONTEXT_ALLOC is used
+ * to allocate a new RSS context; on return this field will
+ * contain the ID of the newly allocated context.
+ * @rss_delete: Set to non-zero to remove the @rss_context context.
+ * @input_xfrm: Defines how the input data is transformed. Valid values are one
+ * of %RXH_XFRM_*.
+ */
+struct ethtool_rxfh_param {
+ u8 hfunc;
+ u32 indir_size;
+ u32 *indir;
+ u32 key_size;
+ u8 *key;
+ u32 rss_context;
+ u8 rss_delete;
+ u8 input_xfrm;
+};
+
+/**
* struct ethtool_ops - optional netdev operations
* @cap_link_lanes_supported: indicates if the driver supports lanes
* parameter.
+ * @cap_rss_ctx_supported: indicates if the driver supports RSS
+ * contexts.
+ * @cap_rss_sym_xor_supported: indicates if the driver supports symmetric-xor
+ * RSS.
* @supported_coalesce_params: supported types of interrupt coalescing.
* @supported_ring_params: supported ring params.
* @get_drvinfo: Report driver/device information. Modern drivers no
@@ -694,15 +735,6 @@ struct ethtool_mm_stats {
* will remain unchanged.
* Returns a negative error code or zero. An error code must be returned
* if at least one unsupported change was requested.
- * @get_rxfh_context: Get the contents of the RX flow hash indirection table,
- * hash key, and/or hash function assiciated to the given rss context.
- * Returns a negative error code or zero.
- * @set_rxfh_context: Create, remove and configure RSS contexts. Allows setting
- * the contents of the RX flow hash indirection table, hash key, and/or
- * hash function associated to the given context. Arguments which are set
- * to %NULL or zero will remain unchanged.
- * Returns a negative error code or zero. An error code must be returned
- * if at least one unsupported change was requested.
* @get_channels: Get number of channels.
* @set_channels: Set number of channels. Returns a negative error code or
* zero.
@@ -785,6 +817,8 @@ struct ethtool_mm_stats {
*/
struct ethtool_ops {
u32 cap_link_lanes_supported:1;
+ u32 cap_rss_ctx_supported:1;
+ u32 cap_rss_sym_xor_supported:1;
u32 supported_coalesce_params;
u32 supported_ring_params;
void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);
@@ -844,15 +878,9 @@ struct ethtool_ops {
int (*reset)(struct net_device *, u32 *);
u32 (*get_rxfh_key_size)(struct net_device *);
u32 (*get_rxfh_indir_size)(struct net_device *);
- int (*get_rxfh)(struct net_device *, u32 *indir, u8 *key,
- u8 *hfunc);
- int (*set_rxfh)(struct net_device *, const u32 *indir,
- const u8 *key, const u8 hfunc);
- int (*get_rxfh_context)(struct net_device *, u32 *indir, u8 *key,
- u8 *hfunc, u32 rss_context);
- int (*set_rxfh_context)(struct net_device *, const u32 *indir,
- const u8 *key, const u8 hfunc,
- u32 *rss_context, bool delete);
+ int (*get_rxfh)(struct net_device *, struct ethtool_rxfh_param *);
+ int (*set_rxfh)(struct net_device *, struct ethtool_rxfh_param *,
+ struct netlink_ext_ack *extack);
void (*get_channels)(struct net_device *, struct ethtool_channels *);
int (*set_channels)(struct net_device *, struct ethtool_channels *);
int (*get_dump_flag)(struct net_device *, struct ethtool_dump *);
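A hedged driver-side sketch of the consolidated callback; everything prefixed mydrv_ (and the two size constants) is invented:

	static int mydrv_get_rxfh(struct net_device *dev,
				  struct ethtool_rxfh_param *rxfh)
	{
		struct mydrv_priv *priv = netdev_priv(dev);

		rxfh->hfunc = ETH_RSS_HASH_TOP;
		if (rxfh->indir)
			memcpy(rxfh->indir, priv->rss_indir,
			       MYDRV_INDIR_SIZE * sizeof(u32));
		if (rxfh->key)
			memcpy(rxfh->key, priv->rss_key, MYDRV_KEY_SIZE);
		return 0;
	}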
@@ -1044,12 +1072,52 @@ static inline int ethtool_mm_frag_size_min_to_add(u32 val_min, u32 *val_add,
}
/**
+ * ethtool_get_ts_info_by_layer - Obtains time stamping capabilities from the MAC or PHY layer.
+ * @dev: pointer to net_device structure
+ * @info: buffer to hold the result
+ * Returns zero on success, non-zero otherwise.
+ */
+int ethtool_get_ts_info_by_layer(struct net_device *dev, struct ethtool_ts_info *info);
+
+/**
* ethtool_sprintf - Write formatted string to ethtool string data
- * @data: Pointer to start of string to update
+ * @data: Pointer to a pointer to the start of string to update
* @fmt: Format of string to write
*
- * Write formatted string to data. Update data to point at start of
+ * Write formatted string to *data. Update *data to point at start of
* next string.
*/
extern __printf(2, 3) void ethtool_sprintf(u8 **data, const char *fmt, ...);
+
+/**
+ * ethtool_puts - Write string to ethtool string data
+ * @data: Pointer to a pointer to the start of string to update
+ * @str: String to write
+ *
+ * Write string to *data without a trailing newline. Update *data
+ * to point at start of next string.
+ *
+ * Prefer this function to ethtool_sprintf() when given only
+ * two arguments or if @fmt is just "%s".
+ */
+extern void ethtool_puts(u8 **data, const char *str);
+
+/* Link mode to forced speed capabilities maps */
+struct ethtool_forced_speed_map {
+ u32 speed;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(caps);
+
+ const u32 *cap_arr;
+ u32 arr_size;
+};
+
+#define ETHTOOL_FORCED_SPEED_MAP(prefix, value) \
+{ \
+ .speed = SPEED_##value, \
+ .cap_arr = prefix##_##value, \
+ .arr_size = ARRAY_SIZE(prefix##_##value), \
+}
+
+void
+ethtool_forced_speed_maps_init(struct ethtool_forced_speed_map *maps, u32 size);
#endif /* _LINUX_ETHTOOL_H */
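Two usage sketches under an invented mydrv_ prefix: ethtool_puts() for fixed statistic names, and a forced-speed table built with ETHTOOL_FORCED_SPEED_MAP() (mydrv_1000 is assumed to be an array of ETHTOOL_LINK_MODE_* bit numbers):

	static void mydrv_get_strings(struct net_device *dev, u32 sset, u8 *data)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(mydrv_stats); i++)
			ethtool_puts(&data, mydrv_stats[i].name);
	}

	static struct ethtool_forced_speed_map mydrv_speed_maps[] = {
		ETHTOOL_FORCED_SPEED_MAP(mydrv, 1000),
	};

	/* once, at probe time: */
	ethtool_forced_speed_maps_init(mydrv_speed_maps,
				       ARRAY_SIZE(mydrv_speed_maps));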
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index 36a486505b08..e32bee4345fb 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -9,12 +9,12 @@
#ifndef _LINUX_EVENTFD_H
#define _LINUX_EVENTFD_H
-#include <linux/fcntl.h>
#include <linux/wait.h>
#include <linux/err.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <linux/sched.h>
+#include <uapi/linux/eventfd.h>
/*
* CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
@@ -23,10 +23,6 @@
* from eventfd, in order to leave a free define-space for
* shared O_* flags.
*/
-#define EFD_SEMAPHORE (1 << 0)
-#define EFD_CLOEXEC O_CLOEXEC
-#define EFD_NONBLOCK O_NONBLOCK
-
#define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
#define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE)
@@ -39,8 +35,7 @@ void eventfd_ctx_put(struct eventfd_ctx *ctx);
struct file *eventfd_fget(int fd);
struct eventfd_ctx *eventfd_ctx_fdget(int fd);
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
-__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n);
-__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, unsigned mask);
+void eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask);
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
__u64 *cnt);
void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);
@@ -62,15 +57,8 @@ static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd)
return ERR_PTR(-ENOSYS);
}
-static inline int eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
+static inline void eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask)
{
- return -ENOSYS;
-}
-
-static inline int eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n,
- unsigned mask)
-{
- return -ENOSYS;
}
static inline void eventfd_ctx_put(struct eventfd_ctx *ctx)
@@ -96,5 +84,10 @@ static inline void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
#endif
+static inline void eventfd_signal(struct eventfd_ctx *ctx)
+{
+ eventfd_signal_mask(ctx, 0);
+}
+
#endif /* _LINUX_EVENTFD_H */
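With the counter argument gone, callers collapse to a single unconditional form; a minimal sketch:

	/* before: eventfd_signal(ctx, 1);  after this change: */
	if (ctx)
		eventfd_signal(ctx);	/* always adds 1 to the count */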
diff --git a/include/linux/evm.h b/include/linux/evm.h
index 7dc1ee74169f..36ec884320d9 100644
--- a/include/linux/evm.h
+++ b/include/linux/evm.h
@@ -31,6 +31,7 @@ extern void evm_inode_post_setxattr(struct dentry *dentry,
const char *xattr_name,
const void *xattr_value,
size_t xattr_value_len);
+extern int evm_inode_copy_up_xattr(const char *name);
extern int evm_inode_removexattr(struct mnt_idmap *idmap,
struct dentry *dentry, const char *xattr_name);
extern void evm_inode_post_removexattr(struct dentry *dentry,
@@ -56,9 +57,10 @@ static inline void evm_inode_post_set_acl(struct dentry *dentry,
{
return evm_inode_post_setxattr(dentry, acl_name, NULL, 0);
}
-extern int evm_inode_init_security(struct inode *inode,
- const struct xattr *xattr_array,
- struct xattr *evm);
+
+int evm_inode_init_security(struct inode *inode, struct inode *dir,
+ const struct qstr *qstr, struct xattr *xattrs,
+ int *xattr_count);
extern bool evm_revalidate_status(const char *xattr_name);
extern int evm_protected_xattr_if_enabled(const char *req_xattr_name);
extern int evm_read_protected_xattrs(struct dentry *dentry, u8 *buffer,
@@ -116,6 +118,11 @@ static inline void evm_inode_post_setxattr(struct dentry *dentry,
return;
}
+static inline int evm_inode_copy_up_xattr(const char *name)
+{
+ return 0;
+}
+
static inline int evm_inode_removexattr(struct mnt_idmap *idmap,
struct dentry *dentry,
const char *xattr_name)
@@ -157,9 +164,10 @@ static inline void evm_inode_post_set_acl(struct dentry *dentry,
return;
}
-static inline int evm_inode_init_security(struct inode *inode,
- const struct xattr *xattr_array,
- struct xattr *evm)
+static inline int evm_inode_init_security(struct inode *inode, struct inode *dir,
+ const struct qstr *qstr,
+ struct xattr *xattrs,
+ int *xattr_count)
{
return 0;
}
diff --git a/include/linux/export-internal.h b/include/linux/export-internal.h
index fe7e6ba918f1..d445705ac13c 100644
--- a/include/linux/export-internal.h
+++ b/include/linux/export-internal.h
@@ -10,8 +10,61 @@
#include <linux/compiler.h>
#include <linux/types.h>
+#if defined(CONFIG_HAVE_ARCH_PREL32_RELOCATIONS)
+/*
+ * relative reference: this reduces the size by half on 64-bit architectures,
+ * and eliminates the need for absolute relocations that require runtime
+ * processing on relocatable kernels.
+ */
+#define __KSYM_ALIGN ".balign 4"
+#define __KSYM_REF(sym) ".long " #sym "- ."
+#elif defined(CONFIG_64BIT)
+#define __KSYM_ALIGN ".balign 8"
+#define __KSYM_REF(sym) ".quad " #sym
+#else
+#define __KSYM_ALIGN ".balign 4"
+#define __KSYM_REF(sym) ".long " #sym
+#endif
+
+/*
+ * For every exported symbol, do the following:
+ *
+ * - Put the name of the symbol and namespace (empty string "" for none) in
+ * __ksymtab_strings.
+ * - Place a struct kernel_symbol entry in the __ksymtab section.
+ *
+ * Note on .section use: we specify progbits since usage of the "M" (SHF_MERGE)
+ * section flag requires it. Use '%progbits' instead of '@progbits' since the
+ * former apparently works on all arches according to the binutils source.
+ */
+#define __KSYMTAB(name, sym, sec, ns) \
+ asm(" .section \"__ksymtab_strings\",\"aMS\",%progbits,1" "\n" \
+ "__kstrtab_" #name ":" "\n" \
+ " .asciz \"" #name "\"" "\n" \
+ "__kstrtabns_" #name ":" "\n" \
+ " .asciz \"" ns "\"" "\n" \
+ " .previous" "\n" \
+ " .section \"___ksymtab" sec "+" #name "\", \"a\"" "\n" \
+ __KSYM_ALIGN "\n" \
+ "__ksymtab_" #name ":" "\n" \
+ __KSYM_REF(sym) "\n" \
+ __KSYM_REF(__kstrtab_ ##name) "\n" \
+ __KSYM_REF(__kstrtabns_ ##name) "\n" \
+ " .previous" "\n" \
+ )
+
+#if defined(CONFIG_PARISC) && defined(CONFIG_64BIT)
+#define KSYM_FUNC(name) P%name
+#else
+#define KSYM_FUNC(name) name
+#endif
+
+#define KSYMTAB_FUNC(name, sec, ns) __KSYMTAB(name, KSYM_FUNC(name), sec, ns)
+#define KSYMTAB_DATA(name, sec, ns) __KSYMTAB(name, name, sec, ns)
+
#define SYMBOL_CRC(sym, crc, sec) \
asm(".section \"___kcrctab" sec "+" #sym "\",\"a\"" "\n" \
+ ".balign 4" "\n" \
"__crc_" #sym ":" "\n" \
".long " #crc "\n" \
".previous" "\n")
diff --git a/include/linux/export.h b/include/linux/export.h
index 3f31ced0d977..0bbd02fd351d 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -2,18 +2,11 @@
#ifndef _LINUX_EXPORT_H
#define _LINUX_EXPORT_H
+#include <linux/compiler.h>
+#include <linux/linkage.h>
#include <linux/stringify.h>
/*
- * Export symbols from the kernel to modules. Forked from module.h
- * to reduce the amount of pointless cruft we feed to gcc when only
- * exporting a simple symbol or two.
- *
- * Try not to add #includes here. It slows compilation and makes kernel
- * hackers place grumpy comments in header files.
- */
-
-/*
* This comment block is used by fixdep. Please do not remove.
*
* When CONFIG_MODVERSIONS is changed from n to y, all source files having
@@ -21,137 +14,60 @@
* side effect of the *.o build rule.
*/
-#ifndef __ASSEMBLY__
-#ifdef MODULE
-extern struct module __this_module;
-#define THIS_MODULE (&__this_module)
-#else
-#define THIS_MODULE ((struct module *)0)
-#endif
-
-#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
-#include <linux/compiler.h>
-/*
- * Emit the ksymtab entry as a pair of relative references: this reduces
- * the size by half on 64-bit architectures, and eliminates the need for
- * absolute relocations that require runtime processing on relocatable
- * kernels.
- */
-#define __KSYMTAB_ENTRY(sym, sec) \
- __ADDRESSABLE(sym) \
- asm(" .section \"___ksymtab" sec "+" #sym "\", \"a\" \n" \
- " .balign 4 \n" \
- "__ksymtab_" #sym ": \n" \
- " .long " #sym "- . \n" \
- " .long __kstrtab_" #sym "- . \n" \
- " .long __kstrtabns_" #sym "- . \n" \
- " .previous \n")
-
-struct kernel_symbol {
- int value_offset;
- int name_offset;
- int namespace_offset;
-};
+#ifdef CONFIG_64BIT
+#define __EXPORT_SYMBOL_REF(sym) \
+ .balign 8 ASM_NL \
+ .quad sym
#else
-#define __KSYMTAB_ENTRY(sym, sec) \
- static const struct kernel_symbol __ksymtab_##sym \
- __attribute__((section("___ksymtab" sec "+" #sym), used)) \
- __aligned(sizeof(void *)) \
- = { (unsigned long)&sym, __kstrtab_##sym, __kstrtabns_##sym }
-
-struct kernel_symbol {
- unsigned long value;
- const char *name;
- const char *namespace;
-};
+#define __EXPORT_SYMBOL_REF(sym) \
+ .balign 4 ASM_NL \
+ .long sym
#endif
-#ifdef __GENKSYMS__
-
-#define ___EXPORT_SYMBOL(sym, sec, ns) __GENKSYMS_EXPORT_SYMBOL(sym)
-
-#else
-
-/*
- * For every exported symbol, do the following:
- *
- * - Put the name of the symbol and namespace (empty string "" for none) in
- * __ksymtab_strings.
- * - Place a struct kernel_symbol entry in the __ksymtab section.
- *
- * note on .section use: we specify progbits since usage of the "M" (SHF_MERGE)
- * section flag requires it. Use '%progbits' instead of '@progbits' since the
- * former apparently works on all arches according to the binutils source.
- */
-#define ___EXPORT_SYMBOL(sym, sec, ns) \
- extern typeof(sym) sym; \
- extern const char __kstrtab_##sym[]; \
- extern const char __kstrtabns_##sym[]; \
- asm(" .section \"__ksymtab_strings\",\"aMS\",%progbits,1 \n" \
- "__kstrtab_" #sym ": \n" \
- " .asciz \"" #sym "\" \n" \
- "__kstrtabns_" #sym ": \n" \
- " .asciz \"" ns "\" \n" \
- " .previous \n"); \
- __KSYMTAB_ENTRY(sym, sec)
-
-#endif
+#define ___EXPORT_SYMBOL(sym, license, ns) \
+ .section ".export_symbol","a" ASM_NL \
+ __export_symbol_##sym: ASM_NL \
+ .asciz license ASM_NL \
+ .asciz ns ASM_NL \
+ __EXPORT_SYMBOL_REF(sym) ASM_NL \
+ .previous
-#if !defined(CONFIG_MODULES) || defined(__DISABLE_EXPORTS)
+#if defined(__DISABLE_EXPORTS)
/*
* Allow symbol exports to be disabled completely so that C code may
* be reused in other execution contexts such as the UEFI stub or the
* decompressor.
*/
-#define __EXPORT_SYMBOL(sym, sec, ns)
+#define __EXPORT_SYMBOL(sym, license, ns)
-#elif defined(CONFIG_TRIM_UNUSED_KSYMS)
+#elif defined(__GENKSYMS__)
-#include <generated/autoksyms.h>
+#define __EXPORT_SYMBOL(sym, license, ns) __GENKSYMS_EXPORT_SYMBOL(sym)
-/*
- * For fine grained build dependencies, we want to tell the build system
- * about each possible exported symbol even if they're not actually exported.
- * We use a symbol pattern __ksym_marker_<symbol> that the build system filters
- * from the $(NM) output (see scripts/gen_ksymdeps.sh). These symbols are
- * discarded in the final link stage.
- */
-#define __ksym_marker(sym) \
- static int __ksym_marker_##sym[0] __section(".discard.ksym") __used
-
-#define __EXPORT_SYMBOL(sym, sec, ns) \
- __ksym_marker(sym); \
- __cond_export_sym(sym, sec, ns, __is_defined(__KSYM_##sym))
-#define __cond_export_sym(sym, sec, ns, conf) \
- ___cond_export_sym(sym, sec, ns, conf)
-#define ___cond_export_sym(sym, sec, ns, enabled) \
- __cond_export_sym_##enabled(sym, sec, ns)
-#define __cond_export_sym_1(sym, sec, ns) ___EXPORT_SYMBOL(sym, sec, ns)
-
-#ifdef __GENKSYMS__
-#define __cond_export_sym_0(sym, sec, ns) __GENKSYMS_EXPORT_SYMBOL(sym)
-#else
-#define __cond_export_sym_0(sym, sec, ns) /* nothing */
-#endif
+#elif defined(__ASSEMBLY__)
+
+#define __EXPORT_SYMBOL(sym, license, ns) \
+ ___EXPORT_SYMBOL(sym, license, ns)
#else
-#define __EXPORT_SYMBOL(sym, sec, ns) ___EXPORT_SYMBOL(sym, sec, ns)
+#define __EXPORT_SYMBOL(sym, license, ns) \
+ extern typeof(sym) sym; \
+ __ADDRESSABLE(sym) \
+ asm(__stringify(___EXPORT_SYMBOL(sym, license, ns)))
-#endif /* CONFIG_MODULES */
+#endif
#ifdef DEFAULT_SYMBOL_NAMESPACE
-#define _EXPORT_SYMBOL(sym, sec) __EXPORT_SYMBOL(sym, sec, __stringify(DEFAULT_SYMBOL_NAMESPACE))
+#define _EXPORT_SYMBOL(sym, license) __EXPORT_SYMBOL(sym, license, __stringify(DEFAULT_SYMBOL_NAMESPACE))
#else
-#define _EXPORT_SYMBOL(sym, sec) __EXPORT_SYMBOL(sym, sec, "")
+#define _EXPORT_SYMBOL(sym, license) __EXPORT_SYMBOL(sym, license, "")
#endif
#define EXPORT_SYMBOL(sym) _EXPORT_SYMBOL(sym, "")
-#define EXPORT_SYMBOL_GPL(sym) _EXPORT_SYMBOL(sym, "_gpl")
+#define EXPORT_SYMBOL_GPL(sym) _EXPORT_SYMBOL(sym, "GPL")
#define EXPORT_SYMBOL_NS(sym, ns) __EXPORT_SYMBOL(sym, "", __stringify(ns))
-#define EXPORT_SYMBOL_NS_GPL(sym, ns) __EXPORT_SYMBOL(sym, "_gpl", __stringify(ns))
-
-#endif /* !__ASSEMBLY__ */
+#define EXPORT_SYMBOL_NS_GPL(sym, ns) __EXPORT_SYMBOL(sym, "GPL", __stringify(ns))
#endif /* _LINUX_EXPORT_H */
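For illustration, tracing EXPORT_SYMBOL_GPL(foo) through the macros above on a C (non-genksyms, exports enabled) build; this is a restatement of the expansion, not a new API:

	/* EXPORT_SYMBOL_GPL(foo) becomes: */
	extern typeof(foo) foo;
	__ADDRESSABLE(foo)
	asm(__stringify(___EXPORT_SYMBOL(foo, "GPL", "")));
	/*
	 * which emits, into the new ".export_symbol" section, the license
	 * string "GPL", the (empty) namespace string, and a pointer-sized
	 * reference to foo for the build tooling to consume.
	 */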
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index 9edb29101ec8..bb37ad5cc954 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -99,12 +99,29 @@ enum fid_type {
FILEID_FAT_WITH_PARENT = 0x72,
/*
+ * 64 bit inode number, 32 bit generation number.
+ */
+ FILEID_INO64_GEN = 0x81,
+
+ /*
+ * 64 bit inode number, 32 bit generation number,
+ * 64 bit parent inode number, 32 bit parent generation.
+ */
+ FILEID_INO64_GEN_PARENT = 0x82,
+
+ /*
* 128 bit child FID (struct lu_fid)
* 128 bit parent FID (struct lu_fid)
*/
FILEID_LUSTRE = 0x97,
/*
+ * 64 bit inode number, 32 bit subvolume, 32 bit generation number:
+ */
+ FILEID_BCACHEFS_WITHOUT_PARENT = 0xb1,
+ FILEID_BCACHEFS_WITH_PARENT = 0xb2,
+
+ /*
* 64 bit unique kernfs id
*/
FILEID_KERNFS = 0xfe,
@@ -123,7 +140,11 @@ struct fid {
u32 parent_ino;
u32 parent_gen;
} i32;
- struct {
+ struct {
+ u64 ino;
+ u32 gen;
+ } __packed i64;
+ struct {
u32 block;
u16 partref;
u16 parent_partref;
@@ -135,6 +156,9 @@ struct fid {
};
};
+#define EXPORT_FH_CONNECTABLE 0x1 /* Encode file handle with parent */
+#define EXPORT_FH_FID 0x2 /* File handle may be non-decodeable */
+
/**
* struct export_operations - for nfsd to communicate with file systems
* @encode_fh: encode a file handle fragment from a dentry
@@ -150,7 +174,7 @@ struct fid {
* encode_fh:
* @encode_fh should store in the file handle fragment @fh (using at most
* @max_len bytes) information that can be used by @decode_fh to recover the
- * file referred to by the &struct dentry @de. If the @connectable flag is
+ * file referred to by the &struct dentry @de. If @flag has the CONNECTABLE bit
* set, the encode_fh() should store sufficient information so that a good
 * attempt can be made to find not only the file but also its place in the
* filesystem. This typically means storing a reference to de->d_parent in
@@ -221,13 +245,63 @@ struct export_operations {
atomic attribute updates
*/
#define EXPORT_OP_FLUSH_ON_CLOSE (0x20) /* fs flushes file data on close */
+#define EXPORT_OP_ASYNC_LOCK (0x40) /* fs can do async lock request */
unsigned long flags;
};
+/**
+ * exportfs_lock_op_is_async() - export op supports async lock operation
+ * @export_ops: the nfs export operations to check
+ *
+ * Returns true if the nfs export_operations structure has
+ * EXPORT_OP_ASYNC_LOCK set in its flags
+ */
+static inline bool
+exportfs_lock_op_is_async(const struct export_operations *export_ops)
+{
+ return export_ops->flags & EXPORT_OP_ASYNC_LOCK;
+}
+
extern int exportfs_encode_inode_fh(struct inode *inode, struct fid *fid,
- int *max_len, struct inode *parent);
+ int *max_len, struct inode *parent,
+ int flags);
extern int exportfs_encode_fh(struct dentry *dentry, struct fid *fid,
- int *max_len, int connectable);
+ int *max_len, int flags);
+
+static inline bool exportfs_can_encode_fid(const struct export_operations *nop)
+{
+ return !nop || nop->encode_fh;
+}
+
+static inline bool exportfs_can_decode_fh(const struct export_operations *nop)
+{
+ return nop && nop->fh_to_dentry;
+}
+
+static inline bool exportfs_can_encode_fh(const struct export_operations *nop,
+ int fh_flags)
+{
+ /*
+ * If a non-decodeable file handle was requested, we only need to make
+ * sure that the filesystem did not opt out of encoding a fid.
+ */
+ if (fh_flags & EXPORT_FH_FID)
+ return exportfs_can_encode_fid(nop);
+
+ /*
+ * If a decodeable file handle was requested, we need to make sure that
+ * the filesystem can also decode file handles.
+ */
+ return exportfs_can_decode_fh(nop);
+}
+
+static inline int exportfs_encode_fid(struct inode *inode, struct fid *fid,
+ int *max_len)
+{
+ return exportfs_encode_inode_fh(inode, fid, max_len, NULL,
+ EXPORT_FH_FID);
+}
+
extern struct dentry *exportfs_decode_fh_raw(struct vfsmount *mnt,
struct fid *fid, int fh_len,
int fileid_type,
@@ -240,10 +314,12 @@ extern struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
/*
* Generic helpers for filesystems.
*/
-extern struct dentry *generic_fh_to_dentry(struct super_block *sb,
+int generic_encode_ino32_fh(struct inode *inode, __u32 *fh, int *max_len,
+ struct inode *parent);
+struct dentry *generic_fh_to_dentry(struct super_block *sb,
struct fid *fid, int fh_len, int fh_type,
struct inode *(*get_inode) (struct super_block *sb, u64 ino, u32 gen));
-extern struct dentry *generic_fh_to_parent(struct super_block *sb,
+struct dentry *generic_fh_to_parent(struct super_block *sb,
struct fid *fid, int fh_len, int fh_type,
struct inode *(*get_inode) (struct super_block *sb, u64 ino, u32 gen));
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index 3c45c3846fe9..e596a0abcb27 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -328,16 +328,4 @@ struct extcon_specific_cable_nb {
struct extcon_dev *edev;
unsigned long previous_value;
};
-
-static inline int extcon_register_interest(struct extcon_specific_cable_nb *obj,
- const char *extcon_name, const char *cable_name,
- struct notifier_block *nb)
-{
- return -EINVAL;
-}
-
-static inline int extcon_unregister_interest(struct extcon_specific_cable_nb *obj)
-{
- return -EINVAL;
-}
#endif /* __LINUX_EXTCON_H__ */
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 1d6402529d10..053137a0fe45 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -13,10 +13,10 @@
#define F2FS_SUPER_OFFSET 1024 /* byte-size offset */
#define F2FS_MIN_LOG_SECTOR_SIZE 9 /* 9 bits for 512 bytes */
-#define F2FS_MAX_LOG_SECTOR_SIZE 12 /* 12 bits for 4096 bytes */
-#define F2FS_LOG_SECTORS_PER_BLOCK 3 /* log number for sector/blk */
-#define F2FS_BLKSIZE 4096 /* support only 4KB block */
-#define F2FS_BLKSIZE_BITS 12 /* bits for F2FS_BLKSIZE */
+#define F2FS_MAX_LOG_SECTOR_SIZE PAGE_SHIFT /* Max is Block Size */
+#define F2FS_LOG_SECTORS_PER_BLOCK (PAGE_SHIFT - 9) /* log number for sector/blk */
+#define F2FS_BLKSIZE PAGE_SIZE /* support only block == page */
+#define F2FS_BLKSIZE_BITS PAGE_SHIFT /* bits for F2FS_BLKSIZE */
#define F2FS_MAX_EXTENSION 64 /* # of extension entries */
#define F2FS_EXTENSION_LEN 8 /* max size of extension */
#define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) >> F2FS_BLKSIZE_BITS)
@@ -103,6 +103,8 @@ enum f2fs_error {
ERROR_INCONSISTENT_SIT,
ERROR_CORRUPTED_VERITY_XATTR,
ERROR_CORRUPTED_XATTR,
+ ERROR_INVALID_NODE_REFERENCE,
+ ERROR_INCONSISTENT_NAT,
ERROR_MAX,
};
@@ -209,14 +211,14 @@ struct f2fs_checkpoint {
unsigned char sit_nat_version_bitmap[];
} __packed;
-#define CP_CHKSUM_OFFSET 4092 /* default chksum offset in checkpoint */
+#define CP_CHKSUM_OFFSET (F2FS_BLKSIZE - sizeof(__le32)) /* default chksum offset in checkpoint */
#define CP_MIN_CHKSUM_OFFSET \
(offsetof(struct f2fs_checkpoint, sit_nat_version_bitmap))
/*
* For orphan inode management
*/
-#define F2FS_ORPHANS_PER_BLOCK 1020
+#define F2FS_ORPHANS_PER_BLOCK ((F2FS_BLKSIZE - 4 * sizeof(__le32)) / sizeof(__le32))
#define GET_ORPHAN_BLOCKS(n) (((n) + F2FS_ORPHANS_PER_BLOCK - 1) / \
F2FS_ORPHANS_PER_BLOCK)
@@ -242,14 +244,31 @@ struct f2fs_extent {
#define F2FS_NAME_LEN 255
/* 200 bytes for inline xattrs by default */
#define DEFAULT_INLINE_XATTR_ADDRS 50
-#define DEF_ADDRS_PER_INODE 923 /* Address Pointers in an Inode */
+
+#define OFFSET_OF_END_OF_I_EXT 360
+#define SIZE_OF_I_NID 20
+
+struct node_footer {
+ __le32 nid; /* node id */
+ __le32 ino; /* inode number */
+ __le32 flag; /* include cold/fsync/dentry marks and offset */
+ __le64 cp_ver; /* checkpoint version */
+ __le32 next_blkaddr; /* next node page block address */
+} __packed;
+
+/* Address Pointers in an Inode */
+#define DEF_ADDRS_PER_INODE ((F2FS_BLKSIZE - OFFSET_OF_END_OF_I_EXT \
+ - SIZE_OF_I_NID \
+ - sizeof(struct node_footer)) / sizeof(__le32))
#define CUR_ADDRS_PER_INODE(inode) (DEF_ADDRS_PER_INODE - \
get_extra_isize(inode))
#define DEF_NIDS_PER_INODE 5 /* Node IDs in an Inode */
#define ADDRS_PER_INODE(inode) addrs_per_inode(inode)
-#define DEF_ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */
+/* Address Pointers in a Direct Block */
+#define DEF_ADDRS_PER_BLOCK ((F2FS_BLKSIZE - sizeof(struct node_footer)) / sizeof(__le32))
#define ADDRS_PER_BLOCK(inode) addrs_per_block(inode)
-#define NIDS_PER_BLOCK 1018 /* Node IDs in an Indirect Block */
+/* Node IDs in an Indirect Block */
+#define NIDS_PER_BLOCK ((F2FS_BLKSIZE - sizeof(struct node_footer)) / sizeof(__le32))
#define ADDRS_PER_PAGE(page, inode) \
(IS_INODE(page) ? ADDRS_PER_INODE(inode) : ADDRS_PER_BLOCK(inode))
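Worked arithmetic for the common 4 KiB block size; the derived values match the old fixed constants, which is the point of the conversion:

	/*
	 * sizeof(struct node_footer) = 4 + 4 + 4 + 8 + 4 = 24 bytes
	 * DEF_ADDRS_PER_INODE = (4096 - 360 - 20 - 24) / 4 = 923
	 * DEF_ADDRS_PER_BLOCK = NIDS_PER_BLOCK = (4096 - 24) / 4 = 1018
	 */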
@@ -341,14 +360,6 @@ enum {
#define OFFSET_BIT_MASK GENMASK(OFFSET_BIT_SHIFT - 1, 0)
-struct node_footer {
- __le32 nid; /* node id */
- __le32 ino; /* inode number */
- __le32 flag; /* include cold/fsync/dentry marks and offset */
- __le64 cp_ver; /* checkpoint version */
- __le32 next_blkaddr; /* next node page block address */
-} __packed;
-
struct f2fs_node {
/* can be one of three types: inode, direct, and indirect types */
union {
@@ -362,7 +373,7 @@ struct f2fs_node {
/*
* For NAT entries
*/
-#define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry))
+#define NAT_ENTRY_PER_BLOCK (F2FS_BLKSIZE / sizeof(struct f2fs_nat_entry))
struct f2fs_nat_entry {
__u8 version; /* latest version of cached nat entry */
@@ -377,12 +388,13 @@ struct f2fs_nat_block {
/*
* For SIT entries
*
- * Each segment is 2MB in size by default so that a bitmap for validity of
- * there-in blocks should occupy 64 bytes, 512 bits.
+ * A validity bitmap of 64 bytes covers 512 blocks. For a 4K page size,
+ * this results in a segment size of 2MB. For 16K pages, the default segment
+ * size is 8MB.
 * Changing this is not allowed.
*/
#define SIT_VBLOCK_MAP_SIZE 64
-#define SIT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_sit_entry))
+#define SIT_ENTRY_PER_BLOCK (F2FS_BLKSIZE / sizeof(struct f2fs_sit_entry))
/*
* F2FS uses 4 bytes to represent block address. As a result, supported size of
@@ -417,7 +429,7 @@ struct f2fs_sit_block {
* For segment summary
*
* One summary block contains exactly 512 summary entries, which represents
- * exactly 2MB segment by default. Not allow to change the basic units.
+ * exactly one segment by default. Changing the basic units is not allowed.
*
* NOTE: For initializing fields, you must use set_summary
*
@@ -428,12 +440,12 @@ struct f2fs_sit_block {
* from node's page's beginning to get a data block address.
* ex) data_blkaddr = (block_t)(nodepage_start_address + ofs_in_node)
*/
-#define ENTRIES_IN_SUM 512
-#define SUMMARY_SIZE (7) /* sizeof(struct summary) */
+#define ENTRIES_IN_SUM (F2FS_BLKSIZE / 8)
+#define SUMMARY_SIZE (7) /* sizeof(struct f2fs_summary) */
#define SUM_FOOTER_SIZE (5) /* sizeof(struct summary_footer) */
#define SUM_ENTRY_SIZE (SUMMARY_SIZE * ENTRIES_IN_SUM)
-/* a summary entry for a 4KB-sized block in a segment */
+/* a summary entry for a block in a segment */
struct f2fs_summary {
__le32 nid; /* parent node id */
union {
@@ -517,7 +529,7 @@ struct f2fs_journal {
};
} __packed;
-/* 4KB-sized summary block structure */
+/* Block-sized summary block structure */
struct f2fs_summary_block {
struct f2fs_summary entries[ENTRIES_IN_SUM];
struct f2fs_journal journal;
@@ -558,11 +570,14 @@ typedef __le32 f2fs_hash_t;
* Note: there are more reserved space in inline dentry than in regular
* dentry, when converting inline dentry we should handle this carefully.
*/
-#define NR_DENTRY_IN_BLOCK 214 /* the number of dentry in a block */
+
+/* the number of dentry in a block */
+#define NR_DENTRY_IN_BLOCK ((BITS_PER_BYTE * F2FS_BLKSIZE) / \
+ ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * BITS_PER_BYTE + 1))
#define SIZE_OF_DIR_ENTRY 11 /* by byte */
#define SIZE_OF_DENTRY_BITMAP ((NR_DENTRY_IN_BLOCK + BITS_PER_BYTE - 1) / \
BITS_PER_BYTE)
-#define SIZE_OF_RESERVED (PAGE_SIZE - ((SIZE_OF_DIR_ENTRY + \
+#define SIZE_OF_RESERVED (F2FS_BLKSIZE - ((SIZE_OF_DIR_ENTRY + \
F2FS_SLOT_LEN) * \
NR_DENTRY_IN_BLOCK + SIZE_OF_DENTRY_BITMAP))
#define MIN_INLINE_DENTRY_SIZE 40 /* just include '.' and '..' entries */
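Worked arithmetic for a 4 KiB block, again reproducing the old constant:

	/*
	 * NR_DENTRY_IN_BLOCK = (8 * 4096) / ((11 + 8) * 8 + 1)
	 *                    = 32768 / 153 = 214
	 * SIZE_OF_DENTRY_BITMAP = (214 + 7) / 8 = 27
	 * SIZE_OF_RESERVED = 4096 - (19 * 214 + 27) = 3 bytes
	 */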
@@ -575,7 +590,7 @@ struct f2fs_dir_entry {
__u8 file_type; /* file type */
} __packed;
-/* 4KB-sized directory entry block */
+/* Block-sized directory entry block */
struct f2fs_dentry_block {
/* validity bitmap for directory entries in each block */
__u8 dentry_bitmap[SIZE_OF_DENTRY_BITMAP];
diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h
index 481abf530b3c..6d5edef09d45 100644
--- a/include/linux/fault-inject.h
+++ b/include/linux/fault-inject.h
@@ -93,6 +93,15 @@ struct kmem_cache;
bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order);
+#ifdef CONFIG_FAIL_PAGE_ALLOC
+bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order);
+#else
+static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
+{
+ return false;
+}
+#endif /* CONFIG_FAIL_PAGE_ALLOC */
+
int should_failslab(struct kmem_cache *s, gfp_t gfpflags);
#ifdef CONFIG_FAILSLAB
extern bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags);
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 08cb47da71f8..05dc9624897d 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -15,7 +15,8 @@
#include <linux/list.h>
#include <linux/backlight.h>
#include <linux/slab.h>
-#include <asm/io.h>
+
+#include <asm/fb.h>
struct vm_area_struct;
struct fb_info;
@@ -382,7 +383,6 @@ struct fb_tile_ops {
#endif /* CONFIG_FB_TILEBLITTING */
/* FBINFO_* = fb_info.flags bit flags */
-#define FBINFO_DEFAULT 0
#define FBINFO_HWACCEL_DISABLED 0x0002
/* When FBINFO_HWACCEL_DISABLED is set:
* Hardware acceleration is turned off. Software implementations
@@ -480,7 +480,9 @@ struct fb_info {
const struct fb_ops *fbops;
struct device *device; /* This is the parent */
+#if defined(CONFIG_FB_DEVICE)
struct device *dev; /* This is this fb device */
+#endif
int class_flag; /* private sysfs flags */
#ifdef CONFIG_FB_TILEBLITTING
struct fb_tile_ops *tileops; /* Tile Blitting */
@@ -501,8 +503,6 @@ struct fb_info {
bool skip_vt_switch; /* no VT switch on suspend/resume required */
};
-#define FBINFO_FLAG_DEFAULT FBINFO_DEFAULT
-
/* This will go away
* fbset currently hacks in FB_ACCELF_TEXT into var.accel_flags
* when it wants to turn the acceleration engine on. This is
@@ -511,58 +511,6 @@ struct fb_info {
*/
#define STUPID_ACCELF_TEXT_SHIT
-// This will go away
-#if defined(__sparc__)
-
-/* We map all of our framebuffers such that big-endian accesses
- * are what we want, so the following is sufficient.
- */
-
-// This will go away
-#define fb_readb sbus_readb
-#define fb_readw sbus_readw
-#define fb_readl sbus_readl
-#define fb_readq sbus_readq
-#define fb_writeb sbus_writeb
-#define fb_writew sbus_writew
-#define fb_writel sbus_writel
-#define fb_writeq sbus_writeq
-#define fb_memset sbus_memset_io
-#define fb_memcpy_fromfb sbus_memcpy_fromio
-#define fb_memcpy_tofb sbus_memcpy_toio
-
-#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || \
- defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || \
- defined(__arm__) || defined(__aarch64__) || defined(__mips__)
-
-#define fb_readb __raw_readb
-#define fb_readw __raw_readw
-#define fb_readl __raw_readl
-#define fb_readq __raw_readq
-#define fb_writeb __raw_writeb
-#define fb_writew __raw_writew
-#define fb_writel __raw_writel
-#define fb_writeq __raw_writeq
-#define fb_memset memset_io
-#define fb_memcpy_fromfb memcpy_fromio
-#define fb_memcpy_tofb memcpy_toio
-
-#else
-
-#define fb_readb(addr) (*(volatile u8 *) (addr))
-#define fb_readw(addr) (*(volatile u16 *) (addr))
-#define fb_readl(addr) (*(volatile u32 *) (addr))
-#define fb_readq(addr) (*(volatile u64 *) (addr))
-#define fb_writeb(b,addr) (*(volatile u8 *) (addr) = (b))
-#define fb_writew(b,addr) (*(volatile u16 *) (addr) = (b))
-#define fb_writel(b,addr) (*(volatile u32 *) (addr) = (b))
-#define fb_writeq(b,addr) (*(volatile u64 *) (addr) = (b))
-#define fb_memset memset
-#define fb_memcpy_fromfb memcpy
-#define fb_memcpy_tofb memcpy
-
-#endif
-
#define FB_LEFT_POS(p, bpp) (fb_be_math(p) ? (32 - (bpp)) : 0)
#define FB_SHIFT_HIGH(p, val, bits) (fb_be_math(p) ? (val) >> (bits) : \
(val) << (bits))
@@ -576,12 +524,41 @@ struct fb_info {
extern int fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var);
extern int fb_pan_display(struct fb_info *info, struct fb_var_screeninfo *var);
extern int fb_blank(struct fb_info *info, int blank);
+
+/*
+ * Helpers for framebuffers in I/O memory
+ */
+
extern void cfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
extern void cfb_copyarea(struct fb_info *info, const struct fb_copyarea *area);
extern void cfb_imageblit(struct fb_info *info, const struct fb_image *image);
+extern ssize_t fb_io_read(struct fb_info *info, char __user *buf,
+ size_t count, loff_t *ppos);
+extern ssize_t fb_io_write(struct fb_info *info, const char __user *buf,
+ size_t count, loff_t *ppos);
+int fb_io_mmap(struct fb_info *info, struct vm_area_struct *vma);
+
+#define __FB_DEFAULT_IOMEM_OPS_RDWR \
+ .fb_read = fb_io_read, \
+ .fb_write = fb_io_write
+
+#define __FB_DEFAULT_IOMEM_OPS_DRAW \
+ .fb_fillrect = cfb_fillrect, \
+ .fb_copyarea = cfb_copyarea, \
+ .fb_imageblit = cfb_imageblit
+
+#define __FB_DEFAULT_IOMEM_OPS_MMAP \
+ .fb_mmap = fb_io_mmap
+
+#define FB_DEFAULT_IOMEM_OPS \
+ __FB_DEFAULT_IOMEM_OPS_RDWR, \
+ __FB_DEFAULT_IOMEM_OPS_DRAW, \
+ __FB_DEFAULT_IOMEM_OPS_MMAP
+
/*
- * Drawing operations where framebuffer is in system RAM
+ * Helpers for framebuffers in system memory
*/
+
extern void sys_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
extern void sys_copyarea(struct fb_info *info, const struct fb_copyarea *area);
extern void sys_imageblit(struct fb_info *info, const struct fb_image *image);
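A hedged sketch of a driver adopting the new initializer; the mydrv_ names are invented:

	static const struct fb_ops mydrv_fb_ops = {
		.owner = THIS_MODULE,
		FB_DEFAULT_IOMEM_OPS,	/* fb_io_read/fb_io_write, cfb_*, fb_io_mmap */
		.fb_set_par = mydrv_set_par,
	};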
@@ -590,11 +567,31 @@ extern ssize_t fb_sys_read(struct fb_info *info, char __user *buf,
extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
size_t count, loff_t *ppos);
-/* drivers/video/fbmem.c */
+#define __FB_DEFAULT_SYSMEM_OPS_RDWR \
+ .fb_read = fb_sys_read, \
+ .fb_write = fb_sys_write
+
+#define __FB_DEFAULT_SYSMEM_OPS_DRAW \
+ .fb_fillrect = sys_fillrect, \
+ .fb_copyarea = sys_copyarea, \
+ .fb_imageblit = sys_imageblit
+
+/*
+ * Helpers for framebuffers in DMA-able memory
+ */
+
+#define __FB_DEFAULT_DMAMEM_OPS_RDWR \
+ .fb_read = fb_sys_read, \
+ .fb_write = fb_sys_write
+
+#define __FB_DEFAULT_DMAMEM_OPS_DRAW \
+ .fb_fillrect = sys_fillrect, \
+ .fb_copyarea = sys_copyarea, \
+ .fb_imageblit = sys_imageblit
+
+/* fbmem.c */
extern int register_framebuffer(struct fb_info *fb_info);
extern void unregister_framebuffer(struct fb_info *fb_info);
-extern int fb_prepare_logo(struct fb_info *fb_info, int rotate);
-extern int fb_show_logo(struct fb_info *fb_info, int rotate);
extern char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size);
extern void fb_pad_unaligned_buffer(u8 *dst, u32 d_pitch, u8 *src, u32 idx,
u32 height, u32 shift_high, u32 shift_low, u32 mod);
@@ -605,10 +602,6 @@ extern int fb_get_color_depth(struct fb_var_screeninfo *var,
extern int fb_get_options(const char *name, char **option);
extern int fb_new_modelist(struct fb_info *info);
-extern bool fb_center_logo;
-extern int fb_logo_count;
-extern struct class *fb_class;
-
static inline void lock_fb_info(struct fb_info *info)
{
mutex_lock(&info->lock);
@@ -634,7 +627,7 @@ static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch,
}
}
-/* drivers/video/fb_defio.c */
+/* fb_defio.c */
int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma);
extern int fb_deferred_io_init(struct fb_info *info);
extern void fb_deferred_io_open(struct fb_info *info,
@@ -645,6 +638,75 @@ extern void fb_deferred_io_cleanup(struct fb_info *info);
extern int fb_deferred_io_fsync(struct file *file, loff_t start,
loff_t end, int datasync);
+/*
+ * Generate callbacks for deferred I/O
+ */
+
+#define __FB_GEN_DEFAULT_DEFERRED_OPS_RDWR(__prefix, __damage_range, __mode) \
+ static ssize_t __prefix ## _defio_read(struct fb_info *info, char __user *buf, \
+ size_t count, loff_t *ppos) \
+ { \
+ return fb_ ## __mode ## _read(info, buf, count, ppos); \
+ } \
+ static ssize_t __prefix ## _defio_write(struct fb_info *info, const char __user *buf, \
+ size_t count, loff_t *ppos) \
+ { \
+ unsigned long offset = *ppos; \
+ ssize_t ret = fb_ ## __mode ## _write(info, buf, count, ppos); \
+ if (ret > 0) \
+ __damage_range(info, offset, ret); \
+ return ret; \
+ }
+
+#define __FB_GEN_DEFAULT_DEFERRED_OPS_DRAW(__prefix, __damage_area, __mode) \
+ static void __prefix ## _defio_fillrect(struct fb_info *info, \
+ const struct fb_fillrect *rect) \
+ { \
+ __mode ## _fillrect(info, rect); \
+ __damage_area(info, rect->dx, rect->dy, rect->width, rect->height); \
+ } \
+ static void __prefix ## _defio_copyarea(struct fb_info *info, \
+ const struct fb_copyarea *area) \
+ { \
+ __mode ## _copyarea(info, area); \
+ __damage_area(info, area->dx, area->dy, area->width, area->height); \
+ } \
+ static void __prefix ## _defio_imageblit(struct fb_info *info, \
+ const struct fb_image *image) \
+ { \
+ __mode ## _imageblit(info, image); \
+ __damage_area(info, image->dx, image->dy, image->width, image->height); \
+ }
+
+#define FB_GEN_DEFAULT_DEFERRED_IOMEM_OPS(__prefix, __damage_range, __damage_area) \
+ __FB_GEN_DEFAULT_DEFERRED_OPS_RDWR(__prefix, __damage_range, io) \
+ __FB_GEN_DEFAULT_DEFERRED_OPS_DRAW(__prefix, __damage_area, cfb)
+
+#define FB_GEN_DEFAULT_DEFERRED_SYSMEM_OPS(__prefix, __damage_range, __damage_area) \
+ __FB_GEN_DEFAULT_DEFERRED_OPS_RDWR(__prefix, __damage_range, sys) \
+ __FB_GEN_DEFAULT_DEFERRED_OPS_DRAW(__prefix, __damage_area, sys)
+
+/*
+ * Initializes struct fb_ops for deferred I/O.
+ */
+
+#define __FB_DEFAULT_DEFERRED_OPS_RDWR(__prefix) \
+ .fb_read = __prefix ## _defio_read, \
+ .fb_write = __prefix ## _defio_write
+
+#define __FB_DEFAULT_DEFERRED_OPS_DRAW(__prefix) \
+ .fb_fillrect = __prefix ## _defio_fillrect, \
+ .fb_copyarea = __prefix ## _defio_copyarea, \
+ .fb_imageblit = __prefix ## _defio_imageblit
+
+#define __FB_DEFAULT_DEFERRED_OPS_MMAP(__prefix) \
+ .fb_mmap = fb_deferred_io_mmap
+
+#define FB_DEFAULT_DEFERRED_OPS(__prefix) \
+ __FB_DEFAULT_DEFERRED_OPS_RDWR(__prefix), \
+ __FB_DEFAULT_DEFERRED_OPS_DRAW(__prefix), \
+ __FB_DEFAULT_DEFERRED_OPS_MMAP(__prefix)
+
static inline bool fb_be_math(struct fb_info *info)
{
#ifdef CONFIG_FB_FOREIGN_ENDIAN
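A hedged sketch of the generator pair in use; the mydrv_ names and the two damage callbacks are invented, with the signatures the macros expect (range: info, offset, length; area: info, x, y, width, height):

	FB_GEN_DEFAULT_DEFERRED_IOMEM_OPS(mydrv,
					  mydrv_damage_range,
					  mydrv_damage_area)

	static const struct fb_ops mydrv_fb_ops = {
		.owner = THIS_MODULE,
		FB_DEFAULT_DEFERRED_OPS(mydrv),	/* mydrv_defio_* + fb_deferred_io_mmap */
	};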
@@ -664,14 +726,11 @@ static inline bool fb_be_math(struct fb_info *info)
#endif /* CONFIG_FB_FOREIGN_ENDIAN */
}
-/* drivers/video/fbsysfs.c */
extern struct fb_info *framebuffer_alloc(size_t size, struct device *dev);
extern void framebuffer_release(struct fb_info *info);
-extern int fb_init_device(struct fb_info *fb_info);
-extern void fb_cleanup_device(struct fb_info *head);
extern void fb_bl_default_curve(struct fb_info *fb_info, u8 off, u8 min, u8 max);
-/* drivers/video/fbmon.c */
+/* fbmon.c */
#define FB_MAXTIMINGS 0
#define FB_VSYNCTIMINGS 1
#define FB_HSYNCTIMINGS 2
@@ -705,7 +764,7 @@ extern int of_get_fb_videomode(struct device_node *np,
extern int fb_videomode_from_videomode(const struct videomode *vm,
struct fb_videomode *fbmode);
-/* drivers/video/modedb.c */
+/* modedb.c */
#define VESA_MODEDB_SIZE 43
#define DMT_SIZE 0x50
@@ -731,7 +790,7 @@ extern void fb_videomode_to_modelist(const struct fb_videomode *modedb, int num,
extern const struct fb_videomode *fb_find_best_display(const struct fb_monspecs *specs,
struct list_head *head);
-/* drivers/video/fbcmap.c */
+/* fbcmap.c */
extern int fb_alloc_cmap(struct fb_cmap *cmap, int len, int transp);
extern int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags);
extern void fb_dealloc_cmap(struct fb_cmap *cmap);
@@ -790,7 +849,10 @@ static inline bool fb_modesetting_disabled(const char *drvname)
}
#endif
-/* Convenience logging macros */
+/*
+ * Convenience logging macros
+ */
+
#define fb_err(fb_info, fmt, ...) \
pr_err("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
#define fb_notice(info, fmt, ...) \
@@ -802,4 +864,12 @@ static inline bool fb_modesetting_disabled(const char *drvname)
#define fb_dbg(fb_info, fmt, ...) \
pr_debug("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
+#define fb_warn_once(fb_info, fmt, ...) \
+ pr_warn_once("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
+
+#define fb_WARN_ONCE(fb_info, condition, fmt, ...) \
+ WARN_ONCE(condition, "fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
+#define fb_WARN_ON_ONCE(fb_info, x) \
+ fb_WARN_ONCE(fb_info, (x), "%s", "fb_WARN_ON_ONCE(" __stringify(x) ")")
+
#endif /* _LINUX_FB_H */
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index e066816f3519..78c8326d74ae 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -83,12 +83,17 @@ struct dentry;
static inline struct file *files_lookup_fd_raw(struct files_struct *files, unsigned int fd)
{
struct fdtable *fdt = rcu_dereference_raw(files->fdt);
-
- if (fd < fdt->max_fds) {
- fd = array_index_nospec(fd, fdt->max_fds);
- return rcu_dereference_raw(fdt->fd[fd]);
- }
- return NULL;
+ unsigned long mask = array_index_mask_nospec(fd, fdt->max_fds);
+ struct file *needs_masking;
+
+ /*
+ * 'mask' is zero for an out-of-bounds fd, all ones for ok.
+ * 'fd&mask' is 'fd' for ok, or 0 for out of bounds.
+ *
+ * Accessing fdt->fd[0] is ok, but needs masking of the result.
+ */
+ needs_masking = rcu_dereference_raw(fdt->fd[fd&mask]);
+ return (struct file *)(mask & (unsigned long)needs_masking);
}
static inline struct file *files_lookup_fd_locked(struct files_struct *files, unsigned int fd)
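Illustrative values for the branchless bounds check above (array_index_mask_nospec() returns all ones below the bound and zero at or above it):

	/*
	 * max_fds = 256, fd = 3:   mask = ~0UL, fdt->fd[3] is loaded and
	 *                          returned unchanged.
	 * max_fds = 256, fd = 300: mask = 0, fdt->fd[0] is loaded (always a
	 *                          valid address) and the result is forced
	 *                          to NULL - no branch to mispredict.
	 */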
@@ -98,20 +103,9 @@ static inline struct file *files_lookup_fd_locked(struct files_struct *files, un
return files_lookup_fd_raw(files, fd);
}
-static inline struct file *files_lookup_fd_rcu(struct files_struct *files, unsigned int fd)
-{
- RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
- "suspicious rcu_dereference_check() usage");
- return files_lookup_fd_raw(files, fd);
-}
-
-static inline struct file *lookup_fd_rcu(unsigned int fd)
-{
- return files_lookup_fd_rcu(current->files, fd);
-}
-
-struct file *task_lookup_fd_rcu(struct task_struct *task, unsigned int fd);
-struct file *task_lookup_next_fd_rcu(struct task_struct *task, unsigned int *fd);
+struct file *lookup_fdget_rcu(unsigned int fd);
+struct file *task_lookup_fdget_rcu(struct task_struct *task, unsigned int fd);
+struct file *task_lookup_next_fdget_rcu(struct task_struct *task, unsigned int *fd);
struct task_struct;
@@ -125,7 +119,7 @@ int iterate_fd(struct files_struct *, unsigned,
extern int close_fd(unsigned int fd);
extern int __close_range(unsigned int fd, unsigned int max_fd, unsigned int flags);
-extern struct file *close_fd_get_file(unsigned int fd);
+extern struct file *file_close_fd(unsigned int fd);
extern int unshare_fd(unsigned long unshare_flags, unsigned int max_fds,
struct files_struct **new_fdp);
diff --git a/include/linux/file.h b/include/linux/file.h
index 39704eae83e2..6834a29338c4 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -10,6 +10,7 @@
#include <linux/types.h>
#include <linux/posix_types.h>
#include <linux/errno.h>
+#include <linux/cleanup.h>
struct file;
@@ -80,6 +81,8 @@ static inline void fdput_pos(struct fd f)
fdput(f);
}
+DEFINE_CLASS(fd, struct fd, fdput(_T), fdget(fd), int fd)
+
extern int f_dupfd(unsigned int from, struct file *file, unsigned flags);
extern int replace_fd(unsigned fd, struct file *file, unsigned flags);
extern void set_close_on_exec(unsigned int fd, int flag);
@@ -88,20 +91,13 @@ extern int __get_unused_fd_flags(unsigned flags, unsigned long nofile);
extern int get_unused_fd_flags(unsigned flags);
extern void put_unused_fd(unsigned int fd);
-extern void fd_install(unsigned int fd, struct file *file);
+DEFINE_CLASS(get_unused_fd, int, if (_T >= 0) put_unused_fd(_T),
+ get_unused_fd_flags(flags), unsigned flags)
-extern int __receive_fd(struct file *file, int __user *ufd,
- unsigned int o_flags);
+extern void fd_install(unsigned int fd, struct file *file);
-extern int receive_fd(struct file *file, unsigned int o_flags);
+int receive_fd(struct file *file, int __user *ufd, unsigned int o_flags);
-static inline int receive_fd_user(struct file *file, int __user *ufd,
- unsigned int o_flags)
-{
- if (ufd == NULL)
- return -EFAULT;
- return __receive_fd(file, ufd, o_flags);
-}
int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags);
extern void flush_delayed_fput(void);
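A hedged sketch of what the two DEFINE_CLASS() lines enable (scope-based cleanup from <linux/cleanup.h>); the function is invented:

	static int hypothetical_op(int ufd)
	{
		CLASS(fd, f)(ufd);	/* fdput(f) runs automatically at scope exit */

		if (!f.file)
			return -EBADF;
		/* ... use f.file; no explicit fdput() on any return path ... */
		return 0;
	}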
diff --git a/include/linux/filelock.h b/include/linux/filelock.h
index efcdd1631d9b..95e868e09e29 100644
--- a/include/linux/filelock.h
+++ b/include/linux/filelock.h
@@ -144,7 +144,7 @@ int fcntl_setlk64(unsigned int, struct file *, unsigned int,
struct flock64 *);
#endif
-int fcntl_setlease(unsigned int fd, struct file *filp, long arg);
+int fcntl_setlease(unsigned int fd, struct file *filp, int arg);
int fcntl_getlease(struct file *filp);
/* fs/locks.c */
@@ -167,8 +167,8 @@ bool vfs_inode_has_locks(struct inode *inode);
int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl);
int __break_lease(struct inode *inode, unsigned int flags, unsigned int type);
void lease_get_mtime(struct inode *, struct timespec64 *time);
-int generic_setlease(struct file *, long, struct file_lock **, void **priv);
-int vfs_setlease(struct file *, long, struct file_lock **, void **);
+int generic_setlease(struct file *, int, struct file_lock **, void **priv);
+int vfs_setlease(struct file *, int, struct file_lock **, void **);
int lease_modify(struct file_lock *, int, struct list_head *);
struct notifier_block;
@@ -213,7 +213,7 @@ static inline int fcntl_setlk64(unsigned int fd, struct file *file,
return -EACCES;
}
#endif
-static inline int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
+static inline int fcntl_setlease(unsigned int fd, struct file *filp, int arg)
{
return -EINVAL;
}
@@ -306,13 +306,13 @@ static inline void lease_get_mtime(struct inode *inode,
return;
}
-static inline int generic_setlease(struct file *filp, long arg,
+static inline int generic_setlease(struct file *filp, int arg,
struct file_lock **flp, void **priv)
{
return -EINVAL;
}
-static inline int vfs_setlease(struct file *filp, long arg,
+static inline int vfs_setlease(struct file *filp, int arg,
struct file_lock **lease, void **priv)
{
return -EINVAL;
diff --git a/include/linux/filter.h b/include/linux/filter.h
index bbce89937fde..68fb6c8142fe 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -69,6 +69,9 @@ struct ctl_table_header;
/* unused opcode to mark special load instruction. Same as BPF_ABS */
#define BPF_PROBE_MEM 0x20
+/* unused opcode to mark special ldsx instruction. Same as BPF_IND */
+#define BPF_PROBE_MEMSX 0x40
+
/* unused opcode to mark call to interpreter with arguments */
#define BPF_CALL_ARGS 0xe0
@@ -90,39 +93,49 @@ struct ctl_table_header;
/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
-#define BPF_ALU64_REG(OP, DST, SRC) \
+#define BPF_ALU64_REG_OFF(OP, DST, SRC, OFF) \
((struct bpf_insn) { \
.code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \
.dst_reg = DST, \
.src_reg = SRC, \
- .off = 0, \
+ .off = OFF, \
.imm = 0 })
-#define BPF_ALU32_REG(OP, DST, SRC) \
+#define BPF_ALU64_REG(OP, DST, SRC) \
+ BPF_ALU64_REG_OFF(OP, DST, SRC, 0)
+
+#define BPF_ALU32_REG_OFF(OP, DST, SRC, OFF) \
((struct bpf_insn) { \
.code = BPF_ALU | BPF_OP(OP) | BPF_X, \
.dst_reg = DST, \
.src_reg = SRC, \
- .off = 0, \
+ .off = OFF, \
.imm = 0 })
+#define BPF_ALU32_REG(OP, DST, SRC) \
+ BPF_ALU32_REG_OFF(OP, DST, SRC, 0)
+
/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */
-#define BPF_ALU64_IMM(OP, DST, IMM) \
+#define BPF_ALU64_IMM_OFF(OP, DST, IMM, OFF) \
((struct bpf_insn) { \
.code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \
.dst_reg = DST, \
.src_reg = 0, \
- .off = 0, \
+ .off = OFF, \
.imm = IMM })
+#define BPF_ALU64_IMM(OP, DST, IMM) \
+ BPF_ALU64_IMM_OFF(OP, DST, IMM, 0)
-#define BPF_ALU32_IMM(OP, DST, IMM) \
+#define BPF_ALU32_IMM_OFF(OP, DST, IMM, OFF) \
((struct bpf_insn) { \
.code = BPF_ALU | BPF_OP(OP) | BPF_K, \
.dst_reg = DST, \
.src_reg = 0, \
- .off = 0, \
+ .off = OFF, \
.imm = IMM })
+#define BPF_ALU32_IMM(OP, DST, IMM) \
+ BPF_ALU32_IMM_OFF(OP, DST, IMM, 0)
/* Endianness conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */
@@ -134,6 +147,16 @@ struct ctl_table_header;
.off = 0, \
.imm = LEN })
+/* Byte Swap, bswap16/32/64 */
+
+#define BPF_BSWAP(DST, LEN) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU64 | BPF_END | BPF_SRC(BPF_TO_LE), \
+ .dst_reg = DST, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = LEN })
+
/* Short form of mov, dst_reg = src_reg */
#define BPF_MOV64_REG(DST, SRC) \
@@ -170,6 +193,24 @@ struct ctl_table_header;
.off = 0, \
.imm = IMM })
+/* Short form of movsx, dst_reg = (s8,s16,s32)src_reg */
+
+#define BPF_MOVSX64_REG(DST, SRC, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU64 | BPF_MOV | BPF_X, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
+#define BPF_MOVSX32_REG(DST, SRC, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU | BPF_MOV | BPF_X, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
/* Special form of mov32, used for doing explicit zero extension on dst. */
#define BPF_ZEXT_REG(DST) \
((struct bpf_insn) { \
@@ -254,6 +295,16 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
.off = OFF, \
.imm = 0 })
+/* Memory load, dst_reg = *(signed size *) (src_reg + off16) */
+
+#define BPF_LDX_MEMSX(SIZE, DST, SRC, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEMSX, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
/* Memory store, *(uint *) (dst_reg + off16) = src_reg */
#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \
@@ -685,7 +736,7 @@ static inline void bpf_compute_and_save_data_end(
cb->data_end = skb->data + skb_headlen(skb);
}
-/* Restore data saved by bpf_compute_data_pointers(). */
+/* Restore data saved by bpf_compute_and_save_data_end(). */
static inline void bpf_restore_data_end(
struct sk_buff *skb, void *saved_data_end)
{
@@ -765,23 +816,6 @@ DECLARE_STATIC_KEY_FALSE(bpf_master_redirect_enabled_key);
u32 xdp_master_redirect(struct xdp_buff *xdp);
-static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
- struct xdp_buff *xdp)
-{
- /* Driver XDP hooks are invoked within a single NAPI poll cycle and thus
- * under local_bh_disable(), which provides the needed RCU protection
- * for accessing map entries.
- */
- u32 act = __bpf_prog_run(prog, xdp, BPF_DISPATCHER_FUNC(xdp));
-
- if (static_branch_unlikely(&bpf_master_redirect_enabled_key)) {
- if (act == XDP_TX && netif_is_bond_slave(xdp->rxq->dev))
- act = xdp_master_redirect(xdp);
- }
-
- return act;
-}
-
void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog);
static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
@@ -874,7 +908,6 @@ void bpf_prog_free(struct bpf_prog *fp);
bool bpf_opcode_in_insntable(u8 code);
-void bpf_prog_free_linfo(struct bpf_prog *prog);
void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
const u32 *insn_to_jit_off);
int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog);
@@ -921,6 +954,8 @@ bool bpf_jit_needs_zext(void);
bool bpf_jit_supports_subprog_tailcalls(void);
bool bpf_jit_supports_kfunc_call(void);
bool bpf_jit_supports_far_kfunc_call(void);
+bool bpf_jit_supports_exceptions(void);
+void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie);
bool bpf_helper_changes_pkt_data(void *func);
static inline bool bpf_dump_raw_ok(const struct cred *cred)
@@ -990,12 +1025,6 @@ int xdp_do_redirect_frame(struct net_device *dev,
struct bpf_prog *prog);
void xdp_do_flush(void);
-/* The xdp_do_flush_map() helper has been renamed to drop the _map suffix, as
- * it is no longer only flushing maps. Keep this define for compatibility
- * until all drivers are updated - do not use xdp_do_flush_map() in new code!
- */
-#define xdp_do_flush_map xdp_do_flush
-
void bpf_warn_invalid_xdp_action(struct net_device *dev, struct bpf_prog *prog, u32 act);
#ifdef CONFIG_INET
@@ -1038,7 +1067,7 @@ struct bpf_binary_header *
bpf_jit_binary_pack_hdr(const struct bpf_prog *fp);
void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns);
-void bpf_prog_pack_free(struct bpf_binary_header *hdr);
+void bpf_prog_pack_free(void *ptr, u32 size);
static inline bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
{
@@ -1136,6 +1165,7 @@ const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
bool is_bpf_text_address(unsigned long addr);
int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
char *sym);
+struct bpf_prog *bpf_prog_ksym_find(unsigned long addr);
static inline const char *
bpf_address_lookup(unsigned long addr, unsigned long *size,
@@ -1203,6 +1233,11 @@ static inline int bpf_get_kallsym(unsigned int symnum, unsigned long *value,
return -ERANGE;
}
+static inline struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
+{
+ return NULL;
+}
+
static inline const char *
bpf_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char **modname, char *sym)
@@ -1294,6 +1329,7 @@ struct bpf_sock_addr_kern {
*/
u64 tmp_reg;
void *t_ctx; /* Attach type specific context. */
+ u32 uaddrlen;
};
struct bpf_sock_ops_kern {
@@ -1581,10 +1617,9 @@ static inline void *bpf_xdp_pointer(struct xdp_buff *xdp, u32 offset, u32 len)
return NULL;
}
-static inline void *bpf_xdp_copy_buf(struct xdp_buff *xdp, unsigned long off, void *buf,
- unsigned long len, bool flush)
+static inline void bpf_xdp_copy_buf(struct xdp_buff *xdp, unsigned long off, void *buf,
+ unsigned long len, bool flush)
{
- return NULL;
}
#endif /* CONFIG_NET */
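
A minimal sketch of how the new cpu-v4 instruction macros above compose into a sequence (register choices and values are illustrative; BPF_REG_*, BPF_MOV64_REG() and BPF_EXIT_INSN() are existing helpers from this header):

#include <linux/filter.h>

static const struct bpf_insn sample[] = {
	/* r2 = *(s8 *)(r1 + 0) -- sign-extending load (BPF_MEMSX) */
	BPF_LDX_MEMSX(BPF_B, BPF_REG_2, BPF_REG_1, 0),
	/* r2 = (s32)r2 -- movsx from the 32-bit subregister (off = 32) */
	BPF_MOVSX64_REG(BPF_REG_2, BPF_REG_2, 32),
	/* r2 s/= -2 -- off = 1 selects the signed-division encoding */
	BPF_ALU64_IMM_OFF(BPF_DIV, BPF_REG_2, -2, 1),
	/* r0 = r2, then byte-swap r0 in place (unconditional bswap64) */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_BSWAP(BPF_REG_0, 64),
	BPF_EXIT_INSN(),
};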
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index efb6e2cf2034..dd9f2d765e68 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -75,7 +75,7 @@ void fw_csr_iterator_init(struct fw_csr_iterator *ci, const u32 *p);
int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value);
int fw_csr_string(const u32 *directory, int key, char *buf, size_t size);
-extern struct bus_type fw_bus_type;
+extern const struct bus_type fw_bus_type;
struct fw_card_driver;
struct fw_node;
@@ -261,6 +261,15 @@ typedef void (*fw_packet_callback_t)(struct fw_packet *packet,
typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
void *data, size_t length,
void *callback_data);
+typedef void (*fw_transaction_callback_with_tstamp_t)(struct fw_card *card, int rcode,
+ u32 request_tstamp, u32 response_tstamp, void *data,
+ size_t length, void *callback_data);
+
+union fw_transaction_callback {
+ fw_transaction_callback_t without_tstamp;
+ fw_transaction_callback_with_tstamp_t with_tstamp;
+};
+
/*
* This callback handles an inbound request subaction. It is called in
* RCU read-side context, therefore must not sleep.
@@ -312,6 +321,7 @@ struct fw_transaction {
struct fw_card *card;
bool is_split_transaction;
struct timer_list split_timeout_timer;
+ u32 split_timeout_cycle;
struct fw_packet packet;
@@ -319,7 +329,8 @@ struct fw_transaction {
* The data passed to the callback is valid only during the
* callback.
*/
- fw_transaction_callback_t callback;
+ union fw_transaction_callback callback;
+ bool with_tstamp;
void *callback_data;
};
@@ -345,10 +356,71 @@ void fw_send_response(struct fw_card *card,
struct fw_request *request, int rcode);
int fw_get_request_speed(struct fw_request *request);
u32 fw_request_get_timestamp(const struct fw_request *request);
-void fw_send_request(struct fw_card *card, struct fw_transaction *t,
- int tcode, int destination_id, int generation, int speed,
- unsigned long long offset, void *payload, size_t length,
- fw_transaction_callback_t callback, void *callback_data);
+
+void __fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
+ int destination_id, int generation, int speed, unsigned long long offset,
+ void *payload, size_t length, union fw_transaction_callback callback,
+ bool with_tstamp, void *callback_data);
+
+/**
+ * fw_send_request() - submit a request packet for transmission and get a callback for the
+ * response subaction, without a time stamp
+ * @card: interface to send the request at
+ * @t: transaction instance to which the request belongs
+ * @tcode: transaction code
+ * @destination_id: destination node ID, consisting of bus_ID and phy_ID
+ * @generation: bus generation in which request and response are valid
+ * @speed: transmission speed
+ * @offset: 48bit wide offset into destination's address space
+ * @payload: data payload for the request subaction
+ * @length: length of the payload, in bytes
+ * @callback: function to be called when the transaction is completed
+ * @callback_data: data to be passed to the transaction completion callback
+ *
+ * A variation of __fw_send_request() that generates a callback for the response
+ * subaction without a time stamp.
+ */
+static inline void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
+ int destination_id, int generation, int speed,
+ unsigned long long offset, void *payload, size_t length,
+ fw_transaction_callback_t callback, void *callback_data)
+{
+ union fw_transaction_callback cb = {
+ .without_tstamp = callback,
+ };
+ __fw_send_request(card, t, tcode, destination_id, generation, speed, offset, payload,
+ length, cb, false, callback_data);
+}
+
+/**
+ * fw_send_request_with_tstamp() - submit a request packet for transmission and get a callback
+ * for the response subaction, with time stamps
+ * @card: interface to send the request at
+ * @t: transaction instance to which the request belongs
+ * @tcode: transaction code
+ * @destination_id: destination node ID, consisting of bus_ID and phy_ID
+ * @generation: bus generation in which request and response are valid
+ * @speed: transmission speed
+ * @offset: 48bit wide offset into destination's address space
+ * @payload: data payload for the request subaction
+ * @length: length of the payload, in bytes
+ * @callback: function to be called when the transaction is completed
+ * @callback_data: data to be passed to the transaction completion callback
+ *
+ * A variation of __fw_send_request() that generates a callback for the response subaction
+ * with time stamps.
+ */
+static inline void fw_send_request_with_tstamp(struct fw_card *card, struct fw_transaction *t,
+ int tcode, int destination_id, int generation, int speed, unsigned long long offset,
+ void *payload, size_t length, fw_transaction_callback_with_tstamp_t callback,
+ void *callback_data)
+{
+ union fw_transaction_callback cb = {
+ .with_tstamp = callback,
+ };
+ __fw_send_request(card, t, tcode, destination_id, generation, speed, offset, payload,
+ length, cb, true, callback_data);
+}
+
int fw_cancel_transaction(struct fw_card *card,
struct fw_transaction *transaction);
int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
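
A hypothetical consumer sketch for the timestamped variant above (the target address, speed and node identifiers are placeholders; the callback signature matches fw_transaction_callback_with_tstamp_t):

static void demo_complete(struct fw_card *card, int rcode,
			  u32 request_tstamp, u32 response_tstamp,
			  void *data, size_t length, void *callback_data)
{
	if (rcode == RCODE_COMPLETE)
		pr_info("round trip: req %08x rsp %08x\n",
			request_tstamp, response_tstamp);
}

static void demo_read_quadlet(struct fw_card *card, struct fw_transaction *t,
			      int node_id, int generation, __be32 *buf)
{
	fw_send_request_with_tstamp(card, t, TCODE_READ_QUADLET_REQUEST,
				    node_id, generation, SCODE_100,
				    CSR_REGISTER_BASE + CSR_CYCLE_TIME,
				    buf, sizeof(*buf), demo_complete, NULL);
}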
diff --git a/include/linux/firmware.h b/include/linux/firmware.h
index de7fea3bca51..0311858b46ce 100644
--- a/include/linux/firmware.h
+++ b/include/linux/firmware.h
@@ -27,6 +27,7 @@ struct firmware {
* @FW_UPLOAD_ERR_INVALID_SIZE: invalid firmware image size
* @FW_UPLOAD_ERR_RW_ERROR: read or write to HW failed, see kernel log
* @FW_UPLOAD_ERR_WEAROUT: FLASH device is approaching wear-out, wait & retry
+ * @FW_UPLOAD_ERR_FW_INVALID: invalid firmware file
* @FW_UPLOAD_ERR_MAX: Maximum error code marker
*/
enum fw_upload_err {
@@ -38,6 +39,7 @@ enum fw_upload_err {
FW_UPLOAD_ERR_INVALID_SIZE,
FW_UPLOAD_ERR_RW_ERROR,
FW_UPLOAD_ERR_WEAROUT,
+ FW_UPLOAD_ERR_FW_INVALID,
FW_UPLOAD_ERR_MAX
};
diff --git a/include/linux/firmware/imx/dsp.h b/include/linux/firmware/imx/dsp.h
index 4f7895a3b73c..1f176a2683fe 100644
--- a/include/linux/firmware/imx/dsp.h
+++ b/include/linux/firmware/imx/dsp.h
@@ -37,17 +37,11 @@ struct imx_dsp_ipc {
static inline void imx_dsp_set_data(struct imx_dsp_ipc *ipc, void *data)
{
- if (!ipc)
- return;
-
ipc->private_data = data;
}
static inline void *imx_dsp_get_data(struct imx_dsp_ipc *ipc)
{
- if (!ipc)
- return NULL;
-
return ipc->private_data;
}
diff --git a/include/linux/firmware/imx/sci.h b/include/linux/firmware/imx/sci.h
index 5cc63fe7e84d..df17196df5ff 100644
--- a/include/linux/firmware/imx/sci.h
+++ b/include/linux/firmware/imx/sci.h
@@ -21,31 +21,37 @@ int imx_scu_enable_general_irq_channel(struct device *dev);
int imx_scu_irq_register_notifier(struct notifier_block *nb);
int imx_scu_irq_unregister_notifier(struct notifier_block *nb);
int imx_scu_irq_group_enable(u8 group, u32 mask, u8 enable);
+int imx_scu_irq_get_status(u8 group, u32 *irq_status);
int imx_scu_soc_init(struct device *dev);
#else
static inline int imx_scu_soc_init(struct device *dev)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int imx_scu_enable_general_irq_channel(struct device *dev)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int imx_scu_irq_register_notifier(struct notifier_block *nb)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int imx_scu_irq_unregister_notifier(struct notifier_block *nb)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int imx_scu_irq_group_enable(u8 group, u32 mask, u8 enable)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
+}
+
+static inline int imx_scu_irq_get_status(u8 group, u32 *irq_status)
+{
+ return -EOPNOTSUPP;
}
#endif
#endif /* _SC_SCI_H */
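
A small, hedged sketch of the new status query (the group number is illustrative and SoC-specific; the stub returns -EOPNOTSUPP when SCU firmware support is not built in):

static void demo_dump_scu_irq_status(void)
{
	u32 status = 0;

	/* group 0 is illustrative; real group numbers depend on the SoC */
	if (!imx_scu_irq_get_status(0, &status))
		pr_info("SCU irq group 0 status: 0x%08x\n", status);
}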
diff --git a/include/linux/firmware/intel/stratix10-smc.h b/include/linux/firmware/intel/stratix10-smc.h
index a718f853d457..ee80ca4bb0d0 100644
--- a/include/linux/firmware/intel/stratix10-smc.h
+++ b/include/linux/firmware/intel/stratix10-smc.h
@@ -467,6 +467,31 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE)
INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FIRMWARE_VERSION)
/**
+ * SMC call protocol for Mailbox, with FUNCID starting from 60
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_MBOX_SEND_CMD
+ * a1 mailbox command code
+ * a2 physical address that contains the mailbox command data (header not included)
+ * a3 mailbox command data size, in words
+ * a4 set to 0 for CASUAL, set to 1 for URGENT
+ * a5 physical address where the secure firmware puts the response data
+ *    (header not included)
+ * a6 maximum size, in words, of the response data buffer at a5
+ * a7 not used
+ *
+ * Return status
+ * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_REJECTED or
+ * INTEL_SIP_SMC_STATUS_ERROR
+ * a1 mailbox error code
+ * a2 response data length, in words
+ * a3 not used
+ */
+#define INTEL_SIP_SMC_FUNCID_MBOX_SEND_CMD 60
+ #define INTEL_SIP_SMC_MBOX_SEND_CMD \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_MBOX_SEND_CMD)
+
+/**
* Request INTEL_SIP_SMC_SVC_VERSION
*
* Sync call used to query the SIP SMC API Version
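
A sketch of the register protocol documented above, issued as a raw SMC (the command code and buffer addresses are placeholders; in practice callers go through the Stratix10 service layer and its new COMMAND_MBOX_SEND_CMD rather than making the SMC call directly):

#include <linux/arm-smccc.h>

static int demo_mbox_send(u32 mbox_cmd, phys_addr_t cmd_pa, u32 cmd_words,
			  phys_addr_t rsp_pa, u32 rsp_max_words)
{
	struct arm_smccc_res res;

	arm_smccc_smc(INTEL_SIP_SMC_MBOX_SEND_CMD, mbox_cmd, cmd_pa,
		      cmd_words, 0 /* CASUAL */, rsp_pa, rsp_max_words,
		      0, &res);

	if (res.a0 != INTEL_SIP_SMC_STATUS_OK)
		return -EIO;

	/* res.a1: mailbox error code, res.a2: response length in words */
	return res.a2;
}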
diff --git a/include/linux/firmware/intel/stratix10-svc-client.h b/include/linux/firmware/intel/stratix10-svc-client.h
index 0c16037fd08d..60ed82112680 100644
--- a/include/linux/firmware/intel/stratix10-svc-client.h
+++ b/include/linux/firmware/intel/stratix10-svc-client.h
@@ -118,6 +118,9 @@ struct stratix10_svc_chan;
* @COMMAND_SMC_SVC_VERSION: Non-mailbox SMC SVC API Version,
* return status is SVC_STATUS_OK
*
+ * @COMMAND_MBOX_SEND_CMD: send generic mailbox command, return status is
+ * SVC_STATUS_OK or SVC_STATUS_ERROR
+ *
* @COMMAND_RSU_DCMF_STATUS: query firmware for the DCMF status
* return status is SVC_STATUS_OK or SVC_STATUS_ERROR
*
@@ -164,6 +167,8 @@ enum stratix10_svc_command_code {
COMMAND_FCS_RANDOM_NUMBER_GEN,
/* for general status poll */
COMMAND_POLL_SERVICE_STATUS = 40,
+ /* for generic mailbox send command */
+ COMMAND_MBOX_SEND_CMD = 100,
/* Non-mailbox SMC Call */
COMMAND_SMC_SVC_VERSION = 200,
};
diff --git a/include/linux/firmware/mediatek/mtk-adsp-ipc.h b/include/linux/firmware/mediatek/mtk-adsp-ipc.h
index 28fd313340b8..5b1d16fa3f56 100644
--- a/include/linux/firmware/mediatek/mtk-adsp-ipc.h
+++ b/include/linux/firmware/mediatek/mtk-adsp-ipc.h
@@ -46,17 +46,11 @@ struct mtk_adsp_ipc {
static inline void mtk_adsp_ipc_set_data(struct mtk_adsp_ipc *ipc, void *data)
{
- if (!ipc)
- return;
-
ipc->private_data = data;
}
static inline void *mtk_adsp_ipc_get_data(struct mtk_adsp_ipc *ipc)
{
- if (!ipc)
- return NULL;
-
return ipc->private_data;
}
diff --git a/include/linux/firmware/meson/meson_sm.h b/include/linux/firmware/meson/meson_sm.h
index 95b0da2326a9..8eaf8922ab02 100644
--- a/include/linux/firmware/meson/meson_sm.h
+++ b/include/linux/firmware/meson/meson_sm.h
@@ -19,7 +19,7 @@ enum {
struct meson_sm_firmware;
int meson_sm_call(struct meson_sm_firmware *fw, unsigned int cmd_index,
- u32 *ret, u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4);
+ s32 *ret, u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4);
int meson_sm_call_write(struct meson_sm_firmware *fw, void *buffer,
unsigned int b_size, unsigned int cmd_index, u32 arg0,
u32 arg1, u32 arg2, u32 arg3, u32 arg4);
diff --git a/include/linux/firmware/qcom/qcom_qseecom.h b/include/linux/firmware/qcom/qcom_qseecom.h
new file mode 100644
index 000000000000..5c28298a98be
--- /dev/null
+++ b/include/linux/firmware/qcom/qcom_qseecom.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Driver for Qualcomm Secure Execution Environment (SEE) interface (QSEECOM).
+ * Responsible for setting up and managing QSEECOM client devices.
+ *
+ * Copyright (C) 2023 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#ifndef __QCOM_QSEECOM_H
+#define __QCOM_QSEECOM_H
+
+#include <linux/auxiliary_bus.h>
+#include <linux/types.h>
+
+#include <linux/firmware/qcom/qcom_scm.h>
+
+/**
+ * struct qseecom_client - QSEECOM client device.
+ * @aux_dev: Underlying auxiliary device.
+ * @app_id: ID of the loaded application.
+ */
+struct qseecom_client {
+ struct auxiliary_device aux_dev;
+ u32 app_id;
+};
+
+/**
+ * qcom_qseecom_app_send() - Send to and receive data from a given QSEE app.
+ * @client: The QSEECOM client associated with the target app.
+ * @req: Request buffer sent to the app (must be DMA-mappable).
+ * @req_size: Size of the request buffer.
+ * @rsp: Response buffer, written to by the app (must be DMA-mappable).
+ * @rsp_size: Size of the response buffer.
+ *
+ * Sends a request to the QSEE app associated with the given client and reads
+ * back its response. The caller must provide two DMA memory regions, one for
+ * the request and one for the response, and fill out the @req region with the
+ * respective (app-specific) request data. The QSEE app reads this and returns
+ * its response in the @rsp region.
+ *
+ * Note: This is a convenience wrapper around qcom_scm_qseecom_app_send().
+ * Clients should prefer to use this wrapper.
+ *
+ * Return: Zero on success, nonzero on failure.
+ */
+static inline int qcom_qseecom_app_send(struct qseecom_client *client, void *req, size_t req_size,
+ void *rsp, size_t rsp_size)
+{
+ return qcom_scm_qseecom_app_send(client->app_id, req, req_size, rsp, rsp_size);
+}
+
+#endif /* __QCOM_QSEECOM_H */
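
A hypothetical auxiliary-driver sketch using the wrapper above (the request/response layout is app-specific and the single-word buffers are placeholders; kzalloc'd memory satisfies the DMA-mappable requirement):

#include <linux/slab.h>

static int demo_qseecom_probe(struct auxiliary_device *aux_dev,
			      const struct auxiliary_device_id *id)
{
	struct qseecom_client *client =
		container_of(aux_dev, struct qseecom_client, aux_dev);
	u32 *req, *rsp;
	int ret = -ENOMEM;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
	if (!req || !rsp)
		goto out;

	*req = 0x1;	/* app-specific command word, illustrative */
	ret = qcom_qseecom_app_send(client, req, sizeof(*req),
				    rsp, sizeof(*rsp));
out:
	kfree(req);
	kfree(rsp);
	return ret;
}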
diff --git a/include/linux/firmware/qcom/qcom_scm.h b/include/linux/firmware/qcom/qcom_scm.h
index 250ea4efb7cb..ccaf28846054 100644
--- a/include/linux/firmware/qcom/qcom_scm.h
+++ b/include/linux/firmware/qcom/qcom_scm.h
@@ -59,12 +59,12 @@ enum qcom_scm_ice_cipher {
#define QCOM_SCM_PERM_RW (QCOM_SCM_PERM_READ | QCOM_SCM_PERM_WRITE)
#define QCOM_SCM_PERM_RWX (QCOM_SCM_PERM_RW | QCOM_SCM_PERM_EXEC)
-extern bool qcom_scm_is_available(void);
+bool qcom_scm_is_available(void);
-extern int qcom_scm_set_cold_boot_addr(void *entry);
-extern int qcom_scm_set_warm_boot_addr(void *entry);
-extern void qcom_scm_cpu_power_down(u32 flags);
-extern int qcom_scm_set_remote_state(u32 state, u32 id);
+int qcom_scm_set_cold_boot_addr(void *entry);
+int qcom_scm_set_warm_boot_addr(void *entry);
+void qcom_scm_cpu_power_down(u32 flags);
+int qcom_scm_set_remote_state(u32 state, u32 id);
struct qcom_scm_pas_metadata {
void *ptr;
@@ -72,54 +72,69 @@ struct qcom_scm_pas_metadata {
ssize_t size;
};
-extern int qcom_scm_pas_init_image(u32 peripheral, const void *metadata,
- size_t size,
- struct qcom_scm_pas_metadata *ctx);
+int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
+ struct qcom_scm_pas_metadata *ctx);
void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx);
-extern int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr,
- phys_addr_t size);
-extern int qcom_scm_pas_auth_and_reset(u32 peripheral);
-extern int qcom_scm_pas_shutdown(u32 peripheral);
-extern bool qcom_scm_pas_supported(u32 peripheral);
-
-extern int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val);
-extern int qcom_scm_io_writel(phys_addr_t addr, unsigned int val);
-
-extern bool qcom_scm_restore_sec_cfg_available(void);
-extern int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare);
-extern int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size);
-extern int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare);
-extern int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size);
-extern int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
- u32 cp_nonpixel_start,
- u32 cp_nonpixel_size);
-extern int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
- u64 *src,
- const struct qcom_scm_vmperm *newvm,
- unsigned int dest_cnt);
-
-extern bool qcom_scm_ocmem_lock_available(void);
-extern int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset,
- u32 size, u32 mode);
-extern int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset,
- u32 size);
-
-extern bool qcom_scm_ice_available(void);
-extern int qcom_scm_ice_invalidate_key(u32 index);
-extern int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
- enum qcom_scm_ice_cipher cipher,
- u32 data_unit_size);
-
-extern bool qcom_scm_hdcp_available(void);
-extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
- u32 *resp);
-
-extern int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt);
-extern int qcom_scm_qsmmu500_wait_safe_toggle(bool en);
-
-extern int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
- u64 limit_node, u32 node_id, u64 version);
-extern int qcom_scm_lmh_profile_change(u32 profile_id);
-extern bool qcom_scm_lmh_dcvsh_available(void);
+int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size);
+int qcom_scm_pas_auth_and_reset(u32 peripheral);
+int qcom_scm_pas_shutdown(u32 peripheral);
+bool qcom_scm_pas_supported(u32 peripheral);
+
+int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val);
+int qcom_scm_io_writel(phys_addr_t addr, unsigned int val);
+
+bool qcom_scm_restore_sec_cfg_available(void);
+int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare);
+int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size);
+int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare);
+int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size);
+int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
+ u32 cp_nonpixel_start, u32 cp_nonpixel_size);
+int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, u64 *src,
+ const struct qcom_scm_vmperm *newvm,
+ unsigned int dest_cnt);
+
+bool qcom_scm_ocmem_lock_available(void);
+int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
+ u32 mode);
+int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size);
+
+bool qcom_scm_ice_available(void);
+int qcom_scm_ice_invalidate_key(u32 index);
+int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
+ enum qcom_scm_ice_cipher cipher, u32 data_unit_size);
+
+bool qcom_scm_hdcp_available(void);
+int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp);
+
+int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt);
+int qcom_scm_qsmmu500_wait_safe_toggle(bool en);
+
+int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
+ u64 limit_node, u32 node_id, u64 version);
+int qcom_scm_lmh_profile_change(u32 profile_id);
+bool qcom_scm_lmh_dcvsh_available(void);
+
+#ifdef CONFIG_QCOM_QSEECOM
+
+int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id);
+int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size, void *rsp,
+ size_t rsp_size);
+
+#else /* CONFIG_QCOM_QSEECOM */
+
+static inline int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id)
+{
+ return -EINVAL;
+}
+
+static inline int qcom_scm_qseecom_app_send(u32 app_id, void *req,
+ size_t req_size, void *rsp,
+ size_t rsp_size)
+{
+ return -EINVAL;
+}
+
+#endif /* CONFIG_QCOM_QSEECOM */
#endif
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
index f5da51677069..9a7e52739251 100644
--- a/include/linux/firmware/xlnx-zynqmp.h
+++ b/include/linux/firmware/xlnx-zynqmp.h
@@ -4,7 +4,7 @@
*
* Copyright (C) 2014-2021 Xilinx
*
- * Michal Simek <michal.simek@xilinx.com>
+ * Michal Simek <michal.simek@amd.com>
* Davorin Mista <davorin.mista@aggios.com>
* Jolly Shah <jollys@xilinx.com>
* Rajan Vaja <rajanv@xilinx.com>
@@ -32,8 +32,25 @@
#define PM_SIP_SVC 0xC2000000
/* PM API versions */
+#define PM_API_VERSION_1 1
#define PM_API_VERSION_2 2
+#define PM_PINCTRL_PARAM_SET_VERSION 2
+
+#define ZYNQMP_FAMILY_CODE 0x23
+#define VERSAL_FAMILY_CODE 0x26
+
+/* Used when all subfamilies of the platform need to be supported */
+#define ALL_SUB_FAMILY_CODE 0x00
+#define VERSAL_SUB_FAMILY_CODE 0x01
+#define VERSALNET_SUB_FAMILY_CODE 0x03
+
+#define FAMILY_CODE_MASK GENMASK(27, 21)
+#define SUB_FAMILY_CODE_MASK GENMASK(20, 19)
+
+#define API_ID_MASK GENMASK(7, 0)
+#define MODULE_ID_MASK GENMASK(11, 8)
+
/* ATF only commands */
#define TF_A_PM_REGISTER_SGI 0xa04
#define PM_GET_TRUSTZONE_VERSION 0xa03
@@ -78,15 +95,41 @@
/*
* Node IDs for the Error Events.
*/
-#define EVENT_ERROR_PMC_ERR1 (0x28100000U)
-#define EVENT_ERROR_PMC_ERR2 (0x28104000U)
-#define EVENT_ERROR_PSM_ERR1 (0x28108000U)
-#define EVENT_ERROR_PSM_ERR2 (0x2810C000U)
+#define VERSAL_EVENT_ERROR_PMC_ERR1 (0x28100000U)
+#define VERSAL_EVENT_ERROR_PMC_ERR2 (0x28104000U)
+#define VERSAL_EVENT_ERROR_PSM_ERR1 (0x28108000U)
+#define VERSAL_EVENT_ERROR_PSM_ERR2 (0x2810C000U)
+
+#define VERSAL_NET_EVENT_ERROR_PMC_ERR1 (0x28100000U)
+#define VERSAL_NET_EVENT_ERROR_PMC_ERR2 (0x28104000U)
+#define VERSAL_NET_EVENT_ERROR_PMC_ERR3 (0x28108000U)
+#define VERSAL_NET_EVENT_ERROR_PSM_ERR1 (0x2810C000U)
+#define VERSAL_NET_EVENT_ERROR_PSM_ERR2 (0x28110000U)
+#define VERSAL_NET_EVENT_ERROR_PSM_ERR3 (0x28114000U)
+#define VERSAL_NET_EVENT_ERROR_PSM_ERR4 (0x28118000U)
/* ZynqMP SD tap delay tuning */
#define SD_ITAPDLY 0xFF180314
#define SD_OTAPDLYSEL 0xFF180318
+/**
+ * XPM_EVENT_ERROR_MASK_DDRMC_CR: Error event mask for DDRMC MC Correctable ECC Error.
+ */
+#define XPM_EVENT_ERROR_MASK_DDRMC_CR BIT(18)
+
+/**
+ * XPM_EVENT_ERROR_MASK_DDRMC_NCR: Error event mask for DDRMC MC Non-Correctable ECC Error.
+ */
+#define XPM_EVENT_ERROR_MASK_DDRMC_NCR BIT(19)
+#define XPM_EVENT_ERROR_MASK_NOC_NCR BIT(13)
+#define XPM_EVENT_ERROR_MASK_NOC_CR BIT(12)
+
+enum pm_module_id {
+ PM_MODULE_ID = 0x0,
+ XSEM_MODULE_ID = 0x3,
+ TF_A_MODULE_ID = 0xa,
+};
+
enum pm_api_cb_id {
PM_INIT_SUSPEND_CB = 30,
PM_ACKNOWLEDGE_CB = 31,
@@ -94,6 +137,7 @@ enum pm_api_cb_id {
};
enum pm_api_id {
+ PM_API_FEATURES = 0,
PM_GET_API_VERSION = 1,
PM_REGISTER_NOTIFIER = 5,
PM_FORCE_POWERDOWN = 8,
@@ -113,7 +157,6 @@ enum pm_api_id {
PM_SECURE_SHA = 26,
PM_PINCTRL_REQUEST = 28,
PM_PINCTRL_RELEASE = 29,
- PM_PINCTRL_GET_FUNCTION = 30,
PM_PINCTRL_SET_FUNCTION = 31,
PM_PINCTRL_CONFIG_PARAM_GET = 32,
PM_PINCTRL_CONFIG_PARAM_SET = 33,
@@ -124,8 +167,6 @@ enum pm_api_id {
PM_CLOCK_GETSTATE = 38,
PM_CLOCK_SETDIVIDER = 39,
PM_CLOCK_GETDIVIDER = 40,
- PM_CLOCK_SETRATE = 41,
- PM_CLOCK_GETRATE = 42,
PM_CLOCK_SETPARENT = 43,
PM_CLOCK_GETPARENT = 44,
PM_FPGA_READ = 46,
@@ -136,7 +177,9 @@ enum pm_api_id {
/* PMU-FW return status codes */
enum pm_ret_status {
XST_PM_SUCCESS = 0,
+ XST_PM_INVALID_VERSION = 4,
XST_PM_NO_FEATURE = 19,
+ XST_PM_INVALID_CRC = 301,
XST_PM_INTERNAL = 2000,
XST_PM_CONFLICT = 2001,
XST_PM_NO_ACCESS = 2002,
@@ -484,20 +527,18 @@ struct zynqmp_pm_query_data {
u32 arg3;
};
-int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1,
- u32 arg2, u32 arg3, u32 *ret_payload);
+int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 *ret_payload, u32 num_args, ...);
#if IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE)
int zynqmp_pm_get_api_version(u32 *version);
int zynqmp_pm_get_chipid(u32 *idcode, u32 *version);
+int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily);
int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out);
int zynqmp_pm_clock_enable(u32 clock_id);
int zynqmp_pm_clock_disable(u32 clock_id);
int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state);
int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider);
int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider);
-int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate);
-int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate);
int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id);
int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id);
int zynqmp_pm_set_pll_frac_mode(u32 clk_id, u32 mode);
@@ -534,7 +575,6 @@ int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype);
int zynqmp_pm_set_boot_health_status(u32 value);
int zynqmp_pm_pinctrl_request(const u32 pin);
int zynqmp_pm_pinctrl_release(const u32 pin);
-int zynqmp_pm_pinctrl_get_function(const u32 pin, u32 *id);
int zynqmp_pm_pinctrl_set_function(const u32 pin, const u32 id);
int zynqmp_pm_pinctrl_get_config(const u32 pin, const u32 param,
u32 *value);
@@ -571,6 +611,11 @@ static inline int zynqmp_pm_get_chipid(u32 *idcode, u32 *version)
return -ENODEV;
}
+static inline int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily)
+{
+ return -ENODEV;
+}
+
static inline int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata,
u32 *out)
{
@@ -602,16 +647,6 @@ static inline int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider)
return -ENODEV;
}
-static inline int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate)
-{
- return -ENODEV;
-}
-
-static inline int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate)
-{
- return -ENODEV;
-}
-
static inline int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id)
{
return -ENODEV;
@@ -781,11 +816,6 @@ static inline int zynqmp_pm_pinctrl_release(const u32 pin)
return -ENODEV;
}
-static inline int zynqmp_pm_pinctrl_get_function(const u32 pin, u32 *id)
-{
- return -ENODEV;
-}
-
static inline int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id)
{
return -ENODEV;
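
A sketch of the reworked variadic call convention (PM_GET_API_VERSION takes no arguments, so the count is 0; the family check is illustrative):

static int demo_zynqmp_identify(void)
{
	u32 ret_payload[PAYLOAD_ARG_CNT];
	u32 family, subfamily;
	int ret;

	/* old: zynqmp_pm_invoke_fn(api, arg0, arg1, arg2, arg3, payload);
	 * new: explicit ret_payload, then a count of variadic arguments */
	ret = zynqmp_pm_invoke_fn(PM_GET_API_VERSION, ret_payload, 0);
	if (ret)
		return ret;

	ret = zynqmp_pm_get_family_info(&family, &subfamily);
	if (ret)
		return ret;

	return family == ZYNQMP_FAMILY_CODE ? 0 : -ENODEV;
}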
diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h
index c9de1f59ee80..89a6888f2f9e 100644
--- a/include/linux/fortify-string.h
+++ b/include/linux/fortify-string.h
@@ -20,7 +20,7 @@ void __write_overflow_field(size_t avail, size_t wanted) __compiletime_warning("
({ \
char *__p = (char *)(p); \
size_t __ret = SIZE_MAX; \
- size_t __p_size = __member_size(p); \
+ const size_t __p_size = __member_size(p); \
if (__p_size != SIZE_MAX && \
__builtin_constant_p(*__p)) { \
size_t __p_len = __p_size - 1; \
@@ -93,13 +93,9 @@ extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size)
#if __has_builtin(__builtin_dynamic_object_size)
#define POS __pass_dynamic_object_size(1)
#define POS0 __pass_dynamic_object_size(0)
-#define __struct_size(p) __builtin_dynamic_object_size(p, 0)
-#define __member_size(p) __builtin_dynamic_object_size(p, 1)
#else
#define POS __pass_object_size(1)
#define POS0 __pass_object_size(0)
-#define __struct_size(p) __builtin_object_size(p, 0)
-#define __member_size(p) __builtin_object_size(p, 1)
#endif
#define __compiletime_lessthan(bounds, length) ( \
@@ -142,7 +138,7 @@ extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size)
__FORTIFY_INLINE __diagnose_as(__builtin_strncpy, 1, 2, 3)
char *strncpy(char * const POS p, const char *q, __kernel_size_t size)
{
- size_t p_size = __member_size(p);
+ const size_t p_size = __member_size(p);
if (__compiletime_lessthan(p_size, size))
__write_overflow();
@@ -151,33 +147,6 @@ char *strncpy(char * const POS p, const char *q, __kernel_size_t size)
return __underlying_strncpy(p, q, size);
}
-/**
- * strcat - Append a string to an existing string
- *
- * @p: pointer to NUL-terminated string to append to
- * @q: pointer to NUL-terminated source string to append from
- *
- * Do not use this function. While FORTIFY_SOURCE tries to avoid
- * read and write overflows, this is only possible when the
- * destination buffer size is known to the compiler. Prefer
- * building the string with formatting, via scnprintf() or similar.
- * At the very least, use strncat().
- *
- * Returns @p.
- *
- */
-__FORTIFY_INLINE __diagnose_as(__builtin_strcat, 1, 2)
-char *strcat(char * const POS p, const char *q)
-{
- size_t p_size = __member_size(p);
-
- if (p_size == SIZE_MAX)
- return __underlying_strcat(p, q);
- if (strlcat(p, q, p_size) >= p_size)
- fortify_panic(__func__);
- return p;
-}
-
extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen);
/**
* strnlen - Return bounded count of characters in a NUL-terminated string
@@ -191,8 +160,8 @@ extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(st
*/
__FORTIFY_INLINE __kernel_size_t strnlen(const char * const POS p, __kernel_size_t maxlen)
{
- size_t p_size = __member_size(p);
- size_t p_len = __compiletime_strlen(p);
+ const size_t p_size = __member_size(p);
+ const size_t p_len = __compiletime_strlen(p);
size_t ret;
/* We can take compile-time actions when maxlen is const. */
@@ -233,8 +202,8 @@ __FORTIFY_INLINE __kernel_size_t strnlen(const char * const POS p, __kernel_size
__FORTIFY_INLINE __diagnose_as(__builtin_strlen, 1)
__kernel_size_t __fortify_strlen(const char * const POS p)
{
+ const size_t p_size = __member_size(p);
__kernel_size_t ret;
- size_t p_size = __member_size(p);
/* Give up if we don't know how large p is. */
if (p_size == SIZE_MAX)
@@ -245,51 +214,6 @@ __kernel_size_t __fortify_strlen(const char * const POS p)
return ret;
}
-/* Defined after fortified strlen() to reuse it. */
-extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy);
-/**
- * strlcpy - Copy a string into another string buffer
- *
- * @p: pointer to destination of copy
- * @q: pointer to NUL-terminated source string to copy
- * @size: maximum number of bytes to write at @p
- *
- * If strlen(@q) >= @size, the copy of @q will be truncated at
- * @size - 1 bytes. @p will always be NUL-terminated.
- *
- * Do not use this function. While FORTIFY_SOURCE tries to avoid
- * over-reads when calculating strlen(@q), it is still possible.
- * Prefer strscpy(), though note its different return values for
- * detecting truncation.
- *
- * Returns total number of bytes written to @p, including terminating NUL.
- *
- */
-__FORTIFY_INLINE size_t strlcpy(char * const POS p, const char * const POS q, size_t size)
-{
- size_t p_size = __member_size(p);
- size_t q_size = __member_size(q);
- size_t q_len; /* Full count of source string length. */
- size_t len; /* Count of characters going into destination. */
-
- if (p_size == SIZE_MAX && q_size == SIZE_MAX)
- return __real_strlcpy(p, q, size);
- q_len = strlen(q);
- len = (q_len >= size) ? size - 1 : q_len;
- if (__builtin_constant_p(size) && __builtin_constant_p(q_len) && size) {
- /* Write size is always larger than destination. */
- if (len >= p_size)
- __write_overflow();
- }
- if (size) {
- if (len >= p_size)
- fortify_panic(__func__);
- __underlying_memcpy(p, q, len);
- p[len] = '\0';
- }
- return q_len;
-}
-
/* Defined after fortified strnlen() to reuse it. */
extern ssize_t __real_strscpy(char *, const char *, size_t) __RENAME(strscpy);
/**
@@ -299,16 +223,10 @@ extern ssize_t __real_strscpy(char *, const char *, size_t) __RENAME(strscpy);
* @q: Where to copy the string from
* @size: Size of destination buffer
*
- * Copy the source string @p, or as much of it as fits, into the destination
- * @q buffer. The behavior is undefined if the string buffers overlap. The
+ * Copy the source string @q, or as much of it as fits, into the destination
+ * @p buffer. The behavior is undefined if the string buffers overlap. The
* destination @p buffer is always NUL terminated, unless it's zero-sized.
*
- * Preferred to strlcpy() since the API doesn't require reading memory
- * from the source @q string beyond the specified @size bytes, and since
- * the return value is easier to error-check than strlcpy()'s.
- * In addition, the implementation is robust to the string changing out
- * from underneath it, unlike the current strlcpy() implementation.
- *
* Preferred to strncpy() since it always returns a valid string, and
* doesn't unnecessarily force the tail of the destination buffer to be
* zero padded. If padding is desired please use strscpy_pad().
@@ -318,10 +236,10 @@ extern ssize_t __real_strscpy(char *, const char *, size_t) __RENAME(strscpy);
*/
__FORTIFY_INLINE ssize_t strscpy(char * const POS p, const char * const POS q, size_t size)
{
- size_t len;
/* Use string size rather than possible enclosing struct size. */
- size_t p_size = __member_size(p);
- size_t q_size = __member_size(q);
+ const size_t p_size = __member_size(p);
+ const size_t q_size = __member_size(q);
+ size_t len;
/* If we cannot get size of p and q default to call strscpy. */
if (p_size == SIZE_MAX && q_size == SIZE_MAX)
@@ -371,6 +289,96 @@ __FORTIFY_INLINE ssize_t strscpy(char * const POS p, const char * const POS q, s
return __real_strscpy(p, q, len);
}
+/* Defined after fortified strlen() to reuse it. */
+extern size_t __real_strlcat(char *p, const char *q, size_t avail) __RENAME(strlcat);
+/**
+ * strlcat - Append a string to an existing string
+ *
+ * @p: pointer to %NUL-terminated string to append to
+ * @q: pointer to %NUL-terminated string to append from
+ * @avail: Maximum bytes available in @p
+ *
+ * Appends %NUL-terminated string @q after the %NUL-terminated
+ * string at @p, but will not write beyond @avail bytes total,
+ * potentially truncating the copy from @q. @p will stay
+ * %NUL-terminated only if a %NUL already existed within
+ * the @avail bytes of @p. If so, the resulting number of
+ * bytes copied from @q will be at most "@avail - strlen(@p) - 1".
+ *
+ * Do not use this function. While FORTIFY_SOURCE tries to avoid
+ * read and write overflows, this is only possible when the sizes
+ * of @p and @q are known to the compiler. Prefer building the
+ * string with formatting, via scnprintf(), seq_buf, or similar.
+ *
+ * Returns total bytes that _would_ have been contained by @p
+ * regardless of truncation, similar to snprintf(). If return
+ * value is >= @avail, the string has been truncated.
+ *
+ */
+__FORTIFY_INLINE
+size_t strlcat(char * const POS p, const char * const POS q, size_t avail)
+{
+ const size_t p_size = __member_size(p);
+ const size_t q_size = __member_size(q);
+ size_t p_len, copy_len;
+ size_t actual, wanted;
+
+ /* Give up immediately if both buffer sizes are unknown. */
+ if (p_size == SIZE_MAX && q_size == SIZE_MAX)
+ return __real_strlcat(p, q, avail);
+
+ p_len = strnlen(p, avail);
+ copy_len = strlen(q);
+ wanted = actual = p_len + copy_len;
+
+ /* Cannot append any more: report truncation. */
+ if (avail <= p_len)
+ return wanted;
+
+ /* Give up if string is already overflowed. */
+ if (p_size <= p_len)
+ fortify_panic(__func__);
+
+ if (actual >= avail) {
+ copy_len = avail - p_len - 1;
+ actual = p_len + copy_len;
+ }
+
+ /* Give up if copy will overflow. */
+ if (p_size <= actual)
+ fortify_panic(__func__);
+ __underlying_memcpy(p + p_len, q, copy_len);
+ p[actual] = '\0';
+
+ return wanted;
+}
+
+/* Defined after fortified strlcat() to reuse it. */
+/**
+ * strcat - Append a string to an existing string
+ *
+ * @p: pointer to NUL-terminated string to append to
+ * @q: pointer to NUL-terminated source string to append from
+ *
+ * Do not use this function. While FORTIFY_SOURCE tries to avoid
+ * read and write overflows, this is only possible when the
+ * destination buffer size is known to the compiler. Prefer
+ * building the string with formatting, via scnprintf() or similar.
+ * At the very least, use strncat().
+ *
+ * Returns @p.
+ *
+ */
+__FORTIFY_INLINE __diagnose_as(__builtin_strcat, 1, 2)
+char *strcat(char * const POS p, const char *q)
+{
+ const size_t p_size = __member_size(p);
+
+ if (strlcat(p, q, p_size) >= p_size)
+ fortify_panic(__func__);
+ return p;
+}
+
/**
* strncat - Append a string to an existing string
*
@@ -394,9 +402,9 @@ __FORTIFY_INLINE ssize_t strscpy(char * const POS p, const char * const POS q, s
__FORTIFY_INLINE __diagnose_as(__builtin_strncat, 1, 2, 3)
char *strncat(char * const POS p, const char * const POS q, __kernel_size_t count)
{
+ const size_t p_size = __member_size(p);
+ const size_t q_size = __member_size(q);
size_t p_len, copy_len;
- size_t p_size = __member_size(p);
- size_t q_size = __member_size(q);
if (p_size == SIZE_MAX && q_size == SIZE_MAX)
return __underlying_strncat(p, q, count);
@@ -580,7 +588,7 @@ __FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size,
__q_size_field, #op), \
#op ": detected field-spanning write (size %zu) of single %s (size %zu)\n", \
__fortify_size, \
- "field \"" #p "\" at " __FILE__ ":" __stringify(__LINE__), \
+ "field \"" #p "\" at " FILE_LINE, \
__p_size_field); \
__underlying_##op(p, q, __fortify_size); \
})
@@ -639,7 +647,7 @@ __FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size,
extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan);
__FORTIFY_INLINE void *memscan(void * const POS0 p, int c, __kernel_size_t size)
{
- size_t p_size = __struct_size(p);
+ const size_t p_size = __struct_size(p);
if (__compiletime_lessthan(p_size, size))
__read_overflow();
@@ -651,8 +659,8 @@ __FORTIFY_INLINE void *memscan(void * const POS0 p, int c, __kernel_size_t size)
__FORTIFY_INLINE __diagnose_as(__builtin_memcmp, 1, 2, 3)
int memcmp(const void * const POS0 p, const void * const POS0 q, __kernel_size_t size)
{
- size_t p_size = __struct_size(p);
- size_t q_size = __struct_size(q);
+ const size_t p_size = __struct_size(p);
+ const size_t q_size = __struct_size(q);
if (__builtin_constant_p(size)) {
if (__compiletime_lessthan(p_size, size))
@@ -668,7 +676,7 @@ int memcmp(const void * const POS0 p, const void * const POS0 q, __kernel_size_t
__FORTIFY_INLINE __diagnose_as(__builtin_memchr, 1, 2, 3)
void *memchr(const void * const POS0 p, int c, __kernel_size_t size)
{
- size_t p_size = __struct_size(p);
+ const size_t p_size = __struct_size(p);
if (__compiletime_lessthan(p_size, size))
__read_overflow();
@@ -680,7 +688,7 @@ void *memchr(const void * const POS0 p, int c, __kernel_size_t size)
void *__real_memchr_inv(const void *s, int c, size_t n) __RENAME(memchr_inv);
__FORTIFY_INLINE void *memchr_inv(const void * const POS0 p, int c, size_t size)
{
- size_t p_size = __struct_size(p);
+ const size_t p_size = __struct_size(p);
if (__compiletime_lessthan(p_size, size))
__read_overflow();
@@ -693,7 +701,7 @@ extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kme
__realloc_size(2);
__FORTIFY_INLINE void *kmemdup(const void * const POS0 p, size_t size, gfp_t gfp)
{
- size_t p_size = __struct_size(p);
+ const size_t p_size = __struct_size(p);
if (__compiletime_lessthan(p_size, size))
__read_overflow();
@@ -720,8 +728,8 @@ __FORTIFY_INLINE void *kmemdup(const void * const POS0 p, size_t size, gfp_t gfp
__FORTIFY_INLINE __diagnose_as(__builtin_strcpy, 1, 2)
char *strcpy(char * const POS p, const char * const POS q)
{
- size_t p_size = __member_size(p);
- size_t q_size = __member_size(q);
+ const size_t p_size = __member_size(p);
+ const size_t q_size = __member_size(q);
size_t size;
/* If neither buffer size is known, immediately give up. */
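
An illustrative walk-through of the fortified strlcat() return convention above (sizes chosen so truncation occurs; the buffer contents in the comment follow from the code path shown):

static void demo_strlcat_truncation(void)
{
	char buf[8] = "ab";
	size_t wanted;

	wanted = strlcat(buf, "cdefgh", sizeof(buf));
	/* p_len = 2, strlen(q) = 6, so wanted = 8; since 8 >= avail the
	 * copy is clamped to 5 bytes and buf becomes "abcdefg\0". */
	if (wanted >= sizeof(buf))
		pr_warn("strlcat truncated: wanted %zu bytes\n", wanted);
}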
diff --git a/include/linux/fprobe.h b/include/linux/fprobe.h
index 47fefc7f363b..3e03758151f4 100644
--- a/include/linux/fprobe.h
+++ b/include/linux/fprobe.h
@@ -35,9 +35,11 @@ struct fprobe {
int nr_maxactive;
int (*entry_handler)(struct fprobe *fp, unsigned long entry_ip,
- struct pt_regs *regs, void *entry_data);
+ unsigned long ret_ip, struct pt_regs *regs,
+ void *entry_data);
void (*exit_handler)(struct fprobe *fp, unsigned long entry_ip,
- struct pt_regs *regs, void *entry_data);
+ unsigned long ret_ip, struct pt_regs *regs,
+ void *entry_data);
};
/* This fprobe is soft-disabled. */
@@ -64,6 +66,7 @@ int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter
int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num);
int register_fprobe_syms(struct fprobe *fp, const char **syms, int num);
int unregister_fprobe(struct fprobe *fp);
+bool fprobe_is_registered(struct fprobe *fp);
#else
static inline int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter)
{
@@ -81,6 +84,10 @@ static inline int unregister_fprobe(struct fprobe *fp)
{
return -EOPNOTSUPP;
}
+static inline bool fprobe_is_registered(struct fprobe *fp)
+{
+ return false;
+}
#endif
/**
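
A hypothetical probe sketch with the extended handler signature above (the probed symbol is illustrative):

static int demo_entry(struct fprobe *fp, unsigned long entry_ip,
		      unsigned long ret_ip, struct pt_regs *regs,
		      void *entry_data)
{
	pr_debug("enter %pS, will return to %pS\n",
		 (void *)entry_ip, (void *)ret_ip);
	return 0;
}

static struct fprobe demo_fprobe = {
	.entry_handler = demo_entry,
};

static int __init demo_fprobe_init(void)
{
	return register_fprobe(&demo_fprobe, "vfs_read", NULL);
}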
diff --git a/include/linux/framer/framer-provider.h b/include/linux/framer/framer-provider.h
new file mode 100644
index 000000000000..782cd5fc83d5
--- /dev/null
+++ b/include/linux/framer/framer-provider.h
@@ -0,0 +1,194 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Generic framer provider header file
+ *
+ * Copyright 2023 CS GROUP France
+ *
+ * Author: Herve Codina <herve.codina@bootlin.com>
+ */
+
+#ifndef __DRIVERS_PROVIDER_FRAMER_H
+#define __DRIVERS_PROVIDER_FRAMER_H
+
+#include <linux/export.h>
+#include <linux/framer/framer.h>
+#include <linux/types.h>
+
+#define FRAMER_FLAG_POLL_STATUS BIT(0)
+
+/**
+ * struct framer_ops - set of function pointers for performing framer operations
+ * @init: operation to be performed for initializing the framer
+ * @exit: operation to be performed while exiting
+ * @power_on: powering on the framer
+ * @power_off: powering off the framer
+ * @flags: OR-ed flags (FRAMER_FLAG_*) to ask for core functionality
+ * - @FRAMER_FLAG_POLL_STATUS:
+ * Ask the core to poll the framer status and notify consumers on
+ * change.
+ * The framer driver should call @framer_notify_status_change() when it
+ * detects a status change, usually from an interrupt handler. If the
+ * framer cannot detect this change itself, it can set this flag to ask
+ * the core for status polling: the core then calls @get_status()
+ * periodically and notifies consumers when a change is detected.
+ * @get_status() must be implemented in that case.
+ * @owner: the module owner containing the ops
+ */
+struct framer_ops {
+ int (*init)(struct framer *framer);
+ void (*exit)(struct framer *framer);
+ int (*power_on)(struct framer *framer);
+ int (*power_off)(struct framer *framer);
+
+ /**
+ * @get_status:
+ *
+ * Optional.
+ *
+ * Used to get the framer status. framer_init() must have
+ * been called on the framer.
+ *
+ * Returns: 0 if successful, a negative error code otherwise
+ */
+ int (*get_status)(struct framer *framer, struct framer_status *status);
+
+ /**
+ * @set_config:
+ *
+ * Optional.
+ *
+ * Used to set the framer configuration. framer_init() must have
+ * been called on the framer.
+ *
+ * Returns: 0 if successful, a negative error code otherwise
+ */
+ int (*set_config)(struct framer *framer, const struct framer_config *config);
+
+ /**
+ * @get_config:
+ *
+ * Optional.
+ *
+ * Used to get the framer configuration. framer_init() must have
+ * been called on the framer.
+ *
+ * Returns: 0 if successful, a negative error code otherwise
+ */
+ int (*get_config)(struct framer *framer, struct framer_config *config);
+
+ u32 flags;
+ struct module *owner;
+};
+
+/**
+ * struct framer_provider - represents the framer provider
+ * @dev: framer provider device
+ * @owner: the module owner providing of_xlate
+ * @list: to maintain a linked list of framer providers
+ * @of_xlate: function pointer to obtain framer instance from the phandle arguments
+ */
+struct framer_provider {
+ struct device *dev;
+ struct module *owner;
+ struct list_head list;
+ struct framer * (*of_xlate)(struct device *dev,
+ struct of_phandle_args *args);
+};
+
+static inline void framer_set_drvdata(struct framer *framer, void *data)
+{
+ dev_set_drvdata(&framer->dev, data);
+}
+
+static inline void *framer_get_drvdata(struct framer *framer)
+{
+ return dev_get_drvdata(&framer->dev);
+}
+
+#if IS_ENABLED(CONFIG_GENERIC_FRAMER)
+
+/* Create and destroy a framer */
+struct framer *framer_create(struct device *dev, struct device_node *node,
+ const struct framer_ops *ops);
+void framer_destroy(struct framer *framer);
+
+/* devm version */
+struct framer *devm_framer_create(struct device *dev, struct device_node *node,
+ const struct framer_ops *ops);
+
+struct framer *framer_provider_simple_of_xlate(struct device *dev,
+ struct of_phandle_args *args);
+
+struct framer_provider *
+__framer_provider_of_register(struct device *dev, struct module *owner,
+ struct framer *(*of_xlate)(struct device *dev,
+ struct of_phandle_args *args));
+
+void framer_provider_of_unregister(struct framer_provider *framer_provider);
+
+struct framer_provider *
+__devm_framer_provider_of_register(struct device *dev, struct module *owner,
+ struct framer *(*of_xlate)(struct device *dev,
+ struct of_phandle_args *args));
+
+void framer_notify_status_change(struct framer *framer);
+
+#else /* IS_ENABLED(CONFIG_GENERIC_FRAMER) */
+
+static inline struct framer *framer_create(struct device *dev, struct device_node *node,
+ const struct framer_ops *ops)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void framer_destroy(struct framer *framer)
+{
+}
+
+/* devm version */
+static inline struct framer *devm_framer_create(struct device *dev, struct device_node *node,
+ const struct framer_ops *ops)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct framer *framer_provider_simple_of_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct framer_provider *
+__framer_provider_of_register(struct device *dev, struct module *owner,
+ struct framer *(*of_xlate)(struct device *dev,
+ struct of_phandle_args *args))
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void framer_provider_of_unregister(struct framer_provider *framer_provider)
+{
+}
+
+static inline struct framer_provider *
+__devm_framer_provider_of_register(struct device *dev, struct module *owner,
+ struct framer *(*of_xlate)(struct device *dev,
+ struct of_phandle_args *args))
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void framer_notify_status_change(struct framer *framer)
+{
+}
+
+#endif /* IS_ENABLED(CONFIG_GENERIC_FRAMER) */
+
+#define framer_provider_of_register(dev, xlate) \
+ __framer_provider_of_register((dev), THIS_MODULE, (xlate))
+
+#define devm_framer_provider_of_register(dev, xlate) \
+ __devm_framer_provider_of_register((dev), THIS_MODULE, (xlate))
+
+#endif /* __DRIVERS_PROVIDER_FRAMER_H */
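
A minimal provider sketch under the API above (passing a NULL node is assumed to default to dev->of_node, as in similar provider frameworks; the ops callbacks are left empty for brevity):

#include <linux/platform_device.h>

static const struct framer_ops demo_framer_ops = {
	.owner = THIS_MODULE,
	/* .get_status/.set_config/.get_config are optional */
};

static int demo_framer_probe(struct platform_device *pdev)
{
	struct framer_provider *provider;
	struct framer *framer;

	framer = devm_framer_create(&pdev->dev, NULL, &demo_framer_ops);
	if (IS_ERR(framer))
		return PTR_ERR(framer);

	provider = devm_framer_provider_of_register(&pdev->dev,
					framer_provider_simple_of_xlate);
	return PTR_ERR_OR_ZERO(provider);
}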
diff --git a/include/linux/framer/framer.h b/include/linux/framer/framer.h
new file mode 100644
index 000000000000..9a9b88962c29
--- /dev/null
+++ b/include/linux/framer/framer.h
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Generic framer header file
+ *
+ * Copyright 2023 CS GROUP France
+ *
+ * Author: Herve Codina <herve.codina@bootlin.com>
+ */
+
+#ifndef __DRIVERS_FRAMER_H
+#define __DRIVERS_FRAMER_H
+
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/device.h>
+#include <linux/workqueue.h>
+
+/**
+ * enum framer_iface - Framer interface
+ * @FRAMER_IFACE_E1: E1 interface
+ * @FRAMER_IFACE_T1: T1 interface
+ */
+enum framer_iface {
+ FRAMER_IFACE_E1,
+ FRAMER_IFACE_T1,
+};
+
+/**
+ * enum framer_clock_type - Framer clock type
+ * @FRAMER_CLOCK_EXT: External clock
+ * @FRAMER_CLOCK_INT: Internal clock
+ */
+enum framer_clock_type {
+ FRAMER_CLOCK_EXT,
+ FRAMER_CLOCK_INT,
+};
+
+/**
+ * struct framer_config - Framer configuration
+ * @iface: Framer line interface
+ * @clock_type: Framer clock type
+ * @line_clock_rate: Framer line clock rate
+ */
+struct framer_config {
+ enum framer_iface iface;
+ enum framer_clock_type clock_type;
+ unsigned long line_clock_rate;
+};
+
+/**
+ * struct framer_status - Framer status
+ * @link_is_on: Framer link state: true if the link is on, false if it is off.
+ */
+struct framer_status {
+ bool link_is_on;
+};
+
+/**
+ * enum framer_event - Event available for notification
+ * @FRAMER_EVENT_STATUS: Event notified on framer_status changes
+ */
+enum framer_event {
+ FRAMER_EVENT_STATUS,
+};
+
+/**
+ * struct framer - represents the framer device
+ * @dev: framer device
+ * @id: id of the framer device
+ * @ops: function pointers for performing framer operations
+ * @mutex: mutex to protect framer_ops
+ * @init_count: init reference count, balanced when the framer is shared by multiple consumers
+ * @power_count: power-on reference count, balanced when the framer is shared by multiple consumers
+ * @pwr: power regulator associated with the framer
+ * @notify_status_work: work structure used for status notifications
+ * @notifier_list: notifier list used for notifications
+ * @polling_work: delayed work structure used for the polling task
+ * @prev_status: previous read status used by the polling task to detect changes
+ */
+struct framer {
+ struct device dev;
+ int id;
+ const struct framer_ops *ops;
+ struct mutex mutex; /* Protect framer */
+ int init_count;
+ int power_count;
+ struct regulator *pwr;
+ struct work_struct notify_status_work;
+ struct blocking_notifier_head notifier_list;
+ struct delayed_work polling_work;
+ struct framer_status prev_status;
+};
+
+#if IS_ENABLED(CONFIG_GENERIC_FRAMER)
+int framer_pm_runtime_get(struct framer *framer);
+int framer_pm_runtime_get_sync(struct framer *framer);
+int framer_pm_runtime_put(struct framer *framer);
+int framer_pm_runtime_put_sync(struct framer *framer);
+int framer_init(struct framer *framer);
+int framer_exit(struct framer *framer);
+int framer_power_on(struct framer *framer);
+int framer_power_off(struct framer *framer);
+int framer_get_status(struct framer *framer, struct framer_status *status);
+int framer_get_config(struct framer *framer, struct framer_config *config);
+int framer_set_config(struct framer *framer, const struct framer_config *config);
+int framer_notifier_register(struct framer *framer, struct notifier_block *nb);
+int framer_notifier_unregister(struct framer *framer, struct notifier_block *nb);
+
+struct framer *framer_get(struct device *dev, const char *con_id);
+void framer_put(struct device *dev, struct framer *framer);
+
+struct framer *devm_framer_get(struct device *dev, const char *con_id);
+struct framer *devm_framer_optional_get(struct device *dev, const char *con_id);
+#else
+static inline int framer_pm_runtime_get(struct framer *framer)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_pm_runtime_get_sync(struct framer *framer)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_pm_runtime_put(struct framer *framer)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_pm_runtime_put_sync(struct framer *framer)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_init(struct framer *framer)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_exit(struct framer *framer)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_power_on(struct framer *framer)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_power_off(struct framer *framer)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_get_status(struct framer *framer, struct framer_status *status)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_get_config(struct framer *framer, struct framer_config *config)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_set_config(struct framer *framer, const struct framer_config *config)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_notifier_register(struct framer *framer,
+ struct notifier_block *nb)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_notifier_unregister(struct framer *framer,
+ struct notifier_block *nb)
+{
+ return -ENOSYS;
+}
+
+static inline struct framer *framer_get(struct device *dev, const char *con_id)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void framer_put(struct device *dev, struct framer *framer)
+{
+}
+
+static inline struct framer *devm_framer_get(struct device *dev, const char *con_id)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct framer *devm_framer_optional_get(struct device *dev, const char *con_id)
+{
+ return NULL;
+}
+
+#endif
+
+#endif /* __DRIVERS_FRAMER_H */
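
A hypothetical consumer sketch: get, init and power on a framer, then register for status events (the notifier payload is not assumed here; a real handler would typically re-read the state via framer_get_status()):

static int demo_framer_event(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	if (action == FRAMER_EVENT_STATUS)
		pr_info("framer status changed\n");
	return NOTIFY_OK;
}

static struct notifier_block demo_framer_nb = {
	.notifier_call = demo_framer_event,
};

static int demo_attach_framer(struct device *dev)
{
	struct framer *framer;
	int ret;

	framer = devm_framer_get(dev, NULL);
	if (IS_ERR(framer))
		return PTR_ERR(framer);

	ret = framer_init(framer);
	if (ret)
		return ret;

	ret = framer_power_on(framer);
	if (ret)
		return ret;

	return framer_notifier_register(framer, &demo_framer_nb);
}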
diff --git a/include/linux/framer/pef2256.h b/include/linux/framer/pef2256.h
new file mode 100644
index 000000000000..71d80af58c40
--- /dev/null
+++ b/include/linux/framer/pef2256.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * PEF2256 consumer API
+ *
+ * Copyright 2023 CS GROUP France
+ *
+ * Author: Herve Codina <herve.codina@bootlin.com>
+ */
+#ifndef __PEF2256_H__
+#define __PEF2256_H__
+
+#include <linux/types.h>
+
+struct pef2256;
+struct regmap;
+
+/* Retrieve the PEF2256 regmap */
+struct regmap *pef2256_get_regmap(struct pef2256 *pef2256);
+
+/* PEF2256 hardware versions */
+enum pef2256_version {
+ PEF2256_VERSION_UNKNOWN,
+ PEF2256_VERSION_1_2,
+ PEF2256_VERSION_2_1,
+ PEF2256_VERSION_2_2,
+};
+
+/* Get the PEF2256 hardware version */
+enum pef2256_version pef2256_get_version(struct pef2256 *pef2256);
+
+#endif /* __PEF2256_H__ */
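/*
 * Illustrative use of the consumer API above (not part of this patch; the
 * function name and register offset are hypothetical): a sub-device reads
 * a chip register through the shared regmap after checking the revision.
 */
static int example_check_pef2256(struct pef2256 *pef2256)
{
	struct regmap *regmap = pef2256_get_regmap(pef2256);
	unsigned int val;

	if (pef2256_get_version(pef2256) == PEF2256_VERSION_UNKNOWN)
		return -ENODEV;

	return regmap_read(regmap, 0x00, &val); /* 0x00 is a placeholder */
}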
diff --git a/include/linux/freelist.h b/include/linux/freelist.h
deleted file mode 100644
index fc1842b96469..000000000000
--- a/include/linux/freelist.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
-#ifndef FREELIST_H
-#define FREELIST_H
-
-#include <linux/atomic.h>
-
-/*
- * Copyright: cameron@moodycamel.com
- *
- * A simple CAS-based lock-free free list. Not the fastest thing in the world
- * under heavy contention, but simple and correct (assuming nodes are never
- * freed until after the free list is destroyed), and fairly speedy under low
- * contention.
- *
- * Adapted from: https://moodycamel.com/blog/2014/solving-the-aba-problem-for-lock-free-free-lists
- */
-
-struct freelist_node {
- atomic_t refs;
- struct freelist_node *next;
-};
-
-struct freelist_head {
- struct freelist_node *head;
-};
-
-#define REFS_ON_FREELIST 0x80000000
-#define REFS_MASK 0x7FFFFFFF
-
-static inline void __freelist_add(struct freelist_node *node, struct freelist_head *list)
-{
- /*
- * Since the refcount is zero, and nobody can increase it once it's
- * zero (except us, and we run only one copy of this method per node at
- * a time, i.e. the single thread case), then we know we can safely
- * change the next pointer of the node; however, once the refcount is
- * back above zero, then other threads could increase it (happens under
- * heavy contention, when the refcount goes to zero in between a load
- * and a refcount increment of a node in try_get, then back up to
- * something non-zero, then the refcount increment is done by the other
- * thread) -- so if the CAS to add the node to the actual list fails,
- * decrese the refcount and leave the add operation to the next thread
- * who puts the refcount back to zero (which could be us, hence the
- * loop).
- */
- struct freelist_node *head = READ_ONCE(list->head);
-
- for (;;) {
- WRITE_ONCE(node->next, head);
- atomic_set_release(&node->refs, 1);
-
- if (!try_cmpxchg_release(&list->head, &head, node)) {
- /*
- * Hmm, the add failed, but we can only try again when
- * the refcount goes back to zero.
- */
- if (atomic_fetch_add_release(REFS_ON_FREELIST - 1, &node->refs) == 1)
- continue;
- }
- return;
- }
-}
-
-static inline void freelist_add(struct freelist_node *node, struct freelist_head *list)
-{
- /*
- * We know that the should-be-on-freelist bit is 0 at this point, so
- * it's safe to set it using a fetch_add.
- */
- if (!atomic_fetch_add_release(REFS_ON_FREELIST, &node->refs)) {
- /*
- * Oh look! We were the last ones referencing this node, and we
- * know we want to add it to the free list, so let's do it!
- */
- __freelist_add(node, list);
- }
-}
-
-static inline struct freelist_node *freelist_try_get(struct freelist_head *list)
-{
- struct freelist_node *prev, *next, *head = smp_load_acquire(&list->head);
- unsigned int refs;
-
- while (head) {
- prev = head;
- refs = atomic_read(&head->refs);
- if ((refs & REFS_MASK) == 0 ||
- !atomic_try_cmpxchg_acquire(&head->refs, &refs, refs+1)) {
- head = smp_load_acquire(&list->head);
- continue;
- }
-
- /*
- * Good, reference count has been incremented (it wasn't at
- * zero), which means we can read the next and not worry about
- * it changing between now and the time we do the CAS.
- */
- next = READ_ONCE(head->next);
- if (try_cmpxchg_acquire(&list->head, &head, next)) {
- /*
- * Yay, got the node. This means it was on the list,
- * which means should-be-on-freelist must be false no
- * matter the refcount (because nobody else knows it's
- * been taken off yet, it can't have been put back on).
- */
- WARN_ON_ONCE(atomic_read(&head->refs) & REFS_ON_FREELIST);
-
- /*
- * Decrease refcount twice, once for our ref, and once
- * for the list's ref.
- */
- atomic_fetch_add(-2, &head->refs);
-
- return head;
- }
-
- /*
- * OK, the head must have changed on us, but we still need to decrement
- * the refcount we increased.
- */
- refs = atomic_fetch_add(-1, &prev->refs);
- if (refs == REFS_ON_FREELIST + 1)
- __freelist_add(prev, list);
- }
-
- return NULL;
-}
-
-#endif /* FREELIST_H */
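/*
 * For context on the removed API (hypothetical sketch): users embedded the
 * node in their own object and recovered it with container_of() after a
 * successful freelist_try_get().
 */
struct example_obj {
	struct freelist_node fnode;
	int payload;
};

static struct example_obj *example_obj_pop(struct freelist_head *list)
{
	struct freelist_node *node = freelist_try_get(list);

	return node ? container_of(node, struct example_obj, fnode) : NULL;
}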
diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
deleted file mode 100644
index a631bac12220..000000000000
--- a/include/linux/frontswap.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_FRONTSWAP_H
-#define _LINUX_FRONTSWAP_H
-
-#include <linux/swap.h>
-#include <linux/mm.h>
-#include <linux/bitops.h>
-#include <linux/jump_label.h>
-
-struct frontswap_ops {
- void (*init)(unsigned); /* this swap type was just swapon'ed */
- int (*store)(unsigned, pgoff_t, struct page *); /* store a page */
- int (*load)(unsigned, pgoff_t, struct page *); /* load a page */
- void (*invalidate_page)(unsigned, pgoff_t); /* page no longer needed */
- void (*invalidate_area)(unsigned); /* swap type just swapoff'ed */
-};
-
-int frontswap_register_ops(const struct frontswap_ops *ops);
-
-extern void frontswap_init(unsigned type, unsigned long *map);
-extern int __frontswap_store(struct page *page);
-extern int __frontswap_load(struct page *page);
-extern void __frontswap_invalidate_page(unsigned, pgoff_t);
-extern void __frontswap_invalidate_area(unsigned);
-
-#ifdef CONFIG_FRONTSWAP
-extern struct static_key_false frontswap_enabled_key;
-
-static inline bool frontswap_enabled(void)
-{
- return static_branch_unlikely(&frontswap_enabled_key);
-}
-
-static inline void frontswap_map_set(struct swap_info_struct *p,
- unsigned long *map)
-{
- p->frontswap_map = map;
-}
-
-static inline unsigned long *frontswap_map_get(struct swap_info_struct *p)
-{
- return p->frontswap_map;
-}
-#else
-/* all inline routines become no-ops and all externs are ignored */
-
-static inline bool frontswap_enabled(void)
-{
- return false;
-}
-
-static inline void frontswap_map_set(struct swap_info_struct *p,
- unsigned long *map)
-{
-}
-
-static inline unsigned long *frontswap_map_get(struct swap_info_struct *p)
-{
- return NULL;
-}
-#endif
-
-static inline int frontswap_store(struct page *page)
-{
- if (frontswap_enabled())
- return __frontswap_store(page);
-
- return -1;
-}
-
-static inline int frontswap_load(struct page *page)
-{
- if (frontswap_enabled())
- return __frontswap_load(page);
-
- return -1;
-}
-
-static inline void frontswap_invalidate_page(unsigned type, pgoff_t offset)
-{
- if (frontswap_enabled())
- __frontswap_invalidate_page(type, offset);
-}
-
-static inline void frontswap_invalidate_area(unsigned type)
-{
- if (frontswap_enabled())
- __frontswap_invalidate_area(type);
-}
-
-#endif /* _LINUX_FRONTSWAP_H */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 133f0640fb24..1fbc72c5f112 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -67,7 +67,7 @@ struct swap_info_struct;
struct seq_file;
struct workqueue_struct;
struct iov_iter;
-struct fscrypt_info;
+struct fscrypt_inode_info;
struct fscrypt_operations;
struct fsverity_info;
struct fsverity_operations;
@@ -119,13 +119,6 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
#define FMODE_PWRITE ((__force fmode_t)0x10)
/* File is opened for execution with sys_execve / sys_uselib */
#define FMODE_EXEC ((__force fmode_t)0x20)
-/* File is opened with O_NDELAY (only set for block devices) */
-#define FMODE_NDELAY ((__force fmode_t)0x40)
-/* File is opened with O_EXCL (only set for block devices) */
-#define FMODE_EXCL ((__force fmode_t)0x80)
-/* File is opened using open(.., 3, ..) and is writeable only for ioctls
- (specialy hack for floppy.c) */
-#define FMODE_WRITE_IOCTL ((__force fmode_t)0x100)
/* 32bit hashes as llseek() offset (for directories) */
#define FMODE_32BITHASH ((__force fmode_t)0x200)
/* 64bit hashes as llseek() offset (for directories) */
@@ -171,6 +164,9 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
/* File supports non-exclusive O_DIRECT writes from multiple threads */
#define FMODE_DIO_PARALLEL_WRITE ((__force fmode_t)0x1000000)
+/* File is embedded in backing_file object */
+#define FMODE_BACKING ((__force fmode_t)0x2000000)
+
/* File was opened by fanotify and shouldn't generate fanotify events */
#define FMODE_NONOTIFY ((__force fmode_t)0x4000000)
@@ -342,6 +338,22 @@ enum rw_hint {
#define IOCB_NOIO (1 << 20)
/* can use bio alloc cache */
#define IOCB_ALLOC_CACHE (1 << 21)
+/*
+ * IOCB_DIO_CALLER_COMP can be set by the iocb owner, to indicate that the
+ * iocb completion can be passed back to the owner for execution from a safe
+ * context rather than needing to be punted through a workqueue. If this
+ * flag is set, the bio completion handling may set iocb->dio_complete to a
+ * handler function and iocb->private to context information for that handler.
+ * The issuer should call the handler with that context information from task
+ * context to complete the processing of the iocb. Note that while this
+ * provides a task context for the dio_complete() callback, it should only be
+ * used on the completion side for non-IO generating completions. It's fine to
+ * call blocking functions from this callback, but they should not wait for
+ * unrelated IO (like cache flushing, new IO generation, etc).
+ */
+#define IOCB_DIO_CALLER_COMP (1 << 22)
+/* kiocb is a read or write operation submitted by fs/aio.c. */
+#define IOCB_AIO_RW (1 << 23)
/* for use in trace events */
#define TRACE_IOCB_STRINGS \
@@ -355,7 +367,8 @@ enum rw_hint {
{ IOCB_WRITE, "WRITE" }, \
{ IOCB_WAITQ, "WAITQ" }, \
{ IOCB_NOIO, "NOIO" }, \
- { IOCB_ALLOC_CACHE, "ALLOC_CACHE" }
+ { IOCB_ALLOC_CACHE, "ALLOC_CACHE" }, \
+ { IOCB_DIO_CALLER_COMP, "CALLER_COMP" }
struct kiocb {
struct file *ki_filp;
@@ -364,7 +377,23 @@ struct kiocb {
void *private;
int ki_flags;
u16 ki_ioprio; /* See linux/ioprio.h */
- struct wait_page_queue *ki_waitq; /* for async buffered IO */
+ union {
+ /*
+ * Only used for async buffered reads, where it denotes the
+ * page waitqueue associated with completing the read. Valid
+ * IFF IOCB_WAITQ is set.
+ */
+ struct wait_page_queue *ki_waitq;
+ /*
+ * Can be used for O_DIRECT IO, where the completion handling
+ * is punted back to the issuer of the IO. May only be set
+ * if IOCB_DIO_CALLER_COMP is set by the issuer, and the issuer
+ * must then check for presence of this handler when ki_complete
+ * is invoked. The data passed in to this handler must be
+ * assigned to ->private when dio_complete is assigned.
+ */
+ ssize_t (*dio_complete)(void *data);
+ };
};
static inline bool is_sync_kiocb(struct kiocb *kiocb)
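/*
 * Issuer-side sketch of the IOCB_DIO_CALLER_COMP protocol described above
 * (illustrative, not part of the patch): once the IO has signalled
 * completion, the issuer invokes the deferred handler from task context.
 */
static void example_finish_caller_comp(struct kiocb *iocb)
{
	ssize_t ret;

	if (!iocb->dio_complete)
		return;

	/* ->private was set alongside ->dio_complete at bio completion time */
	ret = iocb->dio_complete(iocb->private);
	iocb->ki_complete(iocb, ret);
}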
@@ -407,7 +436,7 @@ struct address_space_operations {
bool (*is_partially_uptodate) (struct folio *, size_t from,
size_t count);
void (*is_dirty_writeback) (struct folio *, bool *dirty, bool *wb);
- int (*error_remove_page)(struct address_space *, struct page *);
+ int (*error_remove_folio)(struct address_space *, struct folio *);
/* swapfile support */
int (*swap_activate)(struct swap_info_struct *sis, struct file *file,
@@ -427,7 +456,7 @@ extern const struct address_space_operations empty_aops;
* It is also used to block modification of page cache contents through
* memory mappings.
* @gfp_mask: Memory allocation flags to use for allocating pages.
- * @i_mmap_writable: Number of VM_SHARED mappings.
+ * @i_mmap_writable: Number of VM_SHARED, VM_MAYWRITE mappings.
* @nr_thps: Number of THPs in the pagecache (non-shmem only).
* @i_mmap: Tree of private and shared mappings.
* @i_mmap_rwsem: Protects @i_mmap and @i_mmap_writable.
@@ -436,9 +465,9 @@ extern const struct address_space_operations empty_aops;
* @a_ops: Methods.
* @flags: Error bits and flags (AS_*).
* @wb_err: The most recent error which has occurred.
- * @private_lock: For use by the owner of the address_space.
- * @private_list: For use by the owner of the address_space.
- * @private_data: For use by the owner of the address_space.
+ * @i_private_lock: For use by the owner of the address_space.
+ * @i_private_list: For use by the owner of the address_space.
+ * @i_private_data: For use by the owner of the address_space.
*/
struct address_space {
struct inode *host;
@@ -451,15 +480,15 @@ struct address_space {
atomic_t nr_thps;
#endif
struct rb_root_cached i_mmap;
- struct rw_semaphore i_mmap_rwsem;
unsigned long nrpages;
pgoff_t writeback_index;
const struct address_space_operations *a_ops;
unsigned long flags;
+ struct rw_semaphore i_mmap_rwsem;
errseq_t wb_err;
- spinlock_t private_lock;
- struct list_head private_list;
- void *private_data;
+ spinlock_t i_private_lock;
+ struct list_head i_private_list;
+ void * i_private_data;
} __attribute__((aligned(sizeof(long)))) __randomize_layout;
/*
* On most architectures that alignment is already the case; but
@@ -530,7 +559,7 @@ static inline int mapping_mapped(struct address_space *mapping)
/*
* Might pages of this file have been modified in userspace?
- * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap
+ * Note that i_mmap_writable counts all VM_SHARED, VM_MAYWRITE vmas: do_mmap
* marks vma as VM_SHARED if it is shared, and the file was opened for
* writing i.e. vma may be mprotected writable even if now readonly.
*
@@ -644,9 +673,9 @@ struct inode {
};
dev_t i_rdev;
loff_t i_size;
- struct timespec64 i_atime;
- struct timespec64 i_mtime;
- struct timespec64 i_ctime;
+ struct timespec64 __i_atime;
+ struct timespec64 __i_mtime;
+ struct timespec64 __i_ctime; /* use inode_*_ctime accessors! */
spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
unsigned short i_bytes;
u8 i_blkbits;
@@ -711,7 +740,7 @@ struct inode {
#endif
#ifdef CONFIG_FS_ENCRYPTION
- struct fscrypt_info *i_crypt_info;
+ struct fscrypt_inode_info *i_crypt_info;
#endif
#ifdef CONFIG_FS_VERITY
@@ -956,29 +985,37 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
index < ra->start + ra->size);
}
+/*
+ * f_{lock,count,pos_lock} members can be highly contended and share
+ * the same cacheline. f_{lock,mode} are very frequently used together
+ * and so share the same cacheline as well. The read-mostly
+ * f_{path,inode,op} are kept on a separate cacheline.
+ */
struct file {
union {
+ /* fput() uses task work when closing and freeing file (default). */
+ struct callback_head f_task_work;
+ /* fput() must use workqueue (most kernel threads). */
struct llist_node f_llist;
- struct rcu_head f_rcuhead;
unsigned int f_iocb_flags;
};
- struct path f_path;
- struct inode *f_inode; /* cached value */
- const struct file_operations *f_op;
/*
* Protects f_ep, f_flags.
* Must not be taken from IRQ context.
*/
spinlock_t f_lock;
- atomic_long_t f_count;
- unsigned int f_flags;
fmode_t f_mode;
+ atomic_long_t f_count;
struct mutex f_pos_lock;
loff_t f_pos;
+ unsigned int f_flags;
struct fown_struct f_owner;
const struct cred *f_cred;
struct file_ra_state f_ra;
+ struct path f_path;
+ struct inode *f_inode; /* cached value */
+ const struct file_operations *f_op;
u64 f_version;
#ifdef CONFIG_SECURITY
@@ -1009,7 +1046,10 @@ static inline struct file *get_file(struct file *f)
atomic_long_inc(&f->f_count);
return f;
}
-#define get_file_rcu(x) atomic_long_inc_not_zero(&(x)->f_count)
+
+struct file *get_file_rcu(struct file __rcu **f);
+struct file *get_file_active(struct file **f);
+
#define file_count(x) atomic_long_read(&(x)->f_count)
#define MAX_NON_LFS ((1UL<<31) - 1)
@@ -1067,7 +1107,7 @@ extern void fasync_free(struct fasync_struct *);
extern void kill_fasync(struct fasync_struct **, int, int);
extern void __f_setown(struct file *filp, struct pid *, enum pid_type, int force);
-extern int f_setown(struct file *filp, unsigned long arg, int force);
+extern int f_setown(struct file *filp, int who, int force);
extern void f_delown(struct file *filp);
extern pid_t f_getown(struct file *filp);
extern int send_sigurg(struct fown_struct *fown);
@@ -1086,13 +1126,15 @@ extern int send_sigurg(struct fown_struct *fown);
#define SB_NOATIME BIT(10) /* Do not update access times. */
#define SB_NODIRATIME BIT(11) /* Do not update directory access times */
#define SB_SILENT BIT(15)
-#define SB_POSIXACL BIT(16) /* VFS does not apply the umask */
+#define SB_POSIXACL BIT(16) /* Supports POSIX ACLs */
#define SB_INLINECRYPT BIT(17) /* Use blk-crypto for encrypted files */
#define SB_KERNMOUNT BIT(22) /* this is a kern_mount call */
#define SB_I_VERSION BIT(23) /* Update inode I_version field */
#define SB_LAZYTIME BIT(25) /* Update the on-disk [acm]times lazily */
/* These sb flags are internal to the kernel */
+#define SB_DEAD BIT(21)
+#define SB_DYING BIT(24)
#define SB_SUBMOUNT BIT(26)
#define SB_FORCE BIT(27)
#define SB_NOSEC BIT(28)
@@ -1126,11 +1168,13 @@ extern int send_sigurg(struct fown_struct *fown);
#define SB_I_USERNS_VISIBLE 0x00000010 /* fstype already mounted */
#define SB_I_IMA_UNVERIFIABLE_SIGNATURE 0x00000020
#define SB_I_UNTRUSTED_MOUNTER 0x00000040
+#define SB_I_EVM_UNSUPPORTED 0x00000080
#define SB_I_SKIP_SYNC 0x00000100 /* Skip superblock at global sync */
#define SB_I_PERSB_BDI 0x00000200 /* has a per-sb bdi */
#define SB_I_TS_EXPIRY_WARNED 0x00000400 /* warned about timestamp range expiry */
#define SB_I_RETIRED 0x00000800 /* superblock shouldn't be reused */
+#define SB_I_NOUMASK 0x00001000 /* VFS does not apply umask */
/* Possible states of 'frozen' field */
enum {
@@ -1145,8 +1189,9 @@ enum {
#define SB_FREEZE_LEVELS (SB_FREEZE_COMPLETE - 1)
struct sb_writers {
- int frozen; /* Is sb frozen? */
- wait_queue_head_t wait_unfrozen; /* wait for thaw */
+ unsigned short frozen; /* Is sb frozen? */
+ int freeze_kcount; /* How many kernel freeze requests? */
+ int freeze_ucount; /* How many userspace freeze requests? */
struct percpu_rw_semaphore rw_sem[SB_FREEZE_LEVELS];
};
@@ -1171,7 +1216,7 @@ struct super_block {
#ifdef CONFIG_SECURITY
void *s_security;
#endif
- const struct xattr_handler **s_xattr;
+ const struct xattr_handler * const *s_xattr;
#ifdef CONFIG_FS_ENCRYPTION
const struct fscrypt_operations *s_cop;
struct fscrypt_keyring *s_master_keys; /* master crypto keys in use */
@@ -1186,6 +1231,7 @@ struct super_block {
struct hlist_bl_head s_roots; /* alternate root dentries for NFS */
struct list_head s_mounts; /* list of mounts; _not_ for fs use */
struct block_device *s_bdev;
+ struct bdev_handle *s_bdev_handle;
struct backing_dev_info *s_bdi;
struct mtd_info *s_mtd;
struct hlist_node s_instances;
@@ -1215,7 +1261,6 @@ struct super_block {
uuid_t s_uuid; /* UUID */
unsigned int s_max_links;
- fmode_t s_mode;
/*
* The next field is for VFS *only*. No filesystems have any business
@@ -1231,7 +1276,7 @@ struct super_block {
const struct dentry_operations *s_d_op; /* default d_op for dentries */
- struct shrinker s_shrink; /* per-sb shrinker handle */
+ struct shrinker *s_shrink; /* per-sb shrinker handle */
/* Number of inodes with nlink == 0 but still referenced */
atomic_long_t s_remove_count;
@@ -1242,7 +1287,7 @@ struct super_block {
*/
atomic_long_t s_fsnotify_connectors;
- /* Being remounted read-only */
+ /* Read-only state of the superblock is being changed */
int s_readonly_remount;
/* per-sb errseq_t for reporting writeback errors via syncfs */
@@ -1474,7 +1519,109 @@ static inline bool fsuidgid_has_mapping(struct super_block *sb,
kgid_has_mapping(fs_userns, kgid);
}
-extern struct timespec64 current_time(struct inode *inode);
+struct timespec64 current_time(struct inode *inode);
+struct timespec64 inode_set_ctime_current(struct inode *inode);
+
+static inline time64_t inode_get_atime_sec(const struct inode *inode)
+{
+ return inode->__i_atime.tv_sec;
+}
+
+static inline long inode_get_atime_nsec(const struct inode *inode)
+{
+ return inode->__i_atime.tv_nsec;
+}
+
+static inline struct timespec64 inode_get_atime(const struct inode *inode)
+{
+ return inode->__i_atime;
+}
+
+static inline struct timespec64 inode_set_atime_to_ts(struct inode *inode,
+ struct timespec64 ts)
+{
+ inode->__i_atime = ts;
+ return ts;
+}
+
+static inline struct timespec64 inode_set_atime(struct inode *inode,
+ time64_t sec, long nsec)
+{
+ struct timespec64 ts = { .tv_sec = sec,
+ .tv_nsec = nsec };
+ return inode_set_atime_to_ts(inode, ts);
+}
+
+static inline time64_t inode_get_mtime_sec(const struct inode *inode)
+{
+ return inode->__i_mtime.tv_sec;
+}
+
+static inline long inode_get_mtime_nsec(const struct inode *inode)
+{
+ return inode->__i_mtime.tv_nsec;
+}
+
+static inline struct timespec64 inode_get_mtime(const struct inode *inode)
+{
+ return inode->__i_mtime;
+}
+
+static inline struct timespec64 inode_set_mtime_to_ts(struct inode *inode,
+ struct timespec64 ts)
+{
+ inode->__i_mtime = ts;
+ return ts;
+}
+
+static inline struct timespec64 inode_set_mtime(struct inode *inode,
+ time64_t sec, long nsec)
+{
+ struct timespec64 ts = { .tv_sec = sec,
+ .tv_nsec = nsec };
+ return inode_set_mtime_to_ts(inode, ts);
+}
+
+static inline time64_t inode_get_ctime_sec(const struct inode *inode)
+{
+ return inode->__i_ctime.tv_sec;
+}
+
+static inline long inode_get_ctime_nsec(const struct inode *inode)
+{
+ return inode->__i_ctime.tv_nsec;
+}
+
+static inline struct timespec64 inode_get_ctime(const struct inode *inode)
+{
+ return inode->__i_ctime;
+}
+
+static inline struct timespec64 inode_set_ctime_to_ts(struct inode *inode,
+ struct timespec64 ts)
+{
+ inode->__i_ctime = ts;
+ return ts;
+}
+
+/**
+ * inode_set_ctime - set the ctime in the inode
+ * @inode: inode in which to set the ctime
+ * @sec: tv_sec value to set
+ * @nsec: tv_nsec value to set
+ *
+ * Set the ctime in @inode to { @sec, @nsec }
+ */
+static inline struct timespec64 inode_set_ctime(struct inode *inode,
+ time64_t sec, long nsec)
+{
+ struct timespec64 ts = { .tv_sec = sec,
+ .tv_nsec = nsec };
+
+ return inode_set_ctime_to_ts(inode, ts);
+}
+
+struct timespec64 simple_inode_init_ts(struct inode *inode);
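/*
 * Usage sketch for the accessor API above (illustrative, not part of the
 * patch): filesystems go through the helpers rather than touching the
 * renamed __i_{a,m,c}time fields directly, e.g. when stamping all three
 * times on a modification:
 */
static inline void example_update_times(struct inode *inode)
{
	struct timespec64 now = inode_set_ctime_current(inode);

	inode_set_mtime_to_ts(inode, now);
	inode_set_atime_to_ts(inode, now);
}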
/*
* Snapshotting support.
@@ -1504,9 +1651,70 @@ static inline bool __sb_start_write_trylock(struct super_block *sb, int level)
#define __sb_writers_release(sb, lev) \
percpu_rwsem_release(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_)
+/**
+ * __sb_write_started - check if sb freeze level is held
+ * @sb: the super we write to
+ * @level: the freeze level
+ *
+ * * > 0 - sb freeze level is held
+ * * 0 - sb freeze level is not held
+ * * < 0 - !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN
+ */
+static inline int __sb_write_started(const struct super_block *sb, int level)
+{
+ return lockdep_is_held_type(sb->s_writers.rw_sem + level - 1, 1);
+}
+
+/**
+ * sb_write_started - check if SB_FREEZE_WRITE is held
+ * @sb: the super we write to
+ *
+ * May be false positive with !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN.
+ */
static inline bool sb_write_started(const struct super_block *sb)
{
- return lockdep_is_held_type(sb->s_writers.rw_sem + SB_FREEZE_WRITE - 1, 1);
+ return __sb_write_started(sb, SB_FREEZE_WRITE);
+}
+
+/**
+ * sb_write_not_started - check if SB_FREEZE_WRITE is not held
+ * @sb: the super we write to
+ *
+ * May be false positive with !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN.
+ */
+static inline bool sb_write_not_started(const struct super_block *sb)
+{
+ return __sb_write_started(sb, SB_FREEZE_WRITE) <= 0;
+}
+
+/**
+ * file_write_started - check if SB_FREEZE_WRITE is held
+ * @file: the file we write to
+ *
+ * May be false positive with !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN.
+ * May be false positive with !S_ISREG, because file_start_write() has
+ * no effect on !S_ISREG.
+ */
+static inline bool file_write_started(const struct file *file)
+{
+ if (!S_ISREG(file_inode(file)->i_mode))
+ return true;
+ return sb_write_started(file_inode(file)->i_sb);
+}
+
+/**
+ * file_write_not_started - check if SB_FREEZE_WRITE is not held
+ * @file: the file we write to
+ *
+ * May be false positive with !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN.
+ * May be false positive with !S_ISREG, because file_start_write() has
+ * no effect on !S_ISREG.
+ */
+static inline bool file_write_not_started(const struct file *file)
+{
+ if (!S_ISREG(file_inode(file)->i_mode))
+ return true;
+ return sb_write_not_started(file_inode(file)->i_sb);
}
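/*
 * Illustrative assertion built on the helpers above (hypothetical): code
 * that requires freeze protection to already be held can check for it.
 */
static inline void example_assert_write_started(struct file *file)
{
	WARN_ON_ONCE(file_write_not_started(file));
}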
/**
@@ -1672,9 +1880,12 @@ static inline int vfs_whiteout(struct mnt_idmap *idmap,
WHITEOUT_DEV);
}
-struct file *vfs_tmpfile_open(struct mnt_idmap *idmap,
- const struct path *parentpath,
- umode_t mode, int open_flag, const struct cred *cred);
+struct file *kernel_tmpfile_open(struct mnt_idmap *idmap,
+ const struct path *parentpath,
+ umode_t mode, int open_flag,
+ const struct cred *cred);
+struct file *kernel_file_open(const struct path *path, int flags,
+ struct inode *inode, const struct cred *cred);
int vfs_mkobj(struct dentry *, umode_t,
int (*f)(struct dentry *, umode_t, void *),
@@ -1767,6 +1978,7 @@ struct dir_context {
struct iov_iter;
struct io_uring_cmd;
+struct offset_ctx;
struct file_operations {
struct module *owner;
@@ -1777,7 +1989,6 @@ struct file_operations {
ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
int (*iopoll)(struct kiocb *kiocb, struct io_comp_batch *,
unsigned int flags);
- int (*iterate) (struct file *, struct dir_context *);
int (*iterate_shared) (struct file *, struct dir_context *);
__poll_t (*poll) (struct file *, struct poll_table_struct *);
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
@@ -1790,13 +2001,13 @@ struct file_operations {
int (*fsync) (struct file *, loff_t, loff_t, int datasync);
int (*fasync) (int, struct file *, int);
int (*lock) (struct file *, int, struct file_lock *);
- ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
int (*check_flags)(int);
int (*flock) (struct file *, int, struct file_lock *);
ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
- int (*setlease)(struct file *, long, struct file_lock **, void **);
+ void (*splice_eof)(struct file *file);
+ int (*setlease)(struct file *, int, struct file_lock **, void **);
long (*fallocate)(struct file *file, int mode, loff_t offset,
loff_t len);
void (*show_fdinfo)(struct seq_file *m, struct file *f);
@@ -1814,6 +2025,13 @@ struct file_operations {
unsigned int poll_flags);
} __randomize_layout;
+/* Wrap a directory iterator that needs exclusive inode access */
+int wrap_directory_iterator(struct file *, struct dir_context *,
+ int (*) (struct file *, struct dir_context *));
+#define WRAP_DIR_ITER(x) \
+ static int shared_##x(struct file *file, struct dir_context *ctx) \
+ { return wrap_directory_iterator(file, ctx, x); }
+
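/*
 * Illustrative use of WRAP_DIR_ITER() (filesystem and function names are
 * hypothetical): a legacy exclusive directory iterator can be exposed as
 * ->iterate_shared() like so:
 *
 *	static int examplefs_readdir(struct file *file, struct dir_context *ctx);
 *	WRAP_DIR_ITER(examplefs_readdir)  -- defines shared_examplefs_readdir()
 *
 *	static const struct file_operations examplefs_dir_ops = {
 *		.iterate_shared = shared_examplefs_readdir,
 *	};
 */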
struct inode_operations {
struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
const char * (*get_link) (struct dentry *, struct inode *, struct delayed_call *);
@@ -1841,7 +2059,7 @@ struct inode_operations {
ssize_t (*listxattr) (struct dentry *, char *, size_t);
int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
u64 len);
- int (*update_time)(struct inode *, struct timespec64 *, int);
+ int (*update_time)(struct inode *, int);
int (*atomic_open)(struct inode *, struct dentry *,
struct file *, unsigned open_flag,
umode_t create_mode);
@@ -1854,6 +2072,7 @@ struct inode_operations {
int (*fileattr_set)(struct mnt_idmap *idmap,
struct dentry *dentry, struct fileattr *fa);
int (*fileattr_get)(struct dentry *dentry, struct fileattr *fa);
+ struct offset_ctx *(*get_offset_ctx)(struct inode *inode);
} ____cacheline_aligned;
static inline ssize_t call_read_iter(struct file *file, struct kiocb *kio,
@@ -1877,9 +2096,6 @@ extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *,
loff_t, size_t, unsigned int);
-extern ssize_t generic_copy_file_range(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out,
- size_t len, unsigned int flags);
int __generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
loff_t *len, unsigned int remap_flags,
@@ -1887,9 +2103,6 @@ int __generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
loff_t *count, unsigned int remap_flags);
-extern loff_t do_clone_file_range(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out,
- loff_t len, unsigned int remap_flags);
extern loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
loff_t len, unsigned int remap_flags);
@@ -1899,6 +2112,25 @@ extern loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos,
struct file *dst_file, loff_t dst_pos,
loff_t len, unsigned int remap_flags);
+/**
+ * enum freeze_holder - holder of the freeze
+ * @FREEZE_HOLDER_KERNEL: kernel wants to freeze or thaw filesystem
+ * @FREEZE_HOLDER_USERSPACE: userspace wants to freeze or thaw filesystem
+ * @FREEZE_MAY_NEST: whether nesting freeze and thaw requests is allowed
+ *
+ * Indicate who the owner of the freeze or thaw request is and whether
+ * the freeze needs to be exclusive or can nest.
+ * Without @FREEZE_MAY_NEST, multiple freeze and thaw requests from the
+ * same holder aren't allowed. It is however allowed to hold a single
+ * @FREEZE_HOLDER_USERSPACE and a single @FREEZE_HOLDER_KERNEL freeze at
+ * the same time. This is relied upon by some filesystems during online
+ * repair or similar.
+ */
+enum freeze_holder {
+ FREEZE_HOLDER_KERNEL = (1U << 0),
+ FREEZE_HOLDER_USERSPACE = (1U << 1),
+ FREEZE_MAY_NEST = (1U << 2),
+};
struct super_operations {
struct inode *(*alloc_inode)(struct super_block *sb);
@@ -1911,9 +2143,9 @@ struct super_operations {
void (*evict_inode) (struct inode *);
void (*put_super) (struct super_block *);
int (*sync_fs)(struct super_block *sb, int wait);
- int (*freeze_super) (struct super_block *);
+ int (*freeze_super) (struct super_block *, enum freeze_holder who);
int (*freeze_fs) (struct super_block *);
- int (*thaw_super) (struct super_block *);
+ int (*thaw_super) (struct super_block *, enum freeze_holder who);
int (*unfreeze_fs) (struct super_block *);
int (*statfs) (struct dentry *, struct kstatfs *);
int (*remount_fs) (struct super_block *, int *, char *);
@@ -1932,6 +2164,7 @@ struct super_operations {
struct shrink_control *);
long (*free_cached_objects)(struct super_block *,
struct shrink_control *);
+ void (*shutdown)(struct super_block *sb);
};
/*
@@ -1988,7 +2221,12 @@ static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags
#define IS_NOQUOTA(inode) ((inode)->i_flags & S_NOQUOTA)
#define IS_APPEND(inode) ((inode)->i_flags & S_APPEND)
#define IS_IMMUTABLE(inode) ((inode)->i_flags & S_IMMUTABLE)
+
+#ifdef CONFIG_FS_POSIX_ACL
#define IS_POSIXACL(inode) __IS_FLG(inode, SB_POSIXACL)
+#else
+#define IS_POSIXACL(inode) 0
+#endif
#define IS_DEADDIR(inode) ((inode)->i_flags & S_DEAD)
#define IS_NOCMTIME(inode) ((inode)->i_flags & S_NOCMTIME)
@@ -2132,7 +2370,7 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
#define I_CREATING (1 << 15)
#define I_DONTCACHE (1 << 16)
#define I_SYNC_QUEUED (1 << 17)
-#define I_PINNING_FSCACHE_WB (1 << 18)
+#define I_PINNING_NETFS_WB (1 << 18)
#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
#define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)
@@ -2190,7 +2428,7 @@ enum file_time_flags {
extern bool atime_needs_update(const struct path *, struct inode *);
extern void touch_atime(const struct path *);
-int inode_update_time(struct inode *inode, struct timespec64 *time, int flags);
+int inode_update_time(struct inode *inode, int flags);
static inline void file_accessed(struct file *file)
{
@@ -2263,6 +2501,7 @@ struct super_block *sget(struct file_system_type *type,
int (*test)(struct super_block *,void *),
int (*set)(struct super_block *,void *),
int flags, void *data);
+struct super_block *sget_dev(struct fs_context *fc, dev_t dev);
/* Alas, no aliases. Too much hassle with bringing module.h everywhere */
#define fops_get(fops) \
@@ -2286,8 +2525,8 @@ extern int unregister_filesystem(struct file_system_type *);
extern int vfs_statfs(const struct path *, struct kstatfs *);
extern int user_statfs(const char __user *, struct kstatfs *);
extern int fd_statfs(int, struct kstatfs *);
-extern int freeze_super(struct super_block *super);
-extern int thaw_super(struct super_block *super);
+int freeze_super(struct super_block *super, enum freeze_holder who);
+int thaw_super(struct super_block *super, enum freeze_holder who);
extern __printf(2, 3)
int super_setup_bdi_name(struct super_block *sb, char *fmt, ...);
extern int super_setup_bdi(struct super_block *sb);
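/*
 * Illustrative pairing of the new freeze_holder argument (hypothetical
 * caller): a kernel-internal user freezes with its own holder so it can
 * coexist with a userspace FIFREEZE holder on the same superblock.
 */
static int example_kernel_freeze_work(struct super_block *sb)
{
	int ret;

	ret = freeze_super(sb, FREEZE_HOLDER_KERNEL);
	if (ret)
		return ret;
	/* ... operate on the quiesced filesystem ... */
	return thaw_super(sb, FREEZE_HOLDER_KERNEL);
}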
@@ -2296,7 +2535,8 @@ extern int current_umask(void);
extern void ihold(struct inode * inode);
extern void iput(struct inode *);
-extern int generic_update_time(struct inode *, struct timespec64 *, int);
+int inode_update_timestamps(struct inode *inode, int flags);
+int generic_update_time(struct inode *, int);
/* /sys/fs */
extern struct kobject *fs_kobj;
@@ -2308,13 +2548,13 @@ struct audit_names;
struct filename {
const char *name; /* pointer to actual string */
const __user char *uptr; /* original userland pointer */
- int refcnt;
+ atomic_t refcnt;
struct audit_names *aname;
const char iname[];
};
static_assert(offsetof(struct filename, iname) % sizeof(long) == 0);
-static inline struct mnt_idmap *file_mnt_idmap(struct file *file)
+static inline struct mnt_idmap *file_mnt_idmap(const struct file *file)
{
return mnt_idmap(file->f_path.mnt);
}
@@ -2349,11 +2589,36 @@ static inline struct file *file_open_root_mnt(struct vfsmount *mnt,
return file_open_root(&(struct path){.mnt = mnt, .dentry = mnt->mnt_root},
name, flags, mode);
}
-extern struct file * dentry_open(const struct path *, int, const struct cred *);
-extern struct file *dentry_create(const struct path *path, int flags,
- umode_t mode, const struct cred *cred);
-extern struct file * open_with_fake_path(const struct path *, int,
- struct inode*, const struct cred *);
+struct file *dentry_open(const struct path *path, int flags,
+ const struct cred *creds);
+struct file *dentry_create(const struct path *path, int flags, umode_t mode,
+ const struct cred *cred);
+struct path *backing_file_user_path(struct file *f);
+
+/*
+ * When mmapping a file on a stackable filesystem (e.g., overlayfs), the file
+ * stored in ->vm_file is a backing file whose f_inode is on the underlying
+ * filesystem. When the mapped file path and inode number are displayed to
+ * the user (e.g. via /proc/<pid>/maps), these helpers should be used to get the
+ * path and inode number to display to the user, which is the path of the fd
+ * that user has requested to map and the inode number that would be returned
+ * by fstat() on that same fd.
+ */
+/* Get the path to display in /proc/<pid>/maps */
+static inline const struct path *file_user_path(struct file *f)
+{
+ if (unlikely(f->f_mode & FMODE_BACKING))
+ return backing_file_user_path(f);
+ return &f->f_path;
+}
+/* Get the inode whose inode number to display in /proc/<pid>/maps */
+static inline const struct inode *file_user_inode(struct file *f)
+{
+ if (unlikely(f->f_mode & FMODE_BACKING))
+ return d_inode(backing_file_user_path(f)->dentry);
+ return file_inode(f);
+}
+
static inline struct file *file_clone_open(struct file *file)
{
return dentry_open(&file->f_path, file->f_flags, file->f_cred);
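/*
 * Usage sketch for the helpers above (illustrative): code reporting a
 * mapped file to userspace picks the user-visible path and inode number.
 */
static void example_show_map_file(struct seq_file *m, struct file *file)
{
	const struct path *path = file_user_path(file);

	seq_printf(m, "%lu ", file_user_inode(file)->i_ino);
	seq_path(m, path, "\n");
}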
@@ -2515,6 +2780,13 @@ static inline bool inode_wrong_type(const struct inode *inode, umode_t mode)
return (inode->i_mode ^ mode) & S_IFMT;
}
+/**
+ * file_start_write - get write access to a superblock for regular file io
+ * @file: the file we want to write to
+ *
+ * This is a variant of sb_start_write() which is a noop on non-regular files.
+ * Should be matched with a call to file_end_write().
+ */
static inline void file_start_write(struct file *file)
{
if (!S_ISREG(file_inode(file)->i_mode))
@@ -2529,11 +2801,53 @@ static inline bool file_start_write_trylock(struct file *file)
return sb_start_write_trylock(file_inode(file)->i_sb);
}
+/**
+ * file_end_write - drop write access to a superblock of a regular file
+ * @file: the file we wrote to
+ *
+ * Should be matched with a call to file_start_write().
+ */
static inline void file_end_write(struct file *file)
{
if (!S_ISREG(file_inode(file)->i_mode))
return;
- __sb_end_write(file_inode(file)->i_sb, SB_FREEZE_WRITE);
+ sb_end_write(file_inode(file)->i_sb);
+}
+
+/**
+ * kiocb_start_write - get write access to a superblock for async file io
+ * @iocb: the io context we want to submit the write with
+ *
+ * This is a variant of sb_start_write() for async io submission.
+ * Should be matched with a call to kiocb_end_write().
+ */
+static inline void kiocb_start_write(struct kiocb *iocb)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+
+ sb_start_write(inode->i_sb);
+ /*
+ * Fool lockdep by telling it the lock got released so that it
+ * doesn't complain about the held lock when we return to userspace.
+ */
+ __sb_writers_release(inode->i_sb, SB_FREEZE_WRITE);
+}
+
+/**
+ * kiocb_end_write - drop write access to a superblock after async file io
+ * @iocb: the io context we submitted the write with
+ *
+ * Should be matched with a call to kiocb_start_write().
+ */
+static inline void kiocb_end_write(struct kiocb *iocb)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+
+ /*
+ * Tell lockdep we inherited freeze protection from submission thread.
+ */
+ __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
+ sb_end_write(inode->i_sb);
}
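/*
 * Typical pairing of the helpers above (sketch, not part of the patch):
 * an async write path takes freeze protection at submission time and the
 * completion handler, possibly running in another context, drops it.
 */
static void example_submit_async_write(struct kiocb *iocb)
{
	kiocb_start_write(iocb);
	/* ... queue the IO; completion may run from another thread ... */
}

static void example_async_write_done(struct kiocb *iocb, long ret)
{
	kiocb_end_write(iocb);
	iocb->ki_complete(iocb, ret);
}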
/*
@@ -2583,8 +2897,7 @@ static inline bool inode_is_open_for_write(const struct inode *inode)
#if defined(CONFIG_IMA) || defined(CONFIG_FILE_LOCKING)
static inline void i_readcount_dec(struct inode *inode)
{
- BUG_ON(!atomic_read(&inode->i_readcount));
- atomic_dec(&inode->i_readcount);
+ BUG_ON(atomic_dec_return(&inode->i_readcount) < 0);
}
static inline void i_readcount_inc(struct inode *inode)
{
@@ -2669,7 +2982,7 @@ extern void evict_inodes(struct super_block *sb);
void dump_mapping(const struct address_space *);
/*
- * Userspace may rely on the the inode number being non-zero. For example, glibc
+ * Userspace may rely on the inode number being non-zero. For example, glibc
* simply ignores files with zero i_ino in unlink() and other places.
*
* As an additional complication, if userspace was compiled with
@@ -2738,6 +3051,8 @@ extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *);
extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *);
extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *);
ssize_t generic_perform_write(struct kiocb *, struct iov_iter *);
+ssize_t direct_write_fallback(struct kiocb *iocb, struct iov_iter *iter,
+ ssize_t direct_written, ssize_t buffered_written);
ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos,
rwf_t flags);
@@ -2752,17 +3067,11 @@ ssize_t vfs_iocb_iter_write(struct file *file, struct kiocb *iocb,
ssize_t filemap_splice_read(struct file *in, loff_t *ppos,
struct pipe_inode_info *pipe,
size_t len, unsigned int flags);
-ssize_t direct_splice_read(struct file *in, loff_t *ppos,
- struct pipe_inode_info *pipe,
- size_t len, unsigned int flags);
-extern ssize_t generic_file_splice_read(struct file *, loff_t *,
- struct pipe_inode_info *, size_t, unsigned int);
+ssize_t copy_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe,
+ size_t len, unsigned int flags);
extern ssize_t iter_file_splice_write(struct pipe_inode_info *,
struct file *, loff_t *, size_t, unsigned int);
-extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
- struct file *out, loff_t *, size_t len, unsigned int flags);
-extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
- loff_t *opos, size_t len, unsigned int flags);
extern void
@@ -2837,11 +3146,6 @@ static inline void inode_dio_end(struct inode *inode)
wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
}
-/*
- * Warn about a page cache invalidation failure diring a direct I/O write.
- */
-void dio_warn_stale_pagecache(struct file *filp);
-
extern void inode_set_flags(struct inode *inode, unsigned int flags,
unsigned int mask);
@@ -2857,7 +3161,7 @@ extern void page_put_link(void *);
extern int page_symlink(struct inode *inode, const char *symname, int len);
extern const struct inode_operations page_symlink_inode_operations;
extern void kfree_link(void *);
-void generic_fillattr(struct mnt_idmap *, struct inode *, struct kstat *);
+void generic_fillattr(struct mnt_idmap *, u32, struct inode *, struct kstat *);
void generic_fill_statx_attr(struct inode *inode, struct kstat *stat);
extern int vfs_getattr_nosec(const struct path *, struct kstat *, u32, unsigned int);
extern int vfs_getattr(const struct path *, struct kstat *, u32, unsigned int);
@@ -2896,8 +3200,6 @@ extern int vfs_readlink(struct dentry *, char __user *, int);
extern struct file_system_type *get_filesystem(struct file_system_type *fs);
extern void put_filesystem(struct file_system_type *fs);
extern struct file_system_type *get_fs_type(const char *name);
-extern struct super_block *get_super(struct block_device *);
-extern struct super_block *get_active_super(struct block_device *bdev);
extern void drop_super(struct super_block *sb);
extern void drop_super_exclusive(struct super_block *sb);
extern void iterate_supers(void (*)(struct super_block *, void *), void *);
@@ -2917,6 +3219,8 @@ extern int simple_open(struct inode *inode, struct file *file);
extern int simple_link(struct dentry *, struct inode *, struct dentry *);
extern int simple_unlink(struct inode *, struct dentry *);
extern int simple_rmdir(struct inode *, struct dentry *);
+void simple_rename_timestamp(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry);
extern int simple_rename_exchange(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry);
extern int simple_rename(struct mnt_idmap *, struct inode *,
@@ -2933,7 +3237,7 @@ extern int simple_write_begin(struct file *file, struct address_space *mapping,
extern const struct address_space_operations ram_aops;
extern int always_delete_dentry(const struct dentry *);
extern struct inode *alloc_anon_inode(struct super_block *);
-extern int simple_nosetlease(struct file *, long, struct file_lock **, void **);
+extern int simple_nosetlease(struct file *, int, struct file_lock **, void **);
extern const struct dentry_operations simple_dentry_operations;
extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags);
@@ -2954,6 +3258,22 @@ extern ssize_t simple_read_from_buffer(void __user *to, size_t count,
extern ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
const void __user *from, size_t count);
+struct offset_ctx {
+ struct xarray xa;
+ u32 next_offset;
+};
+
+void simple_offset_init(struct offset_ctx *octx);
+int simple_offset_add(struct offset_ctx *octx, struct dentry *dentry);
+void simple_offset_remove(struct offset_ctx *octx, struct dentry *dentry);
+int simple_offset_rename_exchange(struct inode *old_dir,
+ struct dentry *old_dentry,
+ struct inode *new_dir,
+ struct dentry *new_dentry);
+void simple_offset_destroy(struct offset_ctx *octx);
+
+extern const struct file_operations simple_offset_dir_operations;
+
extern int __generic_file_fsync(struct file *, loff_t, loff_t, int);
extern int generic_file_fsync(struct file *, loff_t, loff_t, int);
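/*
 * Wiring sketch for the offset_ctx API above (filesystem names are
 * hypothetical): a simple in-memory filesystem embeds an offset_ctx in
 * its per-inode info, calls simple_offset_init() when creating a
 * directory, and exposes the context via ->get_offset_ctx().
 */
struct examplefs_inode {
	struct offset_ctx dir_offsets;
	struct inode vfs_inode;
};

static struct offset_ctx *examplefs_get_offset_ctx(struct inode *inode)
{
	return &container_of(inode, struct examplefs_inode, vfs_inode)->dir_offsets;
}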
diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h
index ff6341e09925..c13e99cbbf81 100644
--- a/include/linux/fs_context.h
+++ b/include/linux/fs_context.h
@@ -109,6 +109,7 @@ struct fs_context {
bool need_free:1; /* Need to call ops->free() */
bool global:1; /* Goes into &init_user_ns */
bool oldapi:1; /* Coming from mount(2) */
+ bool exclusive:1; /* create new superblock, reject existing one */
};
struct fs_context_operations {
@@ -135,6 +136,8 @@ extern struct fs_context *vfs_dup_fs_context(struct fs_context *fc);
extern int vfs_parse_fs_param(struct fs_context *fc, struct fs_parameter *param);
extern int vfs_parse_fs_string(struct fs_context *fc, const char *key,
const char *value, size_t v_size);
+int vfs_parse_monolithic_sep(struct fs_context *fc, void *data,
+ char *(*sep)(char **));
extern int generic_parse_monolithic(struct fs_context *fc, void *data);
extern int vfs_get_tree(struct fs_context *fc);
extern void put_fs_context(struct fs_context *fc);
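/*
 * Sketch of a separator callback for vfs_parse_monolithic_sep() above
 * (illustrative): split a comma-separated option string strsep()-style.
 * A real user may need smarter splitting, e.g. to honor escaped commas.
 */
static char *example_next_opt(char **s)
{
	return strsep(s, ",");
}

static int example_parse_monolithic(struct fs_context *fc, void *data)
{
	return vfs_parse_monolithic_sep(fc, data, example_next_opt);
}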
@@ -150,14 +153,13 @@ extern int get_tree_nodev(struct fs_context *fc,
extern int get_tree_single(struct fs_context *fc,
int (*fill_super)(struct super_block *sb,
struct fs_context *fc));
-extern int get_tree_single_reconf(struct fs_context *fc,
- int (*fill_super)(struct super_block *sb,
- struct fs_context *fc));
extern int get_tree_keyed(struct fs_context *fc,
int (*fill_super)(struct super_block *sb,
struct fs_context *fc),
void *key);
+int setup_bdev_super(struct super_block *sb, int sb_flags,
+ struct fs_context *fc);
extern int get_tree_bdev(struct fs_context *fc,
int (*fill_super)(struct super_block *sb,
struct fs_context *fc));
diff --git a/include/linux/fs_enet_pd.h b/include/linux/fs_enet_pd.h
deleted file mode 100644
index 77d783f71527..000000000000
--- a/include/linux/fs_enet_pd.h
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Platform information definitions for the
- * universal Freescale Ethernet driver.
- *
- * Copyright (c) 2003 Intracom S.A.
- * by Pantelis Antoniou <panto@intracom.gr>
- *
- * 2005 (c) MontaVista Software, Inc.
- * Vitaly Bordug <vbordug@ru.mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#ifndef FS_ENET_PD_H
-#define FS_ENET_PD_H
-
-#include <linux/clk.h>
-#include <linux/string.h>
-#include <linux/of_mdio.h>
-#include <linux/if_ether.h>
-#include <asm/types.h>
-
-#define FS_ENET_NAME "fs_enet"
-
-enum fs_id {
- fsid_fec1,
- fsid_fec2,
- fsid_fcc1,
- fsid_fcc2,
- fsid_fcc3,
- fsid_scc1,
- fsid_scc2,
- fsid_scc3,
- fsid_scc4,
-};
-
-#define FS_MAX_INDEX 9
-
-static inline int fs_get_fec_index(enum fs_id id)
-{
- if (id >= fsid_fec1 && id <= fsid_fec2)
- return id - fsid_fec1;
- return -1;
-}
-
-static inline int fs_get_fcc_index(enum fs_id id)
-{
- if (id >= fsid_fcc1 && id <= fsid_fcc3)
- return id - fsid_fcc1;
- return -1;
-}
-
-static inline int fs_get_scc_index(enum fs_id id)
-{
- if (id >= fsid_scc1 && id <= fsid_scc4)
- return id - fsid_scc1;
- return -1;
-}
-
-static inline int fs_fec_index2id(int index)
-{
- int id = fsid_fec1 + index - 1;
- if (id >= fsid_fec1 && id <= fsid_fec2)
- return id;
- return FS_MAX_INDEX;
- }
-
-static inline int fs_fcc_index2id(int index)
-{
- int id = fsid_fcc1 + index - 1;
- if (id >= fsid_fcc1 && id <= fsid_fcc3)
- return id;
- return FS_MAX_INDEX;
-}
-
-static inline int fs_scc_index2id(int index)
-{
- int id = fsid_scc1 + index - 1;
- if (id >= fsid_scc1 && id <= fsid_scc4)
- return id;
- return FS_MAX_INDEX;
-}
-
-enum fs_mii_method {
- fsmii_fixed,
- fsmii_fec,
- fsmii_bitbang,
-};
-
-enum fs_ioport {
- fsiop_porta,
- fsiop_portb,
- fsiop_portc,
- fsiop_portd,
- fsiop_porte,
-};
-
-struct fs_mii_bit {
- u32 offset;
- u8 bit;
- u8 polarity;
-};
-struct fs_mii_bb_platform_info {
- struct fs_mii_bit mdio_dir;
- struct fs_mii_bit mdio_dat;
- struct fs_mii_bit mdc_dat;
- int delay; /* delay in us */
- int irq[32]; /* irqs per phy's */
-};
-
-struct fs_platform_info {
-
- void(*init_ioports)(struct fs_platform_info *);
- /* device specific information */
- int fs_no; /* controller index */
- char fs_type[4]; /* controller type */
-
- u32 cp_page; /* CPM page */
- u32 cp_block; /* CPM sblock */
- u32 cp_command; /* CPM page/sblock/mcn */
-
- u32 clk_trx; /* some stuff for pins & mux configuration*/
- u32 clk_rx;
- u32 clk_tx;
- u32 clk_route;
- u32 clk_mask;
-
- u32 mem_offset;
- u32 dpram_offset;
- u32 fcc_regs_c;
-
- u32 device_flags;
-
- struct device_node *phy_node;
- const struct fs_mii_bus_info *bus_info;
-
- int rx_ring, tx_ring; /* number of buffers on rx */
- __u8 macaddr[ETH_ALEN]; /* mac address */
- int rx_copybreak; /* limit we copy small frames */
- int napi_weight; /* NAPI weight */
-
- int use_rmii; /* use RMII mode */
- int has_phy; /* if the network is phy container as well...*/
-
- struct clk *clk_per; /* 'per' clock for register access */
-};
-struct fs_mii_fec_platform_info {
- u32 irq[32];
- u32 mii_speed;
-};
-
-static inline int fs_get_id(struct fs_platform_info *fpi)
-{
- if(strstr(fpi->fs_type, "SCC"))
- return fs_scc_index2id(fpi->fs_no);
- if(strstr(fpi->fs_type, "FCC"))
- return fs_fcc_index2id(fpi->fs_no);
- if(strstr(fpi->fs_type, "FEC"))
- return fs_fec_index2id(fpi->fs_no);
- return fpi->fs_no;
-}
-
-#endif
diff --git a/include/linux/fs_stack.h b/include/linux/fs_stack.h
index 54210a42c30d..2b1f74b24070 100644
--- a/include/linux/fs_stack.h
+++ b/include/linux/fs_stack.h
@@ -16,15 +16,15 @@ extern void fsstack_copy_inode_size(struct inode *dst, struct inode *src);
static inline void fsstack_copy_attr_atime(struct inode *dest,
const struct inode *src)
{
- dest->i_atime = src->i_atime;
+ inode_set_atime_to_ts(dest, inode_get_atime(src));
}
static inline void fsstack_copy_attr_times(struct inode *dest,
const struct inode *src)
{
- dest->i_atime = src->i_atime;
- dest->i_mtime = src->i_mtime;
- dest->i_ctime = src->i_ctime;
+ inode_set_atime_to_ts(dest, inode_get_atime(src));
+ inode_set_mtime_to_ts(dest, inode_get_mtime(src));
+ inode_set_ctime_to_ts(dest, inode_get_ctime(src));
}
#endif /* _LINUX_FS_STACK_H */
diff --git a/include/linux/fs_uart_pd.h b/include/linux/fs_uart_pd.h
deleted file mode 100644
index 36b61ff39277..000000000000
--- a/include/linux/fs_uart_pd.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Platform information definitions for the CPM Uart driver.
- *
- * 2006 (c) MontaVista Software, Inc.
- * Vitaly Bordug <vbordug@ru.mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#ifndef FS_UART_PD_H
-#define FS_UART_PD_H
-
-#include <asm/types.h>
-
-enum fs_uart_id {
- fsid_smc1_uart,
- fsid_smc2_uart,
- fsid_scc1_uart,
- fsid_scc2_uart,
- fsid_scc3_uart,
- fsid_scc4_uart,
- fs_uart_nr,
-};
-
-static inline int fs_uart_id_scc2fsid(int id)
-{
- return fsid_scc1_uart + id - 1;
-}
-
-static inline int fs_uart_id_fsid2scc(int id)
-{
- return id - fsid_scc1_uart + 1;
-}
-
-static inline int fs_uart_id_smc2fsid(int id)
-{
- return fsid_smc1_uart + id - 1;
-}
-
-static inline int fs_uart_id_fsid2smc(int id)
-{
- return id - fsid_smc1_uart + 1;
-}
-
-struct fs_uart_platform_info {
- void(*init_ioports)(struct fs_uart_platform_info *);
- /* device specific information */
- int fs_no; /* controller index */
- char fs_type[4]; /* controller type */
- u32 uart_clk;
- u8 tx_num_fifo;
- u8 tx_buf_size;
- u8 rx_num_fifo;
- u8 rx_buf_size;
- u8 brg;
- u8 clk_rx;
- u8 clk_tx;
-};
-
-static inline int fs_uart_get_id(struct fs_uart_platform_info *fpi)
-{
- if(strstr(fpi->fs_type, "SMC"))
- return fs_uart_id_smc2fsid(fpi->fs_no);
- if(strstr(fpi->fs_type, "SCC"))
- return fs_uart_id_scc2fsid(fpi->fs_no);
- return fpi->fs_no;
-}
-
-#endif
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index a174cedf4d90..bdf7f3eddf0a 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -189,17 +189,20 @@ extern atomic_t fscache_n_write;
extern atomic_t fscache_n_no_write_space;
extern atomic_t fscache_n_no_create_space;
extern atomic_t fscache_n_culled;
+extern atomic_t fscache_n_dio_misfit;
#define fscache_count_read() atomic_inc(&fscache_n_read)
#define fscache_count_write() atomic_inc(&fscache_n_write)
#define fscache_count_no_write_space() atomic_inc(&fscache_n_no_write_space)
#define fscache_count_no_create_space() atomic_inc(&fscache_n_no_create_space)
#define fscache_count_culled() atomic_inc(&fscache_n_culled)
+#define fscache_count_dio_misfit() atomic_inc(&fscache_n_dio_misfit)
#else
#define fscache_count_read() do {} while(0)
#define fscache_count_write() do {} while(0)
#define fscache_count_no_write_space() do {} while(0)
#define fscache_count_no_create_space() do {} while(0)
#define fscache_count_culled() do {} while(0)
+#define fscache_count_dio_misfit() do {} while(0)
#endif
#endif /* _LINUX_FSCACHE_CACHE_H */
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index 8e312c8323a8..6e8562cbcc43 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -437,9 +437,6 @@ const struct netfs_cache_ops *fscache_operation_valid(const struct netfs_cache_r
* indicates the cache resources to which the operation state should be
* attached; @cookie indicates the cache object that will be accessed.
*
- * This is intended to be called from the ->begin_cache_operation() netfs lib
- * operation as implemented by the network filesystem.
- *
* @cres->inval_counter is set from @cookie->inval_counter for comparison at
* the end of the operation. This allows invalidation during the operation to
* be detected by the caller.
@@ -629,48 +626,6 @@ static inline void fscache_write_to_cache(struct fscache_cookie *cookie,
}
-#if __fscache_available
-bool fscache_dirty_folio(struct address_space *mapping, struct folio *folio,
- struct fscache_cookie *cookie);
-#else
-#define fscache_dirty_folio(MAPPING, FOLIO, COOKIE) \
- filemap_dirty_folio(MAPPING, FOLIO)
-#endif
-
-/**
- * fscache_unpin_writeback - Unpin writeback resources
- * @wbc: The writeback control
- * @cookie: The cookie referring to the cache object
- *
- * Unpin the writeback resources pinned by fscache_dirty_folio(). This is
- * intended to be called by the netfs's ->write_inode() method.
- */
-static inline void fscache_unpin_writeback(struct writeback_control *wbc,
- struct fscache_cookie *cookie)
-{
- if (wbc->unpinned_fscache_wb)
- fscache_unuse_cookie(cookie, NULL, NULL);
-}
-
-/**
- * fscache_clear_inode_writeback - Clear writeback resources pinned by an inode
- * @cookie: The cookie referring to the cache object
- * @inode: The inode to clean up
- * @aux: Auxiliary data to apply to the inode
- *
- * Clear any writeback resources held by an inode when the inode is evicted.
- * This must be called before clear_inode() is called.
- */
-static inline void fscache_clear_inode_writeback(struct fscache_cookie *cookie,
- struct inode *inode,
- const void *aux)
-{
- if (inode->i_state & I_PINNING_FSCACHE_WB) {
- loff_t i_size = i_size_read(inode);
- fscache_unuse_cookie(cookie, aux, &i_size);
- }
-}
-
/**
* fscache_note_page_release - Note that a netfs page got released
* @cookie: The cookie corresponding to the file
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
index c895b12737a1..12f9e455d569 100644
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -31,7 +31,7 @@
#define FSCRYPT_CONTENTS_ALIGNMENT 16
union fscrypt_policy;
-struct fscrypt_info;
+struct fscrypt_inode_info;
struct fs_parameter;
struct seq_file;
@@ -59,26 +59,55 @@ struct fscrypt_name {
#ifdef CONFIG_FS_ENCRYPTION
-/*
- * If set, the fscrypt bounce page pool won't be allocated (unless another
- * filesystem needs it). Set this if the filesystem always uses its own bounce
- * pages for writes and therefore won't need the fscrypt bounce page pool.
- */
-#define FS_CFLG_OWN_PAGES (1U << 1)
-
/* Crypto operations for filesystems */
struct fscrypt_operations {
- /* Set of optional flags; see above for allowed flags */
- unsigned int flags;
+ /*
+ * If set, then fs/crypto/ will allocate a global bounce page pool the
+ * first time an encryption key is set up for a file. The bounce page
+ * pool is required by the following functions:
+ *
+ * - fscrypt_encrypt_pagecache_blocks()
+ * - fscrypt_zeroout_range() for files not using inline crypto
+ *
+ * If the filesystem doesn't use those, it doesn't need to set this.
+ */
+ unsigned int needs_bounce_pages : 1;
/*
- * If set, this is a filesystem-specific key description prefix that
- * will be accepted for "logon" keys for v1 fscrypt policies, in
- * addition to the generic prefix "fscrypt:". This functionality is
- * deprecated, so new filesystems shouldn't set this field.
+ * If set, then fs/crypto/ will allow the use of encryption settings
+ * that assume inode numbers fit in 32 bits (i.e.
+ * FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{32,64}), provided that the other
+ * prerequisites for these settings are also met. This is only useful
+ * if the filesystem wants to support inline encryption hardware that is
+ * limited to 32-bit or 64-bit data unit numbers and where programming
+ * keyslots is very slow.
*/
- const char *key_prefix;
+ unsigned int has_32bit_inodes : 1;
+
+ /*
+ * If set, then fs/crypto/ will allow users to select a crypto data unit
+ * size that is less than the filesystem block size. This is done via
+ * the log2_data_unit_size field of the fscrypt policy. This flag is
+ * not compatible with filesystems that encrypt variable-length blocks
+	 * (i.e. blocks that aren't all equal to the filesystem's block size), for
+ * example as a result of compression. It's also not compatible with
+ * the fscrypt_encrypt_block_inplace() and
+ * fscrypt_decrypt_block_inplace() functions.
+ */
+ unsigned int supports_subblock_data_units : 1;
+
+ /*
+ * This field exists only for backwards compatibility reasons and should
+ * only be set by the filesystems that are setting it already. It
+ * contains the filesystem-specific key description prefix that is
+ * accepted for "logon" keys for v1 fscrypt policies. This
+ * functionality is deprecated in favor of the generic prefix
+ * "fscrypt:", which itself is deprecated in favor of the filesystem
+ * keyring ioctls such as FS_IOC_ADD_ENCRYPTION_KEY. Filesystems that
+ * are newly adding fscrypt support should not set this field.
+ */
+ const char *legacy_key_prefix;
/*
* Get the fscrypt context of the given inode.
@@ -146,21 +175,6 @@ struct fscrypt_operations {
bool (*has_stable_inodes)(struct super_block *sb);
/*
- * Get the number of bits that the filesystem uses to represent inode
- * numbers and file logical block numbers.
- *
- * By default, both of these are assumed to be 64-bit. This function
- * can be implemented to declare that either or both of these numbers is
- * shorter, which may allow the use of the
- * FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{32,64} flags and/or the use of
- * inline crypto hardware whose maximum DUN length is less than 64 bits
- * (e.g., eMMC v5.2 spec compliant hardware). This function only needs
- * to be implemented if support for one of these features is needed.
- */
- void (*get_ino_and_lblk_bits)(struct super_block *sb,
- int *ino_bits_ret, int *lblk_bits_ret);
-
- /*
* Return an array of pointers to the block devices to which the
* filesystem may write encrypted file contents, NULL if the filesystem
* only has a single such block device, or an ERR_PTR() on error.
@@ -178,7 +192,8 @@ struct fscrypt_operations {
unsigned int *num_devs);
};
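For orientation, here is a minimal sketch of how a filesystem might fill in the reworked operations; the foofs_ name is hypothetical and the mandatory callbacks such as get_context are omitted for brevity:

#include <linux/fscrypt.h>

/* Sketch only: opting in to the new bitfields on fscrypt_operations. */
static const struct fscrypt_operations foofs_fscrypt_ops = {
        .needs_bounce_pages = 1,  /* uses fscrypt_encrypt_pagecache_blocks() */
        .has_32bit_inodes   = 1,  /* inode numbers fit in 32 bits */
        /* .supports_subblock_data_units left unset: all blocks are fs-block sized */
        /* .legacy_key_prefix not set: new filesystems should not use it */
};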
-static inline struct fscrypt_info *fscrypt_get_info(const struct inode *inode)
+static inline struct fscrypt_inode_info *
+fscrypt_get_inode_info(const struct inode *inode)
{
/*
* Pairs with the cmpxchg_release() in fscrypt_setup_encryption_info().
@@ -390,7 +405,8 @@ static inline void fscrypt_set_ops(struct super_block *sb,
}
#else /* !CONFIG_FS_ENCRYPTION */
-static inline struct fscrypt_info *fscrypt_get_info(const struct inode *inode)
+static inline struct fscrypt_inode_info *
+fscrypt_get_inode_info(const struct inode *inode)
{
return NULL;
}
@@ -868,7 +884,7 @@ static inline bool fscrypt_inode_uses_fs_layer_crypto(const struct inode *inode)
*/
static inline bool fscrypt_has_encryption_key(const struct inode *inode)
{
- return fscrypt_get_info(inode) != NULL;
+ return fscrypt_get_inode_info(inode) != NULL;
}
/**
diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h
index a86115bc799c..a1b3de87a3d1 100644
--- a/include/linux/fsl/mc.h
+++ b/include/linux/fsl/mc.h
@@ -48,7 +48,7 @@ struct fsl_mc_driver {
struct device_driver driver;
const struct fsl_mc_device_id *match_id_table;
int (*probe)(struct fsl_mc_device *dev);
- int (*remove)(struct fsl_mc_device *dev);
+ void (*remove)(struct fsl_mc_device *dev);
void (*shutdown)(struct fsl_mc_device *dev);
int (*suspend)(struct fsl_mc_device *dev, pm_message_t state);
int (*resume)(struct fsl_mc_device *dev);
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index bb8467cd11ae..8300a5286988 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -91,38 +91,78 @@ static inline void fsnotify_dentry(struct dentry *dentry, __u32 mask)
static inline int fsnotify_file(struct file *file, __u32 mask)
{
- const struct path *path = &file->f_path;
+ const struct path *path;
if (file->f_mode & FMODE_NONOTIFY)
return 0;
+ path = &file->f_path;
return fsnotify_parent(path->dentry, mask, path, FSNOTIFY_EVENT_PATH);
}
-/* Simple call site for access decisions */
-static inline int fsnotify_perm(struct file *file, int mask)
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+/*
+ * fsnotify_file_area_perm - permission hook before access to file range
+ */
+static inline int fsnotify_file_area_perm(struct file *file, int perm_mask,
+ const loff_t *ppos, size_t count)
{
- int ret;
- __u32 fsnotify_mask = 0;
+ __u32 fsnotify_mask = FS_ACCESS_PERM;
+
+ /*
+ * filesystem may be modified in the context of permission events
+ * (e.g. by HSM filling a file on access), so sb freeze protection
+ * must not be held.
+ */
+ lockdep_assert_once(file_write_not_started(file));
- if (!(mask & (MAY_READ | MAY_OPEN)))
+ if (!(perm_mask & MAY_READ))
return 0;
- if (mask & MAY_OPEN) {
- fsnotify_mask = FS_OPEN_PERM;
+ return fsnotify_file(file, fsnotify_mask);
+}
- if (file->f_flags & __FMODE_EXEC) {
- ret = fsnotify_file(file, FS_OPEN_EXEC_PERM);
+/*
+ * fsnotify_file_perm - permission hook before file access
+ */
+static inline int fsnotify_file_perm(struct file *file, int perm_mask)
+{
+ return fsnotify_file_area_perm(file, perm_mask, NULL, 0);
+}
- if (ret)
- return ret;
- }
- } else if (mask & MAY_READ) {
- fsnotify_mask = FS_ACCESS_PERM;
+/*
+ * fsnotify_open_perm - permission hook before file open
+ */
+static inline int fsnotify_open_perm(struct file *file)
+{
+ int ret;
+
+ if (file->f_flags & __FMODE_EXEC) {
+ ret = fsnotify_file(file, FS_OPEN_EXEC_PERM);
+ if (ret)
+ return ret;
}
- return fsnotify_file(file, fsnotify_mask);
+ return fsnotify_file(file, FS_OPEN_PERM);
+}
+
+#else
+static inline int fsnotify_file_area_perm(struct file *file, int perm_mask,
+ const loff_t *ppos, size_t count)
+{
+ return 0;
+}
+
+static inline int fsnotify_file_perm(struct file *file, int perm_mask)
+{
+ return 0;
+}
+
+static inline int fsnotify_open_perm(struct file *file)
+{
+ return 0;
}
+#endif
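As a usage sketch of the split hooks (a hypothetical caller; the in-tree call sites live in the VFS, not in individual filesystems):

/* Sketch: ask permission-event listeners before reading a file range. */
static ssize_t foofs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct file *file = iocb->ki_filp;
        int ret;

        ret = fsnotify_file_area_perm(file, MAY_READ, &iocb->ki_pos,
                                      iov_iter_count(to));
        if (ret)
                return ret;

        return generic_file_read_iter(iocb, to);
}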
/*
* fsnotify_link_count - inode's link count changed
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index d7d96c806bff..7f63be5ca0f1 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -472,10 +472,8 @@ typedef struct fsnotify_mark_connector __rcu *fsnotify_connp_t;
struct fsnotify_mark_connector {
spinlock_t lock;
unsigned short type; /* Type of object [lock] */
-#define FSNOTIFY_CONN_FLAG_HAS_FSID 0x01
#define FSNOTIFY_CONN_FLAG_HAS_IREF 0x02
unsigned short flags; /* flags [lock] */
- __kernel_fsid_t fsid; /* fsid of filesystem containing object */
union {
/* Object pointer [lock] */
fsnotify_connp_t *obj;
@@ -530,6 +528,8 @@ struct fsnotify_mark {
#define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x0100
#define FSNOTIFY_MARK_FLAG_NO_IREF 0x0200
#define FSNOTIFY_MARK_FLAG_HAS_IGNORE_FLAGS 0x0400
+#define FSNOTIFY_MARK_FLAG_HAS_FSID 0x0800
+#define FSNOTIFY_MARK_FLAG_WEAK_FSID 0x1000
unsigned int flags; /* flags [mark->lock] */
};
@@ -760,17 +760,13 @@ extern void fsnotify_init_mark(struct fsnotify_mark *mark,
/* Find mark belonging to given group in the list of marks */
extern struct fsnotify_mark *fsnotify_find_mark(fsnotify_connp_t *connp,
struct fsnotify_group *group);
-/* Get cached fsid of filesystem containing object */
-extern int fsnotify_get_conn_fsid(const struct fsnotify_mark_connector *conn,
- __kernel_fsid_t *fsid);
/* attach the mark to the object */
extern int fsnotify_add_mark(struct fsnotify_mark *mark,
fsnotify_connp_t *connp, unsigned int obj_type,
- int add_flags, __kernel_fsid_t *fsid);
+ int add_flags);
extern int fsnotify_add_mark_locked(struct fsnotify_mark *mark,
fsnotify_connp_t *connp,
- unsigned int obj_type, int add_flags,
- __kernel_fsid_t *fsid);
+ unsigned int obj_type, int add_flags);
/* attach the mark to the inode */
static inline int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
@@ -778,15 +774,14 @@ static inline int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
int add_flags)
{
return fsnotify_add_mark(mark, &inode->i_fsnotify_marks,
- FSNOTIFY_OBJ_TYPE_INODE, add_flags, NULL);
+ FSNOTIFY_OBJ_TYPE_INODE, add_flags);
}
static inline int fsnotify_add_inode_mark_locked(struct fsnotify_mark *mark,
struct inode *inode,
int add_flags)
{
return fsnotify_add_mark_locked(mark, &inode->i_fsnotify_marks,
- FSNOTIFY_OBJ_TYPE_INODE, add_flags,
- NULL);
+ FSNOTIFY_OBJ_TYPE_INODE, add_flags);
}
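A hedged sketch of a backend adapting to the trimmed signatures; the fsid argument is gone, and fsid presence is now tracked via the FSNOTIFY_MARK_FLAG_HAS_FSID mark flag (foo_watch_inode and the mark allocation around it are hypothetical):

/* Sketch: attach an inode mark without the old __kernel_fsid_t argument. */
static int foo_watch_inode(struct fsnotify_group *group, struct inode *inode,
                           struct fsnotify_mark *mark)
{
        fsnotify_init_mark(mark, group);
        return fsnotify_add_inode_mark(mark, inode, 0);
}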
/* given a group and a mark, flag mark to be freed when all references are dropped */
diff --git a/include/linux/fsverity.h b/include/linux/fsverity.h
index e76605d5b36e..1eb7eae580be 100644
--- a/include/linux/fsverity.h
+++ b/include/linux/fsverity.h
@@ -143,8 +143,8 @@ int fsverity_ioctl_enable(struct file *filp, const void __user *arg);
int fsverity_ioctl_measure(struct file *filp, void __user *arg);
int fsverity_get_digest(struct inode *inode,
- u8 digest[FS_VERITY_MAX_DIGEST_SIZE],
- enum hash_algo *alg);
+ u8 raw_digest[FS_VERITY_MAX_DIGEST_SIZE],
+ u8 *alg, enum hash_algo *halg);
/* open.c */
@@ -197,10 +197,14 @@ static inline int fsverity_ioctl_measure(struct file *filp, void __user *arg)
}
static inline int fsverity_get_digest(struct inode *inode,
- u8 digest[FS_VERITY_MAX_DIGEST_SIZE],
- enum hash_algo *alg)
+ u8 raw_digest[FS_VERITY_MAX_DIGEST_SIZE],
+ u8 *alg, enum hash_algo *halg)
{
- return -EOPNOTSUPP;
+ /*
+ * fsverity is not enabled in the kernel configuration, so always report
+ * that the file doesn't have fsverity enabled (digest size 0).
+ */
+ return 0;
}
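Under the new contract the return value is the digest size, with 0 meaning the file has no fsverity digest; a hedged caller sketch (foo_measure is hypothetical, and passing NULL for the raw algorithm number is assumed to be allowed):

static int foo_measure(struct inode *inode)
{
        u8 raw_digest[FS_VERITY_MAX_DIGEST_SIZE];
        enum hash_algo halg;
        int size;

        size = fsverity_get_digest(inode, raw_digest, NULL, &halg);
        if (size == 0)
                return -ENODATA;        /* fsverity not enabled on this file */

        /* raw_digest[0..size-1] now holds the file's measured digest */
        return 0;
}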
/* open.c */
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index b23bdd414394..e8921871ef9a 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -41,6 +41,15 @@ struct ftrace_ops;
struct ftrace_regs;
struct dyn_ftrace;
+char *arch_ftrace_match_adjust(char *str, const char *search);
+
+#ifdef CONFIG_HAVE_FUNCTION_GRAPH_RETVAL
+struct fgraph_ret_regs;
+unsigned long ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs);
+#else
+unsigned long ftrace_return_to_handler(unsigned long frame_pointer);
+#endif
+
#ifdef CONFIG_FUNCTION_TRACER
/*
* If the arch's mcount caller does not support all of ftrace's
@@ -633,6 +642,7 @@ enum {
FTRACE_ITER_MOD = (1 << 5),
FTRACE_ITER_ENABLED = (1 << 6),
FTRACE_ITER_TOUCHED = (1 << 7),
+ FTRACE_ITER_ADDRS = (1 << 8),
};
void arch_ftrace_update_code(int command);
@@ -674,7 +684,6 @@ void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);
/* defined in arch */
-extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void);
extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
@@ -849,20 +858,12 @@ static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_a
}
#endif
-/* May be defined in arch */
-extern int ftrace_arch_read_dyn_info(char *buf, int size);
-
extern int skip_trace(unsigned long ip);
extern void ftrace_module_init(struct module *mod);
extern void ftrace_module_enable(struct module *mod);
extern void ftrace_release_mod(struct module *mod);
-
-extern void ftrace_disable_daemon(void);
-extern void ftrace_enable_daemon(void);
#else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; }
-static inline void ftrace_disable_daemon(void) { }
-static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_module_init(struct module *mod) { }
static inline void ftrace_module_enable(struct module *mod) { }
static inline void ftrace_release_mod(struct module *mod) { }
@@ -1018,6 +1019,9 @@ struct ftrace_graph_ent {
*/
struct ftrace_graph_ret {
unsigned long func; /* Current function */
+#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
+ unsigned long retval;
+#endif
int depth;
/* Number of functions that overran the depth limit for current task */
unsigned int overrun;
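Readers of the new field must guard on the config symbol, since the member only exists when CONFIG_FUNCTION_GRAPH_RETVAL is set; a minimal sketch (foo_graph_return is hypothetical):

static void foo_graph_return(struct ftrace_graph_ret *trace)
{
#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
        pr_debug("%ps returned 0x%lx\n", (void *)trace->func, trace->retval);
#else
        pr_debug("%ps returned (retval not recorded)\n", (void *)trace->func);
#endif
}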
diff --git a/include/linux/fw_table.h b/include/linux/fw_table.h
new file mode 100644
index 000000000000..95421860397a
--- /dev/null
+++ b/include/linux/fw_table.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * fw_table.h - Parsing support for ACPI and ACPI-like tables provided by
+ * platform or device firmware
+ *
+ * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ * Copyright (C) 2023 Intel Corp.
+ */
+#ifndef _FW_TABLE_H_
+#define _FW_TABLE_H_
+
+union acpi_subtable_headers;
+
+typedef int (*acpi_tbl_entry_handler)(union acpi_subtable_headers *header,
+ const unsigned long end);
+
+typedef int (*acpi_tbl_entry_handler_arg)(union acpi_subtable_headers *header,
+ void *arg, const unsigned long end);
+
+struct acpi_subtable_proc {
+ int id;
+ acpi_tbl_entry_handler handler;
+ acpi_tbl_entry_handler_arg handler_arg;
+ void *arg;
+ int count;
+};
+
+union fw_table_header {
+ struct acpi_table_header acpi;
+ struct acpi_table_cdat cdat;
+};
+
+union acpi_subtable_headers {
+ struct acpi_subtable_header common;
+ struct acpi_hmat_structure hmat;
+ struct acpi_prmt_module_header prmt;
+ struct acpi_cedt_header cedt;
+ struct acpi_cdat_header cdat;
+};
+
+int acpi_parse_entries_array(char *id, unsigned long table_size,
+ union fw_table_header *table_header,
+ struct acpi_subtable_proc *proc,
+ int proc_num, unsigned int max_entries);
+
+int cdat_table_parse(enum acpi_cdat_type type,
+ acpi_tbl_entry_handler_arg handler_arg, void *arg,
+ struct acpi_table_cdat *table_header);
+
+/* CXL is the only non-ACPI consumer of the FIRMWARE_TABLE library */
+#if IS_ENABLED(CONFIG_ACPI) && !IS_ENABLED(CONFIG_CXL_BUS)
+#define EXPORT_SYMBOL_FWTBL_LIB(x) EXPORT_SYMBOL_ACPI_LIB(x)
+#define __init_or_fwtbl_lib __init_or_acpilib
+#else
+#define EXPORT_SYMBOL_FWTBL_LIB(x) EXPORT_SYMBOL_NS_GPL(x, CXL)
+#define __init_or_fwtbl_lib
+#endif
+
+#endif
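An illustrative consumer of the new library (the handler and counter are hypothetical; this assumes a CDAT table was already fetched into @cdat and that ACPI_CDAT_TYPE_DSMAS is the subtable type of interest):

static int foo_dsmas_handler(union acpi_subtable_headers *header, void *arg,
                             const unsigned long end)
{
        int *count = arg;

        (*count)++;     /* header->cdat describes one DSMAS subtable */
        return 0;
}

static int foo_count_dsmas(struct acpi_table_cdat *cdat)
{
        int count = 0;
        int ret;

        ret = cdat_table_parse(ACPI_CDAT_TYPE_DSMAS, foo_dsmas_handler,
                               &count, cdat);
        return ret < 0 ? ret : count;
}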
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 5700451b300f..2a72f55d26eb 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -41,6 +41,8 @@ struct device;
struct fwnode_handle {
struct fwnode_handle *secondary;
const struct fwnode_operations *ops;
+
+ /* The below is used solely by device links, don't use otherwise */
struct device *dev;
struct list_head suppliers;
struct list_head consumers;
diff --git a/include/linux/gameport.h b/include/linux/gameport.h
index 8c2f00018e89..07e370113b2b 100644
--- a/include/linux/gameport.h
+++ b/include/linux/gameport.h
@@ -5,7 +5,6 @@
#ifndef _GAMEPORT_H
#define _GAMEPORT_H
-#include <asm/io.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
@@ -64,7 +63,7 @@ struct gameport_driver {
int gameport_open(struct gameport *gameport, struct gameport_driver *drv, int mode);
void gameport_close(struct gameport *gameport);
-#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
+#if IS_REACHABLE(CONFIG_GAMEPORT)
void __gameport_register_port(struct gameport *gameport, struct module *owner);
/* use a define to avoid include chaining to get THIS_MODULE */
@@ -165,18 +164,12 @@ void gameport_unregister_driver(struct gameport_driver *drv);
static inline void gameport_trigger(struct gameport *gameport)
{
- if (gameport->trigger)
- gameport->trigger(gameport);
- else
- outb(0xff, gameport->io);
+ gameport->trigger(gameport);
}
static inline unsigned char gameport_read(struct gameport *gameport)
{
- if (gameport->read)
- return gameport->read(gameport);
- else
- return inb(gameport->io);
+ return gameport->read(gameport);
}
static inline int gameport_cooked_read(struct gameport *gameport, int *axes, int *buttons)
diff --git a/include/linux/generic-radix-tree.h b/include/linux/generic-radix-tree.h
index 107613f7d792..847413164738 100644
--- a/include/linux/generic-radix-tree.h
+++ b/include/linux/generic-radix-tree.h
@@ -38,6 +38,7 @@
#include <asm/page.h>
#include <linux/bug.h>
+#include <linux/limits.h>
#include <linux/log2.h>
#include <linux/math.h>
#include <linux/types.h>
@@ -116,6 +117,11 @@ static inline size_t __idx_to_offset(size_t idx, size_t obj_size)
#define __genradix_cast(_radix) (typeof((_radix)->type[0]) *)
#define __genradix_obj_size(_radix) sizeof((_radix)->type[0])
+#define __genradix_objs_per_page(_radix) \
+ (PAGE_SIZE / sizeof((_radix)->type[0]))
+#define __genradix_page_remainder(_radix) \
+ (PAGE_SIZE % sizeof((_radix)->type[0]))
+
#define __genradix_idx_to_offset(_radix, _idx) \
__idx_to_offset(_idx, __genradix_obj_size(_radix))
@@ -179,11 +185,35 @@ void *__genradix_iter_peek(struct genradix_iter *, struct __genradix *, size_t);
#define genradix_iter_peek(_iter, _radix) \
(__genradix_cast(_radix) \
__genradix_iter_peek(_iter, &(_radix)->tree, \
- PAGE_SIZE / __genradix_obj_size(_radix)))
+ __genradix_objs_per_page(_radix)))
+
+void *__genradix_iter_peek_prev(struct genradix_iter *, struct __genradix *,
+ size_t, size_t);
+
+/**
+ * genradix_iter_peek_prev - get first entry at or below iterator's current
+ * position
+ * @_iter: a genradix_iter
+ * @_radix: genradix being iterated over
+ *
+ * If no more entries exist at or below @_iter's current position, returns NULL
+ */
+#define genradix_iter_peek_prev(_iter, _radix) \
+ (__genradix_cast(_radix) \
+ __genradix_iter_peek_prev(_iter, &(_radix)->tree, \
+ __genradix_objs_per_page(_radix), \
+ __genradix_obj_size(_radix) + \
+ __genradix_page_remainder(_radix)))
static inline void __genradix_iter_advance(struct genradix_iter *iter,
size_t obj_size)
{
+ if (iter->offset + obj_size < iter->offset) {
+ iter->offset = SIZE_MAX;
+ iter->pos = SIZE_MAX;
+ return;
+ }
+
iter->offset += obj_size;
if (!is_power_of_2(obj_size) &&
@@ -196,6 +226,25 @@ static inline void __genradix_iter_advance(struct genradix_iter *iter,
#define genradix_iter_advance(_iter, _radix) \
__genradix_iter_advance(_iter, __genradix_obj_size(_radix))
+static inline void __genradix_iter_rewind(struct genradix_iter *iter,
+ size_t obj_size)
+{
+ if (iter->offset == 0 ||
+ iter->offset == SIZE_MAX) {
+		iter->offset = SIZE_MAX;
+		iter->pos = SIZE_MAX;
+		return;
+ }
+
+ if ((iter->offset & (PAGE_SIZE - 1)) == 0)
+ iter->offset -= PAGE_SIZE % obj_size;
+
+ iter->offset -= obj_size;
+ iter->pos--;
+}
+
+#define genradix_iter_rewind(_iter, _radix) \
+ __genradix_iter_rewind(_iter, __genradix_obj_size(_radix))
+
#define genradix_for_each_from(_radix, _iter, _p, _start) \
for (_iter = genradix_iter_init(_radix, _start); \
(_p = genradix_iter_peek(&_iter, _radix)) != NULL; \
@@ -213,6 +262,23 @@ static inline void __genradix_iter_advance(struct genradix_iter *iter,
#define genradix_for_each(_radix, _iter, _p) \
genradix_for_each_from(_radix, _iter, _p, 0)
+#define genradix_last_pos(_radix) \
+ (SIZE_MAX / PAGE_SIZE * __genradix_objs_per_page(_radix) - 1)
+
+/**
+ * genradix_for_each_reverse - iterate over entries in a genradix, in reverse order
+ * @_radix: genradix to iterate over
+ * @_iter: a genradix_iter to track current position
+ * @_p: pointer to genradix entry type
+ *
+ * On every iteration, @_p will point to the current entry, and @_iter.pos
+ * will be the current entry's index.
+ */
+#define genradix_for_each_reverse(_radix, _iter, _p) \
+ for (_iter = genradix_iter_init(_radix, genradix_last_pos(_radix));\
+ (_p = genradix_iter_peek_prev(&_iter, _radix)) != NULL;\
+ genradix_iter_rewind(&_iter, _radix))
+
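A usage sketch of the reverse iterator (the entry type and radix are hypothetical):

struct foo_entry {
        u64 seq;
};

static GENRADIX(struct foo_entry) foo_entries;

static void foo_dump_backwards(void)
{
        struct genradix_iter iter;
        struct foo_entry *e;

        genradix_for_each_reverse(&foo_entries, iter, e)
                pr_info("seq %llu at pos %zu\n", e->seq, iter.pos);
}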
int __genradix_prealloc(struct __genradix *, size_t, gfp_t);
/**
diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
index 2984b0cb24b1..d4da060b7532 100644
--- a/include/linux/genl_magic_func.h
+++ b/include/linux/genl_magic_func.h
@@ -2,6 +2,7 @@
#ifndef GENL_MAGIC_FUNC_H
#define GENL_MAGIC_FUNC_H
+#include <linux/args.h>
#include <linux/build_bug.h>
#include <linux/genl_magic_struct.h>
@@ -23,7 +24,7 @@
#define GENL_struct(tag_name, tag_number, s_name, s_fields) \
[tag_name] = { .type = NLA_NESTED },
-static struct nla_policy CONCAT_(GENL_MAGIC_FAMILY, _tla_nl_policy)[] = {
+static struct nla_policy CONCATENATE(GENL_MAGIC_FAMILY, _tla_nl_policy)[] = {
#include GENL_MAGIC_INCLUDE_FILE
};
@@ -209,7 +210,7 @@ static int s_name ## _from_attrs_for_change(struct s_name *s, \
* Magic: define op number to op name mapping {{{1
* {{{2
*/
-static const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
+static const char *CONCATENATE(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
{
switch (cmd) {
#undef GENL_op
@@ -235,7 +236,7 @@ static const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
.cmd = op_name, \
},
-#define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
+#define ZZZ_genl_ops CONCATENATE(GENL_MAGIC_FAMILY, _genl_ops)
static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
#include GENL_MAGIC_INCLUDE_FILE
};
@@ -248,32 +249,32 @@ static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
* and provide register/unregister functions.
* {{{2
*/
-#define ZZZ_genl_family CONCAT_(GENL_MAGIC_FAMILY, _genl_family)
+#define ZZZ_genl_family CONCATENATE(GENL_MAGIC_FAMILY, _genl_family)
static struct genl_family ZZZ_genl_family;
/*
* Magic: define multicast groups
* Magic: define multicast group registration helper
*/
-#define ZZZ_genl_mcgrps CONCAT_(GENL_MAGIC_FAMILY, _genl_mcgrps)
+#define ZZZ_genl_mcgrps CONCATENATE(GENL_MAGIC_FAMILY, _genl_mcgrps)
static const struct genl_multicast_group ZZZ_genl_mcgrps[] = {
#undef GENL_mc_group
#define GENL_mc_group(group) { .name = #group, },
#include GENL_MAGIC_INCLUDE_FILE
};
-enum CONCAT_(GENL_MAGIC_FAMILY, group_ids) {
+enum CONCATENATE(GENL_MAGIC_FAMILY, group_ids) {
#undef GENL_mc_group
-#define GENL_mc_group(group) CONCAT_(GENL_MAGIC_FAMILY, _group_ ## group),
+#define GENL_mc_group(group) CONCATENATE(GENL_MAGIC_FAMILY, _group_ ## group),
#include GENL_MAGIC_INCLUDE_FILE
};
#undef GENL_mc_group
#define GENL_mc_group(group) \
-static int CONCAT_(GENL_MAGIC_FAMILY, _genl_multicast_ ## group)( \
+static int CONCATENATE(GENL_MAGIC_FAMILY, _genl_multicast_ ## group)( \
struct sk_buff *skb, gfp_t flags) \
{ \
unsigned int group_id = \
- CONCAT_(GENL_MAGIC_FAMILY, _group_ ## group); \
+ CONCATENATE(GENL_MAGIC_FAMILY, _group_ ## group); \
return genlmsg_multicast(&ZZZ_genl_family, skb, 0, \
group_id, flags); \
}
@@ -289,8 +290,8 @@ static struct genl_family ZZZ_genl_family __ro_after_init = {
#ifdef GENL_MAGIC_FAMILY_HDRSZ
.hdrsize = NLA_ALIGN(GENL_MAGIC_FAMILY_HDRSZ),
#endif
- .maxattr = ARRAY_SIZE(CONCAT_(GENL_MAGIC_FAMILY, _tla_nl_policy))-1,
- .policy = CONCAT_(GENL_MAGIC_FAMILY, _tla_nl_policy),
+ .maxattr = ARRAY_SIZE(CONCATENATE(GENL_MAGIC_FAMILY, _tla_nl_policy))-1,
+ .policy = CONCATENATE(GENL_MAGIC_FAMILY, _tla_nl_policy),
.ops = ZZZ_genl_ops,
.n_ops = ARRAY_SIZE(ZZZ_genl_ops),
.mcgrps = ZZZ_genl_mcgrps,
@@ -299,12 +300,12 @@ static struct genl_family ZZZ_genl_family __ro_after_init = {
.module = THIS_MODULE,
};
-int CONCAT_(GENL_MAGIC_FAMILY, _genl_register)(void)
+int CONCATENATE(GENL_MAGIC_FAMILY, _genl_register)(void)
{
return genl_register_family(&ZZZ_genl_family);
}
-void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void)
+void CONCATENATE(GENL_MAGIC_FAMILY, _genl_unregister)(void)
{
genl_unregister_family(&ZZZ_genl_family);
}
diff --git a/include/linux/genl_magic_struct.h b/include/linux/genl_magic_struct.h
index f81d48987528..a419d93789ff 100644
--- a/include/linux/genl_magic_struct.h
+++ b/include/linux/genl_magic_struct.h
@@ -14,14 +14,12 @@
# error "you need to define GENL_MAGIC_INCLUDE_FILE before inclusion"
#endif
+#include <linux/args.h>
#include <linux/genetlink.h>
#include <linux/types.h>
-#define CONCAT__(a,b) a ## b
-#define CONCAT_(a,b) CONCAT__(a,b)
-
-extern int CONCAT_(GENL_MAGIC_FAMILY, _genl_register)(void);
-extern void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void);
+extern int CONCATENATE(GENL_MAGIC_FAMILY, _genl_register)(void);
+extern void CONCATENATE(GENL_MAGIC_FAMILY, _genl_unregister)(void);
/*
* Extension of genl attribute validation policies {{{2
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index ed8cb537c6a7..e2a916cf29c4 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -8,6 +8,7 @@
#include <linux/topology.h>
struct vm_area_struct;
+struct mempolicy;
/* Convert GFP flags to their corresponding migrate type */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
@@ -262,7 +263,9 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
#ifdef CONFIG_NUMA
struct page *alloc_pages(gfp_t gfp, unsigned int order);
-struct folio *folio_alloc(gfp_t gfp, unsigned order);
+struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order,
+ struct mempolicy *mpol, pgoff_t ilx, int nid);
+struct folio *folio_alloc(gfp_t gfp, unsigned int order);
struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
unsigned long addr, bool hugepage);
#else
@@ -270,6 +273,11 @@ static inline struct page *alloc_pages(gfp_t gfp_mask, unsigned int order)
{
return alloc_pages_node(numa_node_id(), gfp_mask, order);
}
+static inline struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order,
+ struct mempolicy *mpol, pgoff_t ilx, int nid)
+{
+ return alloc_pages(gfp, order);
+}
static inline struct folio *folio_alloc(gfp_t gfp, unsigned int order)
{
return __folio_alloc_node(gfp, order, numa_node_id());
@@ -320,11 +328,13 @@ extern void page_frag_free(void *addr);
#define free_page(addr) free_pages((addr), 0)
void page_alloc_init_cpuhp(void);
+int decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(struct zone *zone);
void drain_local_pages(struct zone *zone);
void page_alloc_init_late(void);
+void setup_pcp_cacheinfo(void);
/*
* gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
@@ -338,19 +348,21 @@ extern gfp_t gfp_allowed_mask;
/* Returns true if the gfp_mask allows use of ALLOC_NO_WATERMARK */
bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);
-extern void pm_restrict_gfp_mask(void);
-extern void pm_restore_gfp_mask(void);
-
-extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma);
+static inline bool gfp_has_io_fs(gfp_t gfp)
+{
+ return (gfp & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS);
+}
-#ifdef CONFIG_PM_SLEEP
-extern bool pm_suspended_storage(void);
-#else
-static inline bool pm_suspended_storage(void)
+/*
+ * Check if the gfp flags allow compaction - GFP_NOIO is a really
+ * tricky context because the migration might require IO.
+ */
+static inline bool gfp_compaction_allowed(gfp_t gfp_mask)
{
- return false;
+ return IS_ENABLED(CONFIG_COMPACTION) && (gfp_mask & __GFP_IO);
}
-#endif /* CONFIG_PM_SLEEP */
+
+extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma);
#ifdef CONFIG_CONTIG_ALLOC
/* The below functions must be run on a range from a single zone. */
diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
index 6583a58670c5..1b6053da8754 100644
--- a/include/linux/gfp_types.h
+++ b/include/linux/gfp_types.h
@@ -162,25 +162,25 @@ typedef unsigned int __bitwise gfp_t;
* %__GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
*
* The default allocator behavior depends on the request size. We have a concept
- * of so called costly allocations (with order > %PAGE_ALLOC_COSTLY_ORDER).
+ * of so-called costly allocations (with order > %PAGE_ALLOC_COSTLY_ORDER).
* !costly allocations are too essential to fail so they are implicitly
* non-failing by default (with some exceptions like OOM victims might fail so
* the caller still has to check for failures) while costly requests try to be
* not disruptive and back off even without invoking the OOM killer.
* The following three modifiers might be used to override some of these
- * implicit rules
+ * implicit rules.
*
* %__GFP_NORETRY: The VM implementation will try only very lightweight
* memory direct reclaim to get some memory under memory pressure (thus
* it can sleep). It will avoid disruptive actions like OOM killer. The
* caller must handle the failure which is quite likely to happen under
* heavy memory pressure. The flag is suitable when failure can easily be
- * handled at small cost, such as reduced throughput
+ * handled at small cost, such as reduced throughput.
*
* %__GFP_RETRY_MAYFAIL: The VM implementation will retry memory reclaim
* procedures that have previously failed if there is some indication
- * that progress has been made else where. It can wait for other
- * tasks to attempt high level approaches to freeing memory such as
+ * that progress has been made elsewhere. It can wait for other
+ * tasks to attempt high-level approaches to freeing memory such as
* compaction (which removes fragmentation) and page-out.
* There is still a definite limit to the number of retries, but it is
* a larger limit than with %__GFP_NORETRY.
@@ -230,7 +230,7 @@ typedef unsigned int __bitwise gfp_t;
* is being zeroed (either via __GFP_ZERO or via init_on_alloc, provided that
* __GFP_SKIP_ZERO is not set). This flag is intended for optimization: setting
* memory tags at the same time as zeroing memory has minimal additional
- * performace impact.
+ * performance impact.
*
* %__GFP_SKIP_KASAN makes KASAN skip unpoisoning on page allocation.
* Used for userspace and vmalloc pages; the latter are unpoisoned by
@@ -274,7 +274,8 @@ typedef unsigned int __bitwise gfp_t;
* accounted to kmemcg.
*
* %GFP_NOWAIT is for kernel allocations that should not stall for direct
- * reclaim, start physical IO or use any filesystem callback.
+ * reclaim, start physical IO or use any filesystem callback. It is very
+ * likely to fail to allocate memory, even for very small allocations.
*
* %GFP_NOIO will use direct reclaim to discard clean pages or slab pages
* that do not require the starting of any physical IO.
@@ -325,7 +326,7 @@ typedef unsigned int __bitwise gfp_t;
#define GFP_ATOMIC (__GFP_HIGH|__GFP_KSWAPD_RECLAIM)
#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT)
-#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM)
+#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM | __GFP_NOWARN)
#define GFP_NOIO (__GFP_RECLAIM)
#define GFP_NOFS (__GFP_RECLAIM | __GFP_IO)
#define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
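Since GFP_NOWAIT now implies __GFP_NOWARN and is documented as likely to fail, callers are expected to provide their own fallback path; a minimal sketch:

/* Sketch: opportunistic non-sleeping allocation with a sleeping fallback. */
static void *foo_alloc(size_t size, bool can_sleep)
{
        void *p = kmalloc(size, GFP_NOWAIT);    /* no reclaim, may fail */

        if (!p && can_sleep)
                p = kmalloc(size, GFP_KERNEL);  /* may reclaim and sleep */
        return p;
}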
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 8528353e073b..7ecc25c543ce 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -32,17 +32,6 @@ struct device;
/* Gpio pin is active-low */
#define GPIOF_ACTIVE_LOW (1 << 2)
-/* Gpio pin is open drain */
-#define GPIOF_OPEN_DRAIN (1 << 3)
-
-/* Gpio pin is open source */
-#define GPIOF_OPEN_SOURCE (1 << 4)
-
-#define GPIOF_EXPORT (1 << 5)
-#define GPIOF_EXPORT_CHANGEABLE (1 << 6)
-#define GPIOF_EXPORT_DIR_FIXED (GPIOF_EXPORT)
-#define GPIOF_EXPORT_DIR_CHANGEABLE (GPIOF_EXPORT | GPIOF_EXPORT_CHANGEABLE)
-
/**
* struct gpio - a structure describing a GPIO with configuration
* @gpio: the GPIO number
@@ -119,11 +108,6 @@ static inline void gpio_set_value(unsigned gpio, int value)
return gpiod_set_raw_value(gpio_to_desc(gpio), value);
}
-static inline int gpio_cansleep(unsigned gpio)
-{
- return gpiod_cansleep(gpio_to_desc(gpio));
-}
-
static inline int gpio_to_irq(unsigned gpio)
{
return gpiod_to_irq(gpio_to_desc(gpio));
@@ -206,13 +190,6 @@ static inline void gpio_set_value(unsigned gpio, int value)
WARN_ON(1);
}
-static inline int gpio_cansleep(unsigned gpio)
-{
- /* GPIO can never have been requested or set as {in,out}put */
- WARN_ON(1);
- return 0;
-}
-
static inline int gpio_get_value_cansleep(unsigned gpio)
{
/* GPIO can never have been requested or set as {in,out}put */
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 1c4385a00f88..db2dfbae8edb 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -159,7 +159,6 @@ int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
int gpiod_set_config(struct gpio_desc *desc, unsigned long config);
int gpiod_set_debounce(struct gpio_desc *desc, unsigned int debounce);
-int gpiod_set_transitory(struct gpio_desc *desc, bool transitory);
void gpiod_toggle_active_low(struct gpio_desc *desc);
int gpiod_is_active_low(const struct gpio_desc *desc);
@@ -494,13 +493,6 @@ static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned int deboun
return -ENOSYS;
}
-static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
-{
- /* GPIO can never have been requested */
- WARN_ON(desc);
- return -ENOSYS;
-}
-
static inline void gpiod_toggle_active_low(struct gpio_desc *desc)
{
/* GPIO can never have been requested */
@@ -614,8 +606,6 @@ void acpi_dev_remove_driver_gpios(struct acpi_device *adev);
int devm_acpi_dev_add_driver_gpios(struct device *dev,
const struct acpi_gpio_mapping *gpios);
-struct gpio_desc *acpi_get_and_request_gpiod(char *path, unsigned int pin, char *label);
-
#else /* CONFIG_GPIOLIB && CONFIG_ACPI */
#include <linux/err.h>
@@ -633,12 +623,6 @@ static inline int devm_acpi_dev_add_driver_gpios(struct device *dev,
return -ENXIO;
}
-static inline struct gpio_desc *acpi_get_and_request_gpiod(char *path, unsigned int pin,
- char *label)
-{
- return ERR_PTR(-ENOSYS);
-}
-
#endif /* CONFIG_GPIOLIB && CONFIG_ACPI */
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 67b8774eed8f..7f75c9a51874 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -3,6 +3,8 @@
#define __LINUX_GPIO_DRIVER_H
#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/err.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/irqhandler.h>
@@ -61,13 +63,6 @@ struct gpio_irq_chip {
*/
struct irq_domain *domain;
- /**
- * @domain_ops:
- *
- * Table of interrupt domain operations for this IRQ chip.
- */
- const struct irq_domain_ops *domain_ops;
-
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
/**
* @fwnode:
@@ -536,29 +531,49 @@ struct gpio_chip {
#endif /* CONFIG_OF_GPIO */
};
-extern const char *gpiochip_is_requested(struct gpio_chip *gc,
- unsigned int offset);
+char *gpiochip_dup_line_label(struct gpio_chip *gc, unsigned int offset);
+
+struct _gpiochip_for_each_data {
+ const char **label;
+ unsigned int *i;
+};
+
+DEFINE_CLASS(_gpiochip_for_each_data,
+ struct _gpiochip_for_each_data,
+ if (*_T.label) kfree(*_T.label),
+ ({
+ struct _gpiochip_for_each_data _data = { label, i };
+ *_data.i = 0;
+ _data;
+ }),
+	     const char **label, unsigned int *i)
/**
* for_each_requested_gpio_in_range - iterates over requested GPIOs in a given range
- * @chip: the chip to query
- * @i: loop variable
- * @base: first GPIO in the range
- * @size: amount of GPIOs to check starting from @base
- * @label: label of current GPIO
+ * @_chip: the chip to query
+ * @_i: loop variable
+ * @_base: first GPIO in the range
+ * @_size: number of GPIOs to check starting from @_base
+ * @_label: label of current GPIO
*/
-#define for_each_requested_gpio_in_range(chip, i, base, size, label) \
- for (i = 0; i < size; i++) \
- if ((label = gpiochip_is_requested(chip, base + i)) == NULL) {} else
+#define for_each_requested_gpio_in_range(_chip, _i, _base, _size, _label) \
+ for (CLASS(_gpiochip_for_each_data, _data)(&_label, &_i); \
+ *_data.i < _size; \
+ (*_data.i)++, kfree(*(_data.label)), *_data.label = NULL) \
+ if ((*_data.label = \
+ gpiochip_dup_line_label(_chip, _base + *_data.i)) == NULL) {} \
+ else if (IS_ERR(*_data.label)) {} \
+ else
/* Iterates over all requested GPIO of the given @chip */
#define for_each_requested_gpio(chip, i, label) \
for_each_requested_gpio_in_range(chip, i, 0, chip->ngpio, label)
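With gpiochip_dup_line_label() the iterated label is a heap copy that the scope-based helper frees on every step, so callers no longer borrow gpiolib-internal strings; a hypothetical dump loop:

static void foo_dump_requested(struct gpio_chip *gc)
{
        const char *label;
        unsigned int i;

        for_each_requested_gpio(gc, i, label)
                pr_info("offset %u requested as \"%s\"\n", i, label);
}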
/* add/remove chips */
-extern int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
- struct lock_class_key *lock_key,
- struct lock_class_key *request_key);
+int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
+ struct lock_class_key *lock_key,
+ struct lock_class_key *request_key);
/**
* gpiochip_add_data() - register a gpio_chip
@@ -606,13 +621,23 @@ static inline int gpiochip_add(struct gpio_chip *gc)
{
return gpiochip_add_data(gc, NULL);
}
-extern void gpiochip_remove(struct gpio_chip *gc);
-extern int devm_gpiochip_add_data_with_key(struct device *dev, struct gpio_chip *gc, void *data,
- struct lock_class_key *lock_key,
- struct lock_class_key *request_key);
+void gpiochip_remove(struct gpio_chip *gc);
+int devm_gpiochip_add_data_with_key(struct device *dev, struct gpio_chip *gc,
+ void *data, struct lock_class_key *lock_key,
+ struct lock_class_key *request_key);
+
+struct gpio_device *gpio_device_find(void *data,
+ int (*match)(struct gpio_chip *gc, void *data));
+struct gpio_device *gpio_device_find_by_label(const char *label);
+struct gpio_device *gpio_device_find_by_fwnode(const struct fwnode_handle *fwnode);
+
+struct gpio_device *gpio_device_get(struct gpio_device *gdev);
+void gpio_device_put(struct gpio_device *gdev);
+
+DEFINE_FREE(gpio_device_put, struct gpio_device *,
+ if (!IS_ERR_OR_NULL(_T)) gpio_device_put(_T))
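The DEFINE_FREE() hook enables scope-based reference handling for the new lookup API; a hedged sketch (foo_use_gpiochip is hypothetical):

static int foo_use_gpiochip(const char *label)
{
        struct gpio_device *gdev __free(gpio_device_put) =
                        gpio_device_find_by_label(label);

        if (!gdev)
                return -ENODEV;

        pr_info("found gpiochip at base %d\n", gpio_device_get_base(gdev));
        return 0;       /* the reference is dropped automatically here */
}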
-extern struct gpio_chip *gpiochip_find(void *data,
- int (*match)(struct gpio_chip *gc, void *data));
+struct device *gpio_device_to_device(struct gpio_device *gdev);
bool gpiochip_line_is_irq(struct gpio_chip *gc, unsigned int offset);
int gpiochip_reqres_irq(struct gpio_chip *gc, unsigned int offset);
@@ -697,7 +722,6 @@ int gpiochip_irqchip_add_domain(struct gpio_chip *gc,
#else
#include <asm/bug.h>
-#include <asm/errno.h>
static inline int gpiochip_irqchip_add_domain(struct gpio_chip *gc,
struct irq_domain *domain)
@@ -765,18 +789,26 @@ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *gc,
enum gpiod_flags dflags);
void gpiochip_free_own_desc(struct gpio_desc *desc);
+struct gpio_desc *gpiochip_get_desc(struct gpio_chip *gc, unsigned int hwnum);
+struct gpio_desc *
+gpio_device_get_desc(struct gpio_device *gdev, unsigned int hwnum);
+
+struct gpio_chip *gpio_device_get_chip(struct gpio_device *gdev);
+
#ifdef CONFIG_GPIOLIB
/* lock/unlock as IRQ */
int gpiochip_lock_as_irq(struct gpio_chip *gc, unsigned int offset);
void gpiochip_unlock_as_irq(struct gpio_chip *gc, unsigned int offset);
-
struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc);
+struct gpio_device *gpiod_to_gpio_device(struct gpio_desc *desc);
-#else /* CONFIG_GPIOLIB */
+/* struct gpio_device getters */
+int gpio_device_get_base(struct gpio_device *gdev);
+const char *gpio_device_get_label(struct gpio_device *gdev);
-#include <linux/err.h>
+#else /* CONFIG_GPIOLIB */
#include <asm/bug.h>
@@ -787,6 +819,24 @@ static inline struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc)
return ERR_PTR(-ENODEV);
}
+static inline struct gpio_device *gpiod_to_gpio_device(struct gpio_desc *desc)
+{
+ WARN_ON(1);
+ return ERR_PTR(-ENODEV);
+}
+
+static inline int gpio_device_get_base(struct gpio_device *gdev)
+{
+ WARN_ON(1);
+ return -ENODEV;
+}
+
+static inline const char *gpio_device_get_label(struct gpio_device *gdev)
+{
+ WARN_ON(1);
+ return NULL;
+}
+
static inline int gpiochip_lock_as_irq(struct gpio_chip *gc,
unsigned int offset)
{
diff --git a/include/linux/gpio_keys.h b/include/linux/gpio_keys.h
index 3f84aeb81e48..80fa930b04c6 100644
--- a/include/linux/gpio_keys.h
+++ b/include/linux/gpio_keys.h
@@ -21,6 +21,7 @@ struct device;
* disable button via sysfs
* @value: axis value for %EV_ABS
* @irq: Irq number in case of interrupt keys
+ * @wakeirq: Optional dedicated wake-up interrupt
*/
struct gpio_keys_button {
unsigned int code;
@@ -34,6 +35,7 @@ struct gpio_keys_button {
bool can_disable;
int value;
unsigned int irq;
+ unsigned int wakeirq;
};
/**
diff --git a/include/linux/greybus/svc.h b/include/linux/greybus/svc.h
index 5afaf5f06856..da547fb9071b 100644
--- a/include/linux/greybus/svc.h
+++ b/include/linux/greybus/svc.h
@@ -100,7 +100,4 @@ bool gb_svc_watchdog_enabled(struct gb_svc *svc);
int gb_svc_watchdog_enable(struct gb_svc *svc);
int gb_svc_watchdog_disable(struct gb_svc *svc);
-int gb_svc_protocol_init(void);
-void gb_svc_protocol_exit(void);
-
#endif /* __SVC_H */
diff --git a/include/linux/habanalabs/cpucp_if.h b/include/linux/habanalabs/cpucp_if.h
new file mode 100644
index 000000000000..f316c8d0f3fc
--- /dev/null
+++ b/include/linux/habanalabs/cpucp_if.h
@@ -0,0 +1,1423 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2020-2023 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef CPUCP_IF_H
+#define CPUCP_IF_H
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+#include "hl_boot_if.h"
+
+#define NUM_HBM_PSEUDO_CH 2
+#define NUM_HBM_CH_PER_DEV 8
+#define CPUCP_PKT_HBM_ECC_INFO_WR_PAR_SHIFT 0
+#define CPUCP_PKT_HBM_ECC_INFO_WR_PAR_MASK 0x00000001
+#define CPUCP_PKT_HBM_ECC_INFO_RD_PAR_SHIFT 1
+#define CPUCP_PKT_HBM_ECC_INFO_RD_PAR_MASK 0x00000002
+#define CPUCP_PKT_HBM_ECC_INFO_CA_PAR_SHIFT 2
+#define CPUCP_PKT_HBM_ECC_INFO_CA_PAR_MASK 0x00000004
+#define CPUCP_PKT_HBM_ECC_INFO_DERR_SHIFT 3
+#define CPUCP_PKT_HBM_ECC_INFO_DERR_MASK 0x00000008
+#define CPUCP_PKT_HBM_ECC_INFO_SERR_SHIFT 4
+#define CPUCP_PKT_HBM_ECC_INFO_SERR_MASK 0x00000010
+#define CPUCP_PKT_HBM_ECC_INFO_TYPE_SHIFT 5
+#define CPUCP_PKT_HBM_ECC_INFO_TYPE_MASK 0x00000020
+#define CPUCP_PKT_HBM_ECC_INFO_HBM_CH_SHIFT 6
+#define CPUCP_PKT_HBM_ECC_INFO_HBM_CH_MASK 0x000007C0
+
+#define PLL_MAP_MAX_BITS 128
+#define PLL_MAP_LEN (PLL_MAP_MAX_BITS / 8)
+
+enum eq_event_id {
+ EQ_EVENT_NIC_STS_REQUEST = 0,
+ EQ_EVENT_PWR_MODE_0,
+ EQ_EVENT_PWR_MODE_1,
+ EQ_EVENT_PWR_MODE_2,
+ EQ_EVENT_PWR_MODE_3,
+ EQ_EVENT_PWR_BRK_ENTRY,
+ EQ_EVENT_PWR_BRK_EXIT,
+ EQ_EVENT_HEARTBEAT,
+};
+
+/*
+ * Info of the packet queue pointers at the first async occurrence
+ */
+struct cpucp_pkt_sync_err {
+ __le32 pi;
+ __le32 ci;
+};
+
+struct hl_eq_hbm_ecc_data {
+ /* SERR counter */
+ __le32 sec_cnt;
+ /* DERR counter */
+ __le32 dec_cnt;
+ /* Supplemental Information according to the mask bits */
+ __le32 hbm_ecc_info;
+	/* Address in HBM where the ECC error happened */
+ __le32 first_addr;
+ /* SERR continuous address counter */
+ __le32 sec_cont_cnt;
+ __le32 pad;
+};
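Consumers decode hbm_ecc_info with the shift/mask pairs defined above; a minimal sketch (the helper name is hypothetical):

/* Sketch: extract the HBM channel from a received hbm_ecc_info word. */
static u32 foo_hbm_ecc_channel(const struct hl_eq_hbm_ecc_data *data)
{
        u32 info = le32_to_cpu(data->hbm_ecc_info);

        return (info & CPUCP_PKT_HBM_ECC_INFO_HBM_CH_MASK) >>
               CPUCP_PKT_HBM_ECC_INFO_HBM_CH_SHIFT;
}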
+
+/*
+ * EVENT QUEUE
+ */
+
+struct hl_eq_header {
+ __le32 reserved;
+ __le32 ctl;
+};
+
+struct hl_eq_ecc_data {
+ __le64 ecc_address;
+ __le64 ecc_syndrom;
+ __u8 memory_wrapper_idx;
+ __u8 is_critical;
+ __le16 block_id;
+ __u8 pad[4];
+};
+
+enum hl_sm_sei_cause {
+ SM_SEI_SO_OVERFLOW,
+ SM_SEI_LBW_4B_UNALIGNED,
+ SM_SEI_AXI_RESPONSE_ERR
+};
+
+struct hl_eq_sm_sei_data {
+ __le32 sei_log;
+ /* enum hl_sm_sei_cause */
+ __u8 sei_cause;
+ __u8 pad[3];
+};
+
+enum hl_fw_alive_severity {
+ FW_ALIVE_SEVERITY_MINOR,
+ FW_ALIVE_SEVERITY_CRITICAL
+};
+
+struct hl_eq_fw_alive {
+ __le64 uptime_seconds;
+ __le32 process_id;
+ __le32 thread_id;
+ /* enum hl_fw_alive_severity */
+ __u8 severity;
+ __u8 pad[7];
+};
+
+struct hl_eq_intr_cause {
+ __le64 intr_cause_data;
+};
+
+struct hl_eq_pcie_drain_ind_data {
+ struct hl_eq_intr_cause intr_cause;
+ __le64 drain_wr_addr_lbw;
+ __le64 drain_rd_addr_lbw;
+ __le64 drain_wr_addr_hbw;
+ __le64 drain_rd_addr_hbw;
+};
+
+struct hl_eq_razwi_lbw_info_regs {
+ __le32 rr_aw_razwi_reg;
+ __le32 rr_aw_razwi_id_reg;
+ __le32 rr_ar_razwi_reg;
+ __le32 rr_ar_razwi_id_reg;
+};
+
+struct hl_eq_razwi_hbw_info_regs {
+ __le32 rr_aw_razwi_hi_reg;
+ __le32 rr_aw_razwi_lo_reg;
+ __le32 rr_aw_razwi_id_reg;
+ __le32 rr_ar_razwi_hi_reg;
+ __le32 rr_ar_razwi_lo_reg;
+ __le32 rr_ar_razwi_id_reg;
+};
+
+/* razwi_happened masks */
+#define RAZWI_HAPPENED_HBW 0x1
+#define RAZWI_HAPPENED_LBW 0x2
+#define RAZWI_HAPPENED_AW 0x4
+#define RAZWI_HAPPENED_AR 0x8
+
+struct hl_eq_razwi_info {
+ __le32 razwi_happened_mask;
+ union {
+ struct hl_eq_razwi_lbw_info_regs lbw;
+ struct hl_eq_razwi_hbw_info_regs hbw;
+ };
+ __le32 pad;
+};
+
+struct hl_eq_razwi_with_intr_cause {
+ struct hl_eq_razwi_info razwi_info;
+ struct hl_eq_intr_cause intr_cause;
+};
+
+#define HBM_CA_ERR_CMD_LIFO_LEN 8
+#define HBM_RD_ERR_DATA_LIFO_LEN 8
+#define HBM_WR_PAR_CMD_LIFO_LEN 11
+
+enum hl_hbm_sei_cause {
+ /* Command/address parity error event is split into 2 events due to
+ * size limitation: ODD suffix for odd HBM CK_t cycles and EVEN suffix
+ * for even HBM CK_t cycles
+ */
+ HBM_SEI_CMD_PARITY_EVEN,
+ HBM_SEI_CMD_PARITY_ODD,
+ /* Read errors can be reflected as a combination of SERR/DERR/parity
+ * errors. Therefore, we define one event for all read error types.
+	 * LKD will perform further processing.
+ */
+ HBM_SEI_READ_ERR,
+ HBM_SEI_WRITE_DATA_PARITY_ERR,
+ HBM_SEI_CATTRIP,
+ HBM_SEI_MEM_BIST_FAIL,
+ HBM_SEI_DFI,
+ HBM_SEI_INV_TEMP_READ_OUT,
+ HBM_SEI_BIST_FAIL,
+};
+
+/* Masks for parsing hl_hbm_sei_headr fields */
+#define HBM_ECC_SERR_CNTR_MASK 0xFF
+#define HBM_ECC_DERR_CNTR_MASK 0xFF00
+#define HBM_RD_PARITY_CNTR_MASK 0xFF0000
+
+/* HBM index and MC index are known by the event_id */
+struct hl_hbm_sei_header {
+ union {
+ /* relevant only in case of HBM read error */
+ struct {
+ __u8 ecc_serr_cnt;
+ __u8 ecc_derr_cnt;
+ __u8 read_par_cnt;
+ __u8 reserved;
+ };
+ /* All other cases */
+ __le32 cnt;
+ };
+ __u8 sei_cause; /* enum hl_hbm_sei_cause */
+ __u8 mc_channel; /* range: 0-3 */
+ __u8 mc_pseudo_channel; /* range: 0-7 */
+ __u8 is_critical;
+};
+
+#define HBM_RD_ADDR_SID_SHIFT 0
+#define HBM_RD_ADDR_SID_MASK 0x1
+#define HBM_RD_ADDR_BG_SHIFT 1
+#define HBM_RD_ADDR_BG_MASK 0x6
+#define HBM_RD_ADDR_BA_SHIFT 3
+#define HBM_RD_ADDR_BA_MASK 0x18
+#define HBM_RD_ADDR_COL_SHIFT 5
+#define HBM_RD_ADDR_COL_MASK 0x7E0
+#define HBM_RD_ADDR_ROW_SHIFT 11
+#define HBM_RD_ADDR_ROW_MASK 0x3FFF800
+
+struct hbm_rd_addr {
+ union {
+ /* bit fields are only for FW use */
+ struct {
+ u32 dbg_rd_err_addr_sid:1;
+ u32 dbg_rd_err_addr_bg:2;
+ u32 dbg_rd_err_addr_ba:2;
+ u32 dbg_rd_err_addr_col:6;
+ u32 dbg_rd_err_addr_row:15;
+ u32 reserved:6;
+ };
+ __le32 rd_addr_val;
+ };
+};
+
+#define HBM_RD_ERR_BEAT_SHIFT 2
+/* dbg_rd_err_misc fields: */
+/* Read parity is calculated per DW on every beat */
+#define HBM_RD_ERR_PAR_ERR_BEAT0_SHIFT 0
+#define HBM_RD_ERR_PAR_ERR_BEAT0_MASK 0x3
+#define HBM_RD_ERR_PAR_DATA_BEAT0_SHIFT 8
+#define HBM_RD_ERR_PAR_DATA_BEAT0_MASK 0x300
+/* ECC is calculated per PC on every beat */
+#define HBM_RD_ERR_SERR_BEAT0_SHIFT 16
+#define HBM_RD_ERR_SERR_BEAT0_MASK 0x10000
+#define HBM_RD_ERR_DERR_BEAT0_SHIFT 24
+#define HBM_RD_ERR_DERR_BEAT0_MASK 0x100000
+
+struct hl_eq_hbm_sei_read_err_intr_info {
+ /* DFI_RD_ERR_REP_ADDR */
+ struct hbm_rd_addr dbg_rd_err_addr;
+ /* DFI_RD_ERR_REP_ERR */
+ union {
+ struct {
+ /* bit fields are only for FW use */
+ u32 dbg_rd_err_par:8;
+ u32 dbg_rd_err_par_data:8;
+ u32 dbg_rd_err_serr:4;
+ u32 dbg_rd_err_derr:4;
+ u32 reserved:8;
+ };
+ __le32 dbg_rd_err_misc;
+ };
+ /* DFI_RD_ERR_REP_DM */
+ __le32 dbg_rd_err_dm;
+ /* DFI_RD_ERR_REP_SYNDROME */
+ __le32 dbg_rd_err_syndrome;
+ /* DFI_RD_ERR_REP_DATA */
+ __le32 dbg_rd_err_data[HBM_RD_ERR_DATA_LIFO_LEN];
+};
+
+struct hl_eq_hbm_sei_ca_par_intr_info {
+ /* 14 LSBs */
+ __le16 dbg_row[HBM_CA_ERR_CMD_LIFO_LEN];
+ /* 18 LSBs */
+ __le32 dbg_col[HBM_CA_ERR_CMD_LIFO_LEN];
+};
+
+#define WR_PAR_LAST_CMD_COL_SHIFT 0
+#define WR_PAR_LAST_CMD_COL_MASK 0x3F
+#define WR_PAR_LAST_CMD_BG_SHIFT 6
+#define WR_PAR_LAST_CMD_BG_MASK 0xC0
+#define WR_PAR_LAST_CMD_BA_SHIFT 8
+#define WR_PAR_LAST_CMD_BA_MASK 0x300
+#define WR_PAR_LAST_CMD_SID_SHIFT 10
+#define WR_PAR_LAST_CMD_SID_MASK 0x400
+
+/* Row address isn't latched */
+struct hbm_sei_wr_cmd_address {
+ /* DFI_DERR_LAST_CMD */
+ union {
+ struct {
+ /* bit fields are only for FW use */
+ u32 col:6;
+ u32 bg:2;
+ u32 ba:2;
+ u32 sid:1;
+ u32 reserved:21;
+ };
+ __le32 dbg_wr_cmd_addr;
+ };
+};
+
+struct hl_eq_hbm_sei_wr_par_intr_info {
+ /* entry 0: WR command address from the 1st cycle prior to the error
+ * entry 1: WR command address from the 2nd cycle prior to the error
+ * and so on...
+ */
+ struct hbm_sei_wr_cmd_address dbg_last_wr_cmds[HBM_WR_PAR_CMD_LIFO_LEN];
+ /* derr[0:1] - 1st HBM cycle DERR output
+ * derr[2:3] - 2nd HBM cycle DERR output
+ */
+ __u8 dbg_derr;
+ /* extend to reach 8B */
+ __u8 pad[3];
+};
+
+/*
+ * this struct represents the following sei causes:
+ * command parity, ECC double error, ECC single error, dfi error, cattrip,
+ * temperature read-out, read parity error and write parity error.
+ * some only use the header while some have extra data.
+ */
+struct hl_eq_hbm_sei_data {
+ struct hl_hbm_sei_header hdr;
+ union {
+ struct hl_eq_hbm_sei_ca_par_intr_info ca_parity_even_info;
+ struct hl_eq_hbm_sei_ca_par_intr_info ca_parity_odd_info;
+ struct hl_eq_hbm_sei_read_err_intr_info read_err_info;
+ struct hl_eq_hbm_sei_wr_par_intr_info wr_parity_info;
+ };
+};
+
+/* Engine/farm arc interrupt type */
+enum hl_engine_arc_interrupt_type {
+ /* Qman/farm ARC DCCM QUEUE FULL interrupt type */
+ ENGINE_ARC_DCCM_QUEUE_FULL_IRQ = 1
+};
+
+/* Data structure specifies details of payload of DCCM QUEUE FULL interrupt */
+struct hl_engine_arc_dccm_queue_full_irq {
+ /* Queue index value which caused DCCM QUEUE FULL */
+ __le32 queue_index;
+ __le32 pad;
+};
+
+/* Data structure specifies details of QM/FARM ARC interrupt */
+struct hl_eq_engine_arc_intr_data {
+	/* ARC engine id e.g. DCORE0_TPC0_QM_ARC, DCORE0_TPC1_QM_ARC */
+ __le32 engine_id;
+ __le32 intr_type; /* enum hl_engine_arc_interrupt_type */
+ /* More info related to the interrupt e.g. queue index
+	 * in case of a DCCM_QUEUE_FULL interrupt.
+ */
+ __le64 payload;
+ __le64 pad[5];
+};
+
+#define ADDR_DEC_ADDRESS_COUNT_MAX 4
+
+/* Data structure specifies details of ADDR_DEC interrupt */
+struct hl_eq_addr_dec_intr_data {
+ struct hl_eq_intr_cause intr_cause;
+ __le64 addr[ADDR_DEC_ADDRESS_COUNT_MAX];
+ __u8 addr_cnt;
+ __u8 pad[7];
+};
+
+struct hl_eq_entry {
+ struct hl_eq_header hdr;
+ union {
+ __le64 data_placeholder;
+ struct hl_eq_ecc_data ecc_data;
+ struct hl_eq_hbm_ecc_data hbm_ecc_data; /* Obsolete */
+ struct hl_eq_sm_sei_data sm_sei_data;
+ struct cpucp_pkt_sync_err pkt_sync_err;
+ struct hl_eq_fw_alive fw_alive;
+ struct hl_eq_intr_cause intr_cause;
+ struct hl_eq_pcie_drain_ind_data pcie_drain_ind_data;
+ struct hl_eq_razwi_info razwi_info;
+ struct hl_eq_razwi_with_intr_cause razwi_with_intr_cause;
+ struct hl_eq_hbm_sei_data sei_data; /* Gaudi2 HBM */
+ struct hl_eq_engine_arc_intr_data arc_data;
+ struct hl_eq_addr_dec_intr_data addr_dec;
+ __le64 data[7];
+ };
+};
+
+#define HL_EQ_ENTRY_SIZE sizeof(struct hl_eq_entry)
+
+#define EQ_CTL_READY_SHIFT 31
+#define EQ_CTL_READY_MASK 0x80000000
+
+#define EQ_CTL_EVENT_TYPE_SHIFT 16
+#define EQ_CTL_EVENT_TYPE_MASK 0x0FFF0000
+
+#define EQ_CTL_INDEX_SHIFT 0
+#define EQ_CTL_INDEX_MASK 0x0000FFFF
+
+enum pq_init_status {
+ PQ_INIT_STATUS_NA = 0,
+ PQ_INIT_STATUS_READY_FOR_CP,
+ PQ_INIT_STATUS_READY_FOR_HOST,
+ PQ_INIT_STATUS_READY_FOR_CP_SINGLE_MSI,
+ PQ_INIT_STATUS_LEN_NOT_POWER_OF_TWO_ERR,
+ PQ_INIT_STATUS_ILLEGAL_Q_ADDR_ERR
+};
+
+/*
+ * CpuCP Primary Queue Packets
+ *
+ * During normal operation, the host's kernel driver needs to send various
+ * messages to CpuCP, usually either to SET some value in a H/W peripheral or
+ * to GET the current value of some H/W peripheral. For example, SET the
+ * frequency of MME/TPC and GET the value of the thermal sensor.
+ *
+ * These messages can be initiated either by the User application or by the
+ * host's driver itself, e.g. power management code. In either case, the
+ * communication from the host's driver to CpuCP will *always* be in
+ * synchronous mode, meaning that the host will send a single message and poll
+ * until the message was acknowledged and the results are ready (if results are
+ * needed).
+ *
+ * This means that only a single message can be sent at a time and the host's
+ * driver must wait for its result before sending the next message. Having said
+ * that, because these are control messages which are sent at a relatively low
+ * frequency, this limitation seems acceptable. It's important to note that
+ * in case of multiple devices, messages to different devices *can* be sent
+ * at the same time.
+ *
+ * The message, inputs/outputs (if relevant) and fence object will be located
+ * on the device DDR at an address that will be determined by the host's driver.
+ * During device initialization phase, the host will pass to CpuCP that address.
+ * Most of the message types will contain inputs/outputs inside the message
+ * itself. The common part of each message will contain the opcode of the
+ * message (its type) and a field representing a fence object.
+ *
+ * When the host's driver wishes to send a message to CpuCP, it will write the
+ * message contents to the device DDR, clear the fence object and then write to
+ * the PSOC_ARC1_AUX_SW_INTR, to issue interrupt 121 to the ARC Management CPU.
+ *
+ * Upon receiving the interrupt (#121), CpuCP will read the message from the
+ * DDR. In case the message is a SET operation, CpuCP will first perform the
+ * operation and then write to the fence object on the device DDR. In case the
+ * message is a GET operation, CpuCP will first fill the results section on the
+ * device DDR and then write to the fence object. If an error occurred, CpuCP
+ * will fill the rc field with the right error code.
+ *
+ * In the meantime, the host's driver will poll on the fence object. Once the
+ * host sees that the fence object is signaled, it will read the results from
+ * the device DDR (if relevant) and resume the code execution in the host's
+ * driver.
+ *
+ * To use QMAN packets, the opcode must be the QMAN opcode, shifted by 8
+ * so the value being put by the host's driver matches the value read by CpuCP
+ *
+ * Non-QMAN packets should be limited to values 1 through (2^8 - 1)
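Condensed, the synchronous flow described above looks roughly like the following host-side sketch; it is illustrative only, and the foo_* helpers standing in for driver internals are hypothetical:

/* Hypothetical host-side helpers standing in for driver internals. */
extern void foo_write_msg_to_ddr(const void *msg, size_t len);
extern void foo_clear_fence(void);
extern bool foo_fence_signaled(void);
extern void foo_ring_doorbell(void);    /* writes PSOC_ARC1_AUX_SW_INTR */
extern int foo_read_rc_from_ddr(void);

/* Sketch of the synchronous send/poll cycle described above. */
static int foo_cpucp_send_sync(const void *msg, size_t len)
{
        foo_write_msg_to_ddr(msg, len);
        foo_clear_fence();
        foo_ring_doorbell();            /* issues interrupt 121 to the ARC CPU */

        while (!foo_fence_signaled())
                cpu_relax();            /* real code would bound this wait */

        return foo_read_rc_from_ddr();  /* rc filled in by CpuCP */
}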
+ *
+ * Detailed description:
+ *
+ * CPUCP_PACKET_DISABLE_PCI_ACCESS -
+ * After receiving this packet the embedded CPU must NOT issue PCI
+ *	transactions (read/write) towards the Host CPU. This also includes
+ * sending MSI-X interrupts.
+ * This packet is usually sent before the device is moved to D3Hot state.
+ *
+ * CPUCP_PACKET_ENABLE_PCI_ACCESS -
+ * After receiving this packet the embedded CPU is allowed to issue PCI
+ * transactions towards the Host CPU, including sending MSI-X interrupts.
+ *	This packet is usually sent after the device is moved to D0 state.
+ *
+ * CPUCP_PACKET_TEMPERATURE_GET -
+ * Fetch the current temperature / Max / Max Hyst / Critical /
+ * Critical Hyst of a specified thermal sensor. The packet's
+ * arguments specify the desired sensor and the field to get.
+ *
+ * CPUCP_PACKET_VOLTAGE_GET -
+ * Fetch the voltage / Max / Min of a specified sensor. The packet's
+ * arguments specify the sensor and type.
+ *
+ * CPUCP_PACKET_CURRENT_GET -
+ * Fetch the current / Max / Min of a specified sensor. The packet's
+ * arguments specify the sensor and type.
+ *
+ * CPUCP_PACKET_FAN_SPEED_GET -
+ * Fetch the speed / Max / Min of a specified fan. The packet's
+ * arguments specify the sensor and type.
+ *
+ * CPUCP_PACKET_PWM_GET -
+ * Fetch the pwm value / mode of a specified pwm. The packet's
+ * arguments specify the sensor and type.
+ *
+ * CPUCP_PACKET_PWM_SET -
+ * Set the pwm value / mode of a specified pwm. The packet's
+ * arguments specify the sensor, type and value.
+ *
+ * CPUCP_PACKET_FREQUENCY_SET -
+ * Set the frequency of a specified PLL. The packet's arguments specify
+ * the PLL and the desired frequency. The actual frequency in the device
+ * might differ from the requested frequency.
+ *
+ * CPUCP_PACKET_FREQUENCY_GET -
+ * Fetch the frequency of a specified PLL. The packet's arguments specify
+ * the PLL.
+ *
+ * CPUCP_PACKET_LED_SET -
+ * Set the state of a specified led. The packet's arguments
+ * specify the led and the desired state.
+ *
+ * CPUCP_PACKET_I2C_WR -
+ * Write 32-bit value to I2C device. The packet's arguments specify the
+ * I2C bus, address and value.
+ *
+ * CPUCP_PACKET_I2C_RD -
+ * Read 32-bit value from I2C device. The packet's arguments specify the
+ * I2C bus and address.
+ *
+ * CPUCP_PACKET_INFO_GET -
+ * Fetch information from the device as specified in the packet's
+ * structure. The host's driver passes the max size it allows the CpuCP to
+ * write to the structure, to prevent data corruption in case of
+ * mismatched driver/FW versions.
+ *
+ * CPUCP_PACKET_FLASH_PROGRAM_REMOVED - this packet was removed
+ *
+ * CPUCP_PACKET_UNMASK_RAZWI_IRQ -
+ * Unmask the given IRQ. The IRQ number is specified in the value field.
+ * The packet is sent after receiving an interrupt and printing its
+ * relevant information.
+ *
+ * CPUCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY -
+ * Unmask the given IRQs. The IRQ numbers are specified in an array right
+ * after the cpucp_packet structure, whose first element is the array
+ * length. The packet is sent after a soft reset was done in order to
+ * handle any interrupts that were sent during the reset process.
+ *
+ * CPUCP_PACKET_TEST -
+ * Test packet for CpuCP connectivity. The CPU will put the fence value
+ * in the result field.
+ *
+ * CPUCP_PACKET_FREQUENCY_CURR_GET -
+ * Fetch the current frequency of a specified PLL. The packet's arguments
+ * specify the PLL.
+ *
+ * CPUCP_PACKET_MAX_POWER_GET -
+ * Fetch the maximal power of the device.
+ *
+ * CPUCP_PACKET_MAX_POWER_SET -
+ * Set the maximal power of the device. The packet's arguments specify
+ * the power.
+ *
+ * CPUCP_PACKET_EEPROM_DATA_GET -
+ * Get EEPROM data from the CpuCP kernel. The buffer is specified in the
+ * addr field. The CPU will put the returned data size in the result
+ * field. In addition, the host's driver passes the max size it allows the
+ * CpuCP to write to the structure, to prevent data corruption in case of
+ * mismatched driver/FW versions.
+ *
+ * CPUCP_PACKET_NIC_INFO_GET -
+ * Fetch information from the device regarding the NIC. The host's driver
+ * passes the max size it allows the CpuCP to write to the structure, to
+ * prevent data corruption in case of mismatched driver/FW versions.
+ *
+ * CPUCP_PACKET_TEMPERATURE_SET -
+ * Set the value of the offset property of a specified thermal sensor.
+ * The packet's arguments specify the desired sensor and the field to
+ * set.
+ *
+ * CPUCP_PACKET_VOLTAGE_SET -
+ * Trigger the reset_history property of a specified voltage sensor.
+ * The packet's arguments specify the desired sensor and the field to
+ * set.
+ *
+ * CPUCP_PACKET_CURRENT_SET -
+ * Trigger the reset_history property of a specified current sensor.
+ * The packet's arguments specify the desired sensor and the field to
+ * set.
+ *
+ * CPUCP_PACKET_PCIE_THROUGHPUT_GET -
+ * Get throughput of PCIe.
+ * The packet's arguments specify the transaction direction (TX/RX).
+ * The window measurement is 10[msec], and the return value is in KB/sec.
+ *
+ * CPUCP_PACKET_PCIE_REPLAY_CNT_GET
+ * Replay count measures the number of "replay" events, which is basically
+ * the number of retries performed by PCIe.
+ *
+ * CPUCP_PACKET_TOTAL_ENERGY_GET -
+ * Total Energy is a measurement of energy from the time the FW Linux
+ * is loaded. It is calculated by multiplying the average power
+ * by the time passed since armcp start. The units are millijoules.
+ *
+ * CPUCP_PACKET_PLL_INFO_GET -
+ * Fetch frequencies of a PLL from the required PLL IP.
+ * The packet's arguments specify the device PLL type; the PLL type is
+ * taken from the device's pll_index enum.
+ * The result is composed of 4 outputs, each is 16-bit
+ * frequency in MHz.
+ *
+ * CPUCP_PACKET_POWER_GET -
+ * Fetch the present power consumption of the device (Current * Voltage).
+ *
+ * CPUCP_PACKET_NIC_PFC_SET -
+ * Enable/Disable the NIC PFC feature. The packet's arguments specify the
+ * NIC port, relevant lanes to configure and one bit indication for
+ * enable/disable.
+ *
+ * CPUCP_PACKET_NIC_FAULT_GET -
+ * Fetch the current indication for local/remote faults from the NIC MAC.
+ * The result is 32-bit value of the relevant register.
+ *
+ * CPUCP_PACKET_NIC_LPBK_SET -
+ * Enable/Disable the MAC loopback feature. The packet's arguments specify
+ * the NIC port, relevant lanes to configure and one bit indication for
+ * enable/disable.
+ *
+ * CPUCP_PACKET_NIC_MAC_INIT -
+ * Configure the NIC MAC channels. The packet's arguments specify the
+ * NIC port and the speed.
+ *
+ * CPUCP_PACKET_MSI_INFO_SET -
+ * Set the index number for each supported MSI type going from
+ * host to device.
+ *
+ * CPUCP_PACKET_NIC_XPCS91_REGS_GET -
+ * Fetch the un/correctable counters values from the NIC MAC.
+ *
+ * CPUCP_PACKET_NIC_STAT_REGS_GET -
+ * Fetch various NIC MAC counters from the NIC STAT.
+ *
+ * CPUCP_PACKET_NIC_STAT_REGS_CLR -
+ * Clear the various NIC MAC counters in the NIC STAT.
+ *
+ * CPUCP_PACKET_NIC_STAT_REGS_ALL_GET -
+ * Fetch all NIC MAC counters from the NIC STAT.
+ *
+ * CPUCP_PACKET_IS_IDLE_CHECK -
+ * Check if the device is IDLE in regard to the DMA/compute engines
+ * and QMANs. The f/w will return a bitmask where each bit represents
+ * a different engine or QMAN according to enum cpucp_idle_mask.
+ * The bit will be 1 if the engine is NOT idle.
+ *
+ * CPUCP_PACKET_HBM_REPLACED_ROWS_INFO_GET -
+ * Fetch data on all HBM replaced rows and rows pending replacement.
+ *
+ * CPUCP_PACKET_HBM_PENDING_ROWS_STATUS -
+ * Fetch the status of HBM rows that are pending replacement and need a
+ * reboot to be replaced.
+ *
+ * CPUCP_PACKET_POWER_SET -
+ * Resets power history of device to 0
+ *
+ * CPUCP_PACKET_ENGINE_CORE_ASID_SET -
+ * Packet to perform engine core ASID configuration
+ *
+ * CPUCP_PACKET_SEC_ATTEST_GET -
+ * Get the attestation data that is collected during various stages of the
+ * boot sequence. The attestation data is also hashed with a unique
+ * number (nonce) provided by the host to prevent replay attacks.
+ * The public key and certificate are also provided as part of the FW
+ * response.
+ *
+ * CPUCP_PACKET_INFO_SIGNED_GET -
+ * Get the device information signed by the Trusted Platform device.
+ * The device info data is also hashed with a unique number (nonce) provided
+ * by the host to prevent replay attacks. The public key and certificate are
+ * also provided as part of the FW response.
+ *
+ * CPUCP_PACKET_MONITOR_DUMP_GET -
+ * Get monitors registers dump from the CpuCP kernel.
+ * The CPU will put the registers dump in a buffer allocated by the driver,
+ * whose address is passed via the CpuCP packet. In addition, the host's driver
+ * passes the max size it allows the CpuCP to write to the structure, to prevent
+ * data corruption in case of mismatched driver/FW versions.
+ * Obsolete.
+ *
+ * CPUCP_PACKET_GENERIC_PASSTHROUGH -
+ * Generic opcode for all firmware info that is only passed to host
+ * through the LKD, without getting parsed there.
+ *
+ * CPUCP_PACKET_ACTIVE_STATUS_SET -
+ * LKD sends the FW an indication of whether the device is free or in use;
+ * this indication is also reported to the BMC.
+ *
+ * CPUCP_PACKET_SOFT_RESET -
+ * Packet to perform soft-reset.
+ *
+ * CPUCP_PACKET_INTS_REGISTER -
+ * Packet to inform FW that queues have been established and LKD is ready to receive
+ * EQ events.
+ */
+
+enum cpucp_packet_id {
+ CPUCP_PACKET_DISABLE_PCI_ACCESS = 1, /* internal */
+ CPUCP_PACKET_ENABLE_PCI_ACCESS, /* internal */
+ CPUCP_PACKET_TEMPERATURE_GET, /* sysfs */
+ CPUCP_PACKET_VOLTAGE_GET, /* sysfs */
+ CPUCP_PACKET_CURRENT_GET, /* sysfs */
+ CPUCP_PACKET_FAN_SPEED_GET, /* sysfs */
+ CPUCP_PACKET_PWM_GET, /* sysfs */
+ CPUCP_PACKET_PWM_SET, /* sysfs */
+ CPUCP_PACKET_FREQUENCY_SET, /* sysfs */
+ CPUCP_PACKET_FREQUENCY_GET, /* sysfs */
+ CPUCP_PACKET_LED_SET, /* debugfs */
+ CPUCP_PACKET_I2C_WR, /* debugfs */
+ CPUCP_PACKET_I2C_RD, /* debugfs */
+ CPUCP_PACKET_INFO_GET, /* IOCTL */
+ CPUCP_PACKET_FLASH_PROGRAM_REMOVED,
+ CPUCP_PACKET_UNMASK_RAZWI_IRQ, /* internal */
+ CPUCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY, /* internal */
+ CPUCP_PACKET_TEST, /* internal */
+ CPUCP_PACKET_FREQUENCY_CURR_GET, /* sysfs */
+ CPUCP_PACKET_MAX_POWER_GET, /* sysfs */
+ CPUCP_PACKET_MAX_POWER_SET, /* sysfs */
+ CPUCP_PACKET_EEPROM_DATA_GET, /* sysfs */
+ CPUCP_PACKET_NIC_INFO_GET, /* internal */
+ CPUCP_PACKET_TEMPERATURE_SET, /* sysfs */
+ CPUCP_PACKET_VOLTAGE_SET, /* sysfs */
+ CPUCP_PACKET_CURRENT_SET, /* sysfs */
+ CPUCP_PACKET_PCIE_THROUGHPUT_GET, /* internal */
+ CPUCP_PACKET_PCIE_REPLAY_CNT_GET, /* internal */
+ CPUCP_PACKET_TOTAL_ENERGY_GET, /* internal */
+ CPUCP_PACKET_PLL_INFO_GET, /* internal */
+ CPUCP_PACKET_NIC_STATUS, /* internal */
+ CPUCP_PACKET_POWER_GET, /* internal */
+ CPUCP_PACKET_NIC_PFC_SET, /* internal */
+ CPUCP_PACKET_NIC_FAULT_GET, /* internal */
+ CPUCP_PACKET_NIC_LPBK_SET, /* internal */
+ CPUCP_PACKET_NIC_MAC_CFG, /* internal */
+ CPUCP_PACKET_MSI_INFO_SET, /* internal */
+ CPUCP_PACKET_NIC_XPCS91_REGS_GET, /* internal */
+ CPUCP_PACKET_NIC_STAT_REGS_GET, /* internal */
+ CPUCP_PACKET_NIC_STAT_REGS_CLR, /* internal */
+ CPUCP_PACKET_NIC_STAT_REGS_ALL_GET, /* internal */
+ CPUCP_PACKET_IS_IDLE_CHECK, /* internal */
+ CPUCP_PACKET_HBM_REPLACED_ROWS_INFO_GET,/* internal */
+ CPUCP_PACKET_HBM_PENDING_ROWS_STATUS, /* internal */
+ CPUCP_PACKET_POWER_SET, /* internal */
+ CPUCP_PACKET_RESERVED, /* not used */
+ CPUCP_PACKET_ENGINE_CORE_ASID_SET, /* internal */
+ CPUCP_PACKET_RESERVED2, /* not used */
+ CPUCP_PACKET_SEC_ATTEST_GET, /* internal */
+ CPUCP_PACKET_INFO_SIGNED_GET, /* internal */
+ CPUCP_PACKET_RESERVED4, /* not used */
+ CPUCP_PACKET_MONITOR_DUMP_GET, /* debugfs */
+ CPUCP_PACKET_RESERVED5, /* not used */
+ CPUCP_PACKET_RESERVED6, /* not used */
+ CPUCP_PACKET_RESERVED7, /* not used */
+ CPUCP_PACKET_GENERIC_PASSTHROUGH, /* IOCTL */
+ CPUCP_PACKET_RESERVED8, /* not used */
+ CPUCP_PACKET_ACTIVE_STATUS_SET, /* internal */
+ CPUCP_PACKET_RESERVED9, /* not used */
+ CPUCP_PACKET_RESERVED10, /* not used */
+ CPUCP_PACKET_RESERVED11, /* not used */
+ CPUCP_PACKET_RESERVED12, /* internal */
+ CPUCP_PACKET_RESERVED13, /* internal */
+ CPUCP_PACKET_SOFT_RESET, /* internal */
+ CPUCP_PACKET_INTS_REGISTER, /* internal */
+ CPUCP_PACKET_ID_MAX /* must be last */
+};
+
+#define CPUCP_PACKET_FENCE_VAL 0xFE8CE7A5
+
+#define CPUCP_PKT_CTL_RC_SHIFT 12
+#define CPUCP_PKT_CTL_RC_MASK 0x0000F000
+
+#define CPUCP_PKT_CTL_OPCODE_SHIFT 16
+#define CPUCP_PKT_CTL_OPCODE_MASK 0x1FFF0000
+
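+/*
+ * Example (illustrative sketch, not part of the ABI): composing the ctl word
+ * of a packet from an opcode, using the shift/mask pairs above. The helper
+ * name is hypothetical. Per the QMAN note above, a QMAN opcode must already
+ * be shifted by 8 bits before being passed here.
+ */
+static inline __le32 cpucp_pkt_ctl_compose(u32 opcode)
+{
+	return cpu_to_le32((opcode << CPUCP_PKT_CTL_OPCODE_SHIFT) &
+			   CPUCP_PKT_CTL_OPCODE_MASK);
+}
+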
+#define CPUCP_PKT_RES_PLL_OUT0_SHIFT 0
+#define CPUCP_PKT_RES_PLL_OUT0_MASK 0x000000000000FFFFull
+#define CPUCP_PKT_RES_PLL_OUT1_SHIFT 16
+#define CPUCP_PKT_RES_PLL_OUT1_MASK 0x00000000FFFF0000ull
+#define CPUCP_PKT_RES_PLL_OUT2_SHIFT 32
+#define CPUCP_PKT_RES_PLL_OUT2_MASK 0x0000FFFF00000000ull
+#define CPUCP_PKT_RES_PLL_OUT3_SHIFT 48
+#define CPUCP_PKT_RES_PLL_OUT3_MASK 0xFFFF000000000000ull
+
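+/*
+ * Example (illustrative sketch): unpacking the four 16-bit PLL output
+ * frequencies (in MHz) from the 64-bit result of CPUCP_PACKET_PLL_INFO_GET.
+ * The helper name and the output array are hypothetical.
+ */
+static inline void cpucp_pll_result_unpack(u64 result, u16 freqs_mhz[4])
+{
+	freqs_mhz[0] = (result & CPUCP_PKT_RES_PLL_OUT0_MASK) >>
+			CPUCP_PKT_RES_PLL_OUT0_SHIFT;
+	freqs_mhz[1] = (result & CPUCP_PKT_RES_PLL_OUT1_MASK) >>
+			CPUCP_PKT_RES_PLL_OUT1_SHIFT;
+	freqs_mhz[2] = (result & CPUCP_PKT_RES_PLL_OUT2_MASK) >>
+			CPUCP_PKT_RES_PLL_OUT2_SHIFT;
+	freqs_mhz[3] = (result & CPUCP_PKT_RES_PLL_OUT3_MASK) >>
+			CPUCP_PKT_RES_PLL_OUT3_SHIFT;
+}
+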
+#define CPUCP_PKT_RES_EEPROM_OUT0_SHIFT 0
+#define CPUCP_PKT_RES_EEPROM_OUT0_MASK 0x000000000000FFFFull
+#define CPUCP_PKT_RES_EEPROM_OUT1_SHIFT 16
+#define CPUCP_PKT_RES_EEPROM_OUT1_MASK 0x0000000000FF0000ull
+
+#define CPUCP_PKT_VAL_PFC_IN1_SHIFT 0
+#define CPUCP_PKT_VAL_PFC_IN1_MASK 0x0000000000000001ull
+#define CPUCP_PKT_VAL_PFC_IN2_SHIFT 1
+#define CPUCP_PKT_VAL_PFC_IN2_MASK 0x000000000000001Eull
+
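+/*
+ * Example (illustrative sketch): packing the value field of a
+ * CPUCP_PACKET_NIC_PFC_SET request - one enable/disable bit (IN1) plus the
+ * lane mask (IN2), per the fields above. The helper name is hypothetical.
+ */
+static inline u64 cpucp_nic_pfc_val(bool enable, u64 lanes_mask)
+{
+	return (((u64)enable << CPUCP_PKT_VAL_PFC_IN1_SHIFT) &
+		CPUCP_PKT_VAL_PFC_IN1_MASK) |
+	       ((lanes_mask << CPUCP_PKT_VAL_PFC_IN2_SHIFT) &
+		CPUCP_PKT_VAL_PFC_IN2_MASK);
+}
+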
+#define CPUCP_PKT_VAL_LPBK_IN1_SHIFT 0
+#define CPUCP_PKT_VAL_LPBK_IN1_MASK 0x0000000000000001ull
+#define CPUCP_PKT_VAL_LPBK_IN2_SHIFT 1
+#define CPUCP_PKT_VAL_LPBK_IN2_MASK 0x000000000000001Eull
+
+#define CPUCP_PKT_VAL_MAC_CNT_IN1_SHIFT 0
+#define CPUCP_PKT_VAL_MAC_CNT_IN1_MASK 0x0000000000000001ull
+#define CPUCP_PKT_VAL_MAC_CNT_IN2_SHIFT 1
+#define CPUCP_PKT_VAL_MAC_CNT_IN2_MASK 0x00000000FFFFFFFEull
+
+/* heartbeat status bits */
+#define CPUCP_PKT_HB_STATUS_EQ_FAULT_SHIFT 0
+#define CPUCP_PKT_HB_STATUS_EQ_FAULT_MASK 0x00000001
+
+struct cpucp_packet {
+ union {
+ __le64 value; /* For SET packets */
+ __le64 result; /* For GET packets */
+ __le64 addr; /* For PQ */
+ };
+
+ __le32 ctl;
+
+ __le32 fence; /* Signal to host that message is completed */
+
+ union {
+ struct {/* For temperature/current/voltage/fan/pwm get/set */
+ __le16 sensor_index;
+ __le16 type;
+ };
+
+ struct { /* For I2C read/write */
+ __u8 i2c_bus;
+ __u8 i2c_addr;
+ __u8 i2c_reg;
+ /*
+			 * In legacy implementations, i2c_len was not present,
+			 * was unused and just added as padding.
+			 * So if i2c_len is 0, it is treated as legacy
+			 * and reads/writes 1 byte; if i2c_len is specified,
+			 * it is treated as the new multibyte r/w support.
+ */
+ __u8 i2c_len;
+ };
+
+ struct {/* For PLL info fetch */
+ __le16 pll_type;
+			/* TODO: pll_reg is kept temporarily until removal */
+ __le16 pll_reg;
+ };
+
+ /* For any general request */
+ __le32 index;
+
+ /* For frequency get/set */
+ __le32 pll_index;
+
+ /* For led set */
+ __le32 led_index;
+
+ /* For get CpuCP info/EEPROM data/NIC info */
+ __le32 data_max_size;
+
+ /*
+ * For any general status bitmask. Shall be used whenever the
+ * result cannot be used to hold general purpose data.
+ */
+ __le32 status_mask;
+
+ /* random, used once number, for security packets */
+ __le32 nonce;
+ };
+
+ union {
+ /* For NIC requests */
+ __le32 port_index;
+
+ /* For Generic packet sub index */
+ __le32 pkt_subidx;
+ };
+};
+
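+/*
+ * Example (illustrative sketch): filling a CPUCP_PACKET_I2C_RD request using
+ * the new multibyte mode (i2c_len > 0). The helper name is hypothetical and
+ * memset() is assumed from <linux/string.h>; sending the packet and polling
+ * the fence are driver-specific and omitted.
+ */
+static inline void cpucp_fill_i2c_rd(struct cpucp_packet *pkt, u8 bus,
+				     u8 addr, u8 reg, u8 len)
+{
+	memset(pkt, 0, sizeof(*pkt));
+	pkt->ctl = cpu_to_le32(CPUCP_PACKET_I2C_RD <<
+			       CPUCP_PKT_CTL_OPCODE_SHIFT);
+	pkt->i2c_bus = bus;
+	pkt->i2c_addr = addr;
+	pkt->i2c_reg = reg;
+	pkt->i2c_len = len;	/* 0 would select the legacy 1-byte mode */
+}
+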
+struct cpucp_unmask_irq_arr_packet {
+ struct cpucp_packet cpucp_pkt;
+ __le32 length;
+ __le32 irqs[];
+};
+
+struct cpucp_nic_status_packet {
+ struct cpucp_packet cpucp_pkt;
+ __le32 length;
+ __le32 data[];
+};
+
+struct cpucp_array_data_packet {
+ struct cpucp_packet cpucp_pkt;
+ __le32 length;
+ __le32 data[];
+};
+
+enum cpucp_led_index {
+ CPUCP_LED0_INDEX = 0,
+ CPUCP_LED1_INDEX,
+ CPUCP_LED2_INDEX,
+ CPUCP_LED_MAX_INDEX = CPUCP_LED2_INDEX
+};
+
+/*
+ * enum cpucp_packet_rc - Error return code
+ * @cpucp_packet_success -> in case of success.
+ * @cpucp_packet_invalid -> this is to support first generation platforms.
+ * @cpucp_packet_fault -> in case of a processing error, e.g. failing to
+ *                        get a device binding or semaphore.
+ * @cpucp_packet_invalid_pkt -> when the cpucp packet is unsupported.
+ * @cpucp_packet_invalid_params -> when a parameter check fails, e.g. on the
+ *                                 length of a buffer or an attribute value.
+ * @cpucp_packet_rc_max -> indicates the size of the enum, so it must be last.
+ */
+enum cpucp_packet_rc {
+ cpucp_packet_success,
+ cpucp_packet_invalid,
+ cpucp_packet_fault,
+ cpucp_packet_invalid_pkt,
+ cpucp_packet_invalid_params,
+ cpucp_packet_rc_max
+};
+
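+/*
+ * Example (illustrative sketch): extracting the return code that CpuCP writes
+ * back into the ctl word, using the RC shift/mask defined earlier. The helper
+ * name is hypothetical.
+ */
+static inline enum cpucp_packet_rc cpucp_pkt_ctl_rc(__le32 ctl)
+{
+	return (le32_to_cpu(ctl) & CPUCP_PKT_CTL_RC_MASK) >>
+		CPUCP_PKT_CTL_RC_SHIFT;
+}
+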
+/*
+ * cpucp_temp_type should adhere to hwmon_temp_attributes
+ * defined in Linux kernel hwmon.h file
+ */
+enum cpucp_temp_type {
+ cpucp_temp_input,
+ cpucp_temp_min = 4,
+ cpucp_temp_min_hyst,
+ cpucp_temp_max = 6,
+ cpucp_temp_max_hyst,
+ cpucp_temp_crit,
+ cpucp_temp_crit_hyst,
+ cpucp_temp_offset = 19,
+ cpucp_temp_lowest = 21,
+ cpucp_temp_highest = 22,
+ cpucp_temp_reset_history = 23,
+ cpucp_temp_warn = 24,
+ cpucp_temp_max_crit = 25,
+ cpucp_temp_max_warn = 26,
+};
+
+enum cpucp_in_attributes {
+ cpucp_in_input,
+ cpucp_in_min,
+ cpucp_in_max,
+ cpucp_in_lowest = 6,
+ cpucp_in_highest = 7,
+ cpucp_in_reset_history,
+ cpucp_in_intr_alarm_a,
+ cpucp_in_intr_alarm_b,
+};
+
+enum cpucp_curr_attributes {
+ cpucp_curr_input,
+ cpucp_curr_min,
+ cpucp_curr_max,
+ cpucp_curr_lowest = 6,
+ cpucp_curr_highest = 7,
+ cpucp_curr_reset_history
+};
+
+enum cpucp_fan_attributes {
+ cpucp_fan_input,
+ cpucp_fan_min = 2,
+ cpucp_fan_max
+};
+
+enum cpucp_pwm_attributes {
+ cpucp_pwm_input,
+ cpucp_pwm_enable
+};
+
+enum cpucp_pcie_throughput_attributes {
+ cpucp_pcie_throughput_tx,
+ cpucp_pcie_throughput_rx
+};
+
+/* TODO: kept temporarily until removal */
+enum cpucp_pll_reg_attributes {
+ cpucp_pll_nr_reg,
+ cpucp_pll_nf_reg,
+ cpucp_pll_od_reg,
+ cpucp_pll_div_factor_reg,
+ cpucp_pll_div_sel_reg
+};
+
+/* TODO: kept temporarily until removal */
+enum cpucp_pll_type_attributes {
+ cpucp_pll_cpu,
+ cpucp_pll_pci,
+};
+
+/*
+ * cpucp_power_type aligns with hwmon_power_attributes
+ * defined in Linux kernel hwmon.h file
+ */
+enum cpucp_power_type {
+ CPUCP_POWER_INPUT = 8,
+ CPUCP_POWER_INPUT_HIGHEST = 9,
+ CPUCP_POWER_RESET_INPUT_HISTORY = 11
+};
+
+/*
+ * MSI type enumeration table for all ASICs and future SW versions.
+ * For future ASIC-LKD compatibility, we can only add new enumerations
+ * at the end of the table (before CPUCP_NUM_OF_MSI_TYPES).
+ * Changing the order of entries or removing entries is not allowed.
+ */
+enum cpucp_msi_type {
+ CPUCP_EVENT_QUEUE_MSI_TYPE,
+ CPUCP_NIC_PORT1_MSI_TYPE,
+ CPUCP_NIC_PORT3_MSI_TYPE,
+ CPUCP_NIC_PORT5_MSI_TYPE,
+ CPUCP_NIC_PORT7_MSI_TYPE,
+ CPUCP_NIC_PORT9_MSI_TYPE,
+ CPUCP_EVENT_QUEUE_ERR_MSI_TYPE,
+ CPUCP_NUM_OF_MSI_TYPES
+};
+
+/*
+ * PLL enumeration table used for all ASICs and future SW versions.
+ * For future ASIC-LKD compatibility, we can only add new enumerations
+ * at the end of the table.
+ * Changing the order of entries or removing entries is not allowed.
+ */
+enum pll_index {
+ CPU_PLL = 0,
+ PCI_PLL = 1,
+ NIC_PLL = 2,
+ DMA_PLL = 3,
+ MESH_PLL = 4,
+ MME_PLL = 5,
+ TPC_PLL = 6,
+ IF_PLL = 7,
+ SRAM_PLL = 8,
+ NS_PLL = 9,
+ HBM_PLL = 10,
+ MSS_PLL = 11,
+ DDR_PLL = 12,
+ VID_PLL = 13,
+ BANK_PLL = 14,
+ MMU_PLL = 15,
+ IC_PLL = 16,
+ MC_PLL = 17,
+ EMMC_PLL = 18,
+ D2D_PLL = 19,
+ CS_PLL = 20,
+ C2C_PLL = 21,
+ NCH_PLL = 22,
+ C2M_PLL = 23,
+ PLL_MAX
+};
+
+enum rl_index {
+ TPC_RL = 0,
+ MME_RL,
+ EDMA_RL,
+};
+
+enum pvt_index {
+ PVT_SW,
+ PVT_SE,
+ PVT_NW,
+ PVT_NE
+};
+
+/* Event Queue Packets */
+
+struct eq_generic_event {
+ __le64 data[7];
+};
+
+/*
+ * CpuCP info
+ */
+
+#define CARD_NAME_MAX_LEN 16
+#define CPUCP_MAX_SENSORS 128
+#define CPUCP_MAX_NICS 128
+#define CPUCP_LANES_PER_NIC 4
+#define CPUCP_NIC_QSFP_EEPROM_MAX_LEN 1024
+#define CPUCP_MAX_NIC_LANES (CPUCP_MAX_NICS * CPUCP_LANES_PER_NIC)
+#define CPUCP_NIC_MASK_ARR_LEN ((CPUCP_MAX_NICS + 63) / 64)
+#define CPUCP_NIC_POLARITY_ARR_LEN ((CPUCP_MAX_NIC_LANES + 63) / 64)
+#define CPUCP_HBM_ROW_REPLACE_MAX 32
+
+struct cpucp_sensor {
+ __le32 type;
+ __le32 flags;
+};
+
+/**
+ * enum cpucp_card_types - ASIC card type.
+ * @cpucp_card_type_pci: PCI card.
+ * @cpucp_card_type_pmc: PCI Mezzanine Card.
+ */
+enum cpucp_card_types {
+ cpucp_card_type_pci,
+ cpucp_card_type_pmc
+};
+
+#define CPUCP_SEC_CONF_ENABLED_SHIFT 0
+#define CPUCP_SEC_CONF_ENABLED_MASK 0x00000001
+
+#define CPUCP_SEC_CONF_FLASH_WP_SHIFT 1
+#define CPUCP_SEC_CONF_FLASH_WP_MASK 0x00000002
+
+#define CPUCP_SEC_CONF_EEPROM_WP_SHIFT 2
+#define CPUCP_SEC_CONF_EEPROM_WP_MASK 0x00000004
+
+/**
+ * struct cpucp_security_info - Security information.
+ * @config: configuration bit field
+ * @keys_num: number of stored keys
+ * @revoked_keys: revoked keys bit field
+ * @min_svn: minimal security version
+ */
+struct cpucp_security_info {
+ __u8 config;
+ __u8 keys_num;
+ __u8 revoked_keys;
+ __u8 min_svn;
+};
+
+/**
+ * struct cpucp_info - Info from CpuCP that is necessary to the host's driver
+ * @sensors: available sensors description.
+ * @kernel_version: CpuCP linux kernel version.
+ * @reserved: reserved field.
+ * @card_type: card configuration type.
+ * @card_location: in a server, each card has a different connection topology
+ *                 depending on its location (relevant for PMC card type)
+ * @cpld_version: CPLD programmed F/W version.
+ * @infineon_version: Infineon main DC-DC version.
+ * @fuse_version: silicon production FUSE information.
+ * @thermal_version: thermald S/W version.
+ * @cpucp_version: CpuCP S/W version.
+ * @infineon_second_stage_version: Infineon 2nd stage DC-DC version.
+ * @dram_size: available DRAM size.
+ * @card_name: card name that will be displayed in HWMON subsystem on the host
+ * @tpc_binning_mask: TPC binning mask, 1 bit per TPC instance
+ * (0 = functional, 1 = binned)
+ * @decoder_binning_mask: Decoder binning mask, 1 bit per decoder instance
+ * (0 = functional, 1 = binned), maximum 1 per dcore
+ * @sram_binning: Categorize SRAM functionality
+ * (0 = fully functional, 1 = lower-half is not functional,
+ * 2 = upper-half is not functional)
+ * @sec_info: security information
+ * @pll_map: Bit map of supported PLLs for current ASIC version.
+ * @mme_binning_mask: MME binning mask,
+ * bits [0:6] <==> dcore0 mme fma
+ * bits [7:13] <==> dcore1 mme fma
+ * bits [14:20] <==> dcore0 mme ima
+ * bits [21:27] <==> dcore1 mme ima
+ *                    For each group, if the 6th bit is set then the first
+ *                    5 bits represent the column index [0-31]; otherwise
+ *                    these bits are ignored and column index 32 is binned.
+ *                    The 7th bit is don't-care.
+ * @dram_binning_mask: DRAM binning mask, 1 bit per dram instance
+ * (0 = functional 1 = binned)
+ * @memory_repair_flag: eFuse flag indicating memory repair
+ * @edma_binning_mask: EDMA binning mask, 1 bit per EDMA instance
+ * (0 = functional 1 = binned)
+ * @xbar_binning_mask: Xbar binning mask, 1 bit per Xbar instance
+ * (0 = functional 1 = binned)
+ * @interposer_version: Interposer version programmed in eFuse
+ * @substrate_version: Substrate version programmed in eFuse
+ * @eq_health_check_supported: eq health check feature supported in FW.
+ * @fw_hbm_region_size: Size in bytes of FW reserved region in HBM.
+ * @fw_os_version: Firmware OS Version
+ */
+struct cpucp_info {
+ struct cpucp_sensor sensors[CPUCP_MAX_SENSORS];
+ __u8 kernel_version[VERSION_MAX_LEN];
+ __le32 reserved;
+ __le32 card_type;
+ __le32 card_location;
+ __le32 cpld_version;
+ __le32 infineon_version;
+ __u8 fuse_version[VERSION_MAX_LEN];
+ __u8 thermal_version[VERSION_MAX_LEN];
+ __u8 cpucp_version[VERSION_MAX_LEN];
+ __le32 infineon_second_stage_version;
+ __le64 dram_size;
+ char card_name[CARD_NAME_MAX_LEN];
+ __le64 tpc_binning_mask;
+ __le64 decoder_binning_mask;
+ __u8 sram_binning;
+ __u8 dram_binning_mask;
+ __u8 memory_repair_flag;
+ __u8 edma_binning_mask;
+ __u8 xbar_binning_mask;
+ __u8 interposer_version;
+ __u8 substrate_version;
+ __u8 eq_health_check_supported;
+ struct cpucp_security_info sec_info;
+ __le32 fw_hbm_region_size;
+ __u8 pll_map[PLL_MAP_LEN];
+ __le64 mme_binning_mask;
+ __u8 fw_os_version[VERSION_MAX_LEN];
+};
+
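+/*
+ * Example (illustrative sketch): decoding one 7-bit group of the MME binning
+ * mask documented above. If bit 5 of the group is set, bits 0-4 hold the
+ * binned column index [0-31]; otherwise column 32 is binned. The helper name
+ * and this reading of the group layout are assumptions based on the field
+ * description; BIT() is assumed from <linux/bits.h>.
+ */
+static inline u32 cpucp_mme_binned_col(u64 mme_mask, u32 group)
+{
+	u32 bits = (mme_mask >> (group * 7)) & 0x7F;
+
+	return (bits & BIT(5)) ? (bits & 0x1F) : 32;
+}
+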
+struct cpucp_mac_addr {
+ __u8 mac_addr[ETH_ALEN];
+};
+
+enum cpucp_serdes_type {
+ TYPE_1_SERDES_TYPE,
+ TYPE_2_SERDES_TYPE,
+ HLS1_SERDES_TYPE,
+ HLS1H_SERDES_TYPE,
+ HLS2_SERDES_TYPE,
+ HLS2_TYPE_1_SERDES_TYPE,
+ MAX_NUM_SERDES_TYPE, /* number of types */
+ UNKNOWN_SERDES_TYPE = 0xFFFF /* serdes_type is u16 */
+};
+
+struct cpucp_nic_info {
+ struct cpucp_mac_addr mac_addrs[CPUCP_MAX_NICS];
+ __le64 link_mask[CPUCP_NIC_MASK_ARR_LEN];
+ __le64 pol_tx_mask[CPUCP_NIC_POLARITY_ARR_LEN];
+ __le64 pol_rx_mask[CPUCP_NIC_POLARITY_ARR_LEN];
+ __le64 link_ext_mask[CPUCP_NIC_MASK_ARR_LEN];
+ __u8 qsfp_eeprom[CPUCP_NIC_QSFP_EEPROM_MAX_LEN];
+ __le64 auto_neg_mask[CPUCP_NIC_MASK_ARR_LEN];
+ __le16 serdes_type; /* enum cpucp_serdes_type */
+ __le16 tx_swap_map[CPUCP_MAX_NICS];
+ __u8 reserved[6];
+};
+
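+/*
+ * Example (illustrative sketch): testing whether a given NIC port has a link,
+ * assuming bit N of the 64-bit mask array corresponds to port N. The helper
+ * name is hypothetical and BIT_ULL() is assumed from <linux/bits.h>.
+ */
+static inline bool cpucp_nic_port_linked(const struct cpucp_nic_info *info,
+					 u32 port)
+{
+	return le64_to_cpu(info->link_mask[port / 64]) & BIT_ULL(port % 64);
+}
+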
+#define PAGE_DISCARD_MAX 64
+
+struct page_discard_info {
+ __u8 num_entries;
+ __u8 reserved[7];
+ __le32 mmu_page_idx[PAGE_DISCARD_MAX];
+};
+
+/*
+ * struct frac_val - fractional value represented by "integer.frac".
+ * @integer: the integer part of the fractional value;
+ * @frac: the fractional part of the fractional value.
+ */
+struct frac_val {
+ union {
+ struct {
+ __le16 integer;
+ __le16 frac;
+ };
+ __le32 val;
+ };
+};
+
+/*
+ * struct ser_val - the SER (symbol error rate) value is represented by "integer * 10 ^ -exp".
+ * @integer: the integer part of the SER value;
+ * @exp: the exponent part of the SER value.
+ */
+struct ser_val {
+ __le16 integer;
+ __le16 exp;
+};
+
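+/*
+ * Example (illustrative sketch): a SER of 2.0e-7 is encoded as integer = 2,
+ * exp = 7. Kernel code would typically format the value rather than compute
+ * it in floating point; the helper name is hypothetical.
+ */
+static inline int ser_val_format(char *buf, size_t len,
+				 const struct ser_val *ser)
+{
+	return snprintf(buf, len, "%ue-%u", le16_to_cpu(ser->integer),
+			le16_to_cpu(ser->exp));
+}
+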
+/*
+ * struct cpucp_nic_status - describes the status of a NIC port.
+ * @port: NIC port index.
+ * @bad_format_cnt: e.g. CRC.
+ * @responder_out_of_sequence_psn_cnt: e.g NAK.
+ * @high_ber_reinit_cnt: link reinit due to high BER.
+ * @correctable_err_cnt: e.g. bit-flip.
+ * @uncorrectable_err_cnt: e.g. MAC errors.
+ * @retraining_cnt: re-training counter.
+ * @up: is port up.
+ * @pcs_link: has PCS link.
+ * @phy_ready: is PHY ready.
+ * @auto_neg: is Autoneg enabled.
+ * @timeout_retransmission_cnt: timeout retransmission events.
+ * @high_ber_cnt: high ber events.
+ * @pre_fec_ser: pre FEC SER value.
+ * @post_fec_ser: post FEC SER value.
+ * @throughput: measured throughput.
+ * @latency: measured latency.
+ */
+struct cpucp_nic_status {
+ __le32 port;
+ __le32 bad_format_cnt;
+ __le32 responder_out_of_sequence_psn_cnt;
+ __le32 high_ber_reinit;
+ __le32 correctable_err_cnt;
+ __le32 uncorrectable_err_cnt;
+ __le32 retraining_cnt;
+ __u8 up;
+ __u8 pcs_link;
+ __u8 phy_ready;
+ __u8 auto_neg;
+ __le32 timeout_retransmission_cnt;
+ __le32 high_ber_cnt;
+ struct ser_val pre_fec_ser;
+ struct ser_val post_fec_ser;
+ struct frac_val bandwidth;
+ struct frac_val lat;
+};
+
+enum cpucp_hbm_row_replace_cause {
+ REPLACE_CAUSE_DOUBLE_ECC_ERR,
+ REPLACE_CAUSE_MULTI_SINGLE_ECC_ERR,
+};
+
+struct cpucp_hbm_row_info {
+ __u8 hbm_idx;
+ __u8 pc;
+ __u8 sid;
+ __u8 bank_idx;
+ __le16 row_addr;
+ __u8 replaced_row_cause; /* enum cpucp_hbm_row_replace_cause */
+ __u8 pad;
+};
+
+struct cpucp_hbm_row_replaced_rows_info {
+ __le16 num_replaced_rows;
+ __u8 pad[6];
+ struct cpucp_hbm_row_info replaced_rows[CPUCP_HBM_ROW_REPLACE_MAX];
+};
+
+enum cpu_reset_status {
+ CPU_RST_STATUS_NA = 0,
+ CPU_RST_STATUS_SOFT_RST_DONE = 1,
+};
+
+#define SEC_PCR_DATA_BUF_SZ 256
+#define SEC_PCR_QUOTE_BUF_SZ 510 /* (512 - 2) 2 bytes used for size */
+#define SEC_SIGNATURE_BUF_SZ 255 /* (256 - 1) 1 byte used for size */
+#define SEC_PUB_DATA_BUF_SZ 510 /* (512 - 2) 2 bytes used for size */
+#define SEC_CERTIFICATE_BUF_SZ 2046 /* (2048 - 2) 2 bytes used for size */
+
+/*
+ * struct cpucp_sec_attest_info - attestation report of the boot
+ * @pcr_data: raw values of the PCR registers
+ * @pcr_num_reg: number of PCR registers in the pcr_data array
+ * @pcr_reg_len: length of each PCR register in the pcr_data array (bytes)
+ * @nonce: number used only once; a random number provided by the host. It is
+ *         also passed to the quote command as qualifying data.
+ * @pcr_quote_len: length of the attestation quote data (bytes)
+ * @pcr_quote: attestation report data structure
+ * @quote_sig_len: length of the attestation report signature (bytes)
+ * @quote_sig: signature structure of the attestation report
+ * @pub_data_len: length of the public data (bytes)
+ * @public_data: public key for the signed attestation
+ * (outPublic + name + qualifiedName)
+ * @certificate_len: length of the certificate (bytes)
+ * @certificate: certificate for the attestation signing key
+ */
+struct cpucp_sec_attest_info {
+ __u8 pcr_data[SEC_PCR_DATA_BUF_SZ];
+ __u8 pcr_num_reg;
+ __u8 pcr_reg_len;
+ __le16 pad0;
+ __le32 nonce;
+ __le16 pcr_quote_len;
+ __u8 pcr_quote[SEC_PCR_QUOTE_BUF_SZ];
+ __u8 quote_sig_len;
+ __u8 quote_sig[SEC_SIGNATURE_BUF_SZ];
+ __le16 pub_data_len;
+ __u8 public_data[SEC_PUB_DATA_BUF_SZ];
+ __le16 certificate_len;
+ __u8 certificate[SEC_CERTIFICATE_BUF_SZ];
+};
+
+/*
+ * struct cpucp_dev_info_signed - device information signed by a secured device
+ * @info: device information structure as defined above
+ * @nonce: number used only once; a random number provided by the host. This
+ *         number is hashed and signed along with the device information.
+ * @info_sig_len: length of the attestation signature (bytes)
+ * @info_sig: signature of the info + nonce data.
+ * @pub_data_len: length of the public data (bytes)
+ * @public_data: public key for the signed info data
+ * (outPublic + name + qualifiedName)
+ * @certificate_len: length of the certificate (bytes)
+ * @certificate: certificate for the signing key
+ */
+struct cpucp_dev_info_signed {
+ struct cpucp_info info; /* assumed to be 64bit aligned */
+ __le32 nonce;
+ __le32 pad0;
+ __u8 info_sig_len;
+ __u8 info_sig[SEC_SIGNATURE_BUF_SZ];
+ __le16 pub_data_len;
+ __u8 public_data[SEC_PUB_DATA_BUF_SZ];
+ __le16 certificate_len;
+ __u8 certificate[SEC_CERTIFICATE_BUF_SZ];
+};
+
+#define DCORE_MON_REGS_SZ 512
+/*
+ * struct dcore_monitor_regs_data - DCORE monitor regs data.
+ * the structure follows sync manager block layout. Obsolete.
+ * @mon_pay_addrl: array of payload address low bits.
+ * @mon_pay_addrh: array of payload address high bits.
+ * @mon_pay_data: array of payload data.
+ * @mon_arm: array of monitor arm.
+ * @mon_status: array of monitor status.
+ */
+struct dcore_monitor_regs_data {
+ __le32 mon_pay_addrl[DCORE_MON_REGS_SZ];
+ __le32 mon_pay_addrh[DCORE_MON_REGS_SZ];
+ __le32 mon_pay_data[DCORE_MON_REGS_SZ];
+ __le32 mon_arm[DCORE_MON_REGS_SZ];
+ __le32 mon_status[DCORE_MON_REGS_SZ];
+};
+
+/* contains SM data for each SYNC_MNGR (Obsolete) */
+struct cpucp_monitor_dump {
+ struct dcore_monitor_regs_data sync_mngr_w_s;
+ struct dcore_monitor_regs_data sync_mngr_e_s;
+ struct dcore_monitor_regs_data sync_mngr_w_n;
+ struct dcore_monitor_regs_data sync_mngr_e_n;
+};
+
+/*
+ * The type of the generic request (and other input arguments) is fetched
+ * from the user by reading the "pkt_subidx" field in struct cpucp_packet.
+ *
+ * HL_PASSTHROUGH_VERSIONS - Fetch all firmware versions.
+ */
+enum hl_passthrough_type {
+ HL_PASSTHROUGH_VERSIONS,
+};
+
+#endif /* CPUCP_IF_H */
diff --git a/include/linux/habanalabs/hl_boot_if.h b/include/linux/habanalabs/hl_boot_if.h
new file mode 100644
index 000000000000..93366d5621fd
--- /dev/null
+++ b/include/linux/habanalabs/hl_boot_if.h
@@ -0,0 +1,792 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2018-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef HL_BOOT_IF_H
+#define HL_BOOT_IF_H
+
+#define LKD_HARD_RESET_MAGIC 0xED7BD694 /* deprecated - do not use */
+#define HL_POWER9_HOST_MAGIC 0x1DA30009
+
+#define BOOT_FIT_SRAM_OFFSET 0x200000
+
+#define VERSION_MAX_LEN 128
+
+enum cpu_boot_err {
+ CPU_BOOT_ERR_DRAM_INIT_FAIL = 0,
+ CPU_BOOT_ERR_FIT_CORRUPTED = 1,
+ CPU_BOOT_ERR_TS_INIT_FAIL = 2,
+ CPU_BOOT_ERR_DRAM_SKIPPED = 3,
+ CPU_BOOT_ERR_BMC_WAIT_SKIPPED = 4,
+ CPU_BOOT_ERR_NIC_DATA_NOT_RDY = 5,
+ CPU_BOOT_ERR_NIC_FW_FAIL = 6,
+ CPU_BOOT_ERR_SECURITY_NOT_RDY = 7,
+ CPU_BOOT_ERR_SECURITY_FAIL = 8,
+ CPU_BOOT_ERR_EFUSE_FAIL = 9,
+ CPU_BOOT_ERR_PRI_IMG_VER_FAIL = 10,
+ CPU_BOOT_ERR_SEC_IMG_VER_FAIL = 11,
+ CPU_BOOT_ERR_PLL_FAIL = 12,
+ CPU_BOOT_ERR_DEVICE_UNUSABLE_FAIL = 13,
+ CPU_BOOT_ERR_BOOT_FW_CRIT_ERR = 18,
+ CPU_BOOT_ERR_BINNING_FAIL = 19,
+ CPU_BOOT_ERR_TPM_FAIL = 20,
+ CPU_BOOT_ERR_TMP_THRESH_INIT_FAIL = 21,
+ CPU_BOOT_ERR_EEPROM_FAIL = 22,
+ CPU_BOOT_ERR_ENG_ARC_MEM_SCRUB_FAIL = 23,
+ CPU_BOOT_ERR_ENABLED = 31,
+ CPU_BOOT_ERR_SCND_EN = 63,
+ CPU_BOOT_ERR_LAST = 64 /* we have 2 registers of 32 bits */
+};
+
+/*
+ * Mask for fatal failures
+ * This mask contains all possible fatal failures, and a dynamic code
+ * will clear the non-relevant ones.
+ */
+#define CPU_BOOT_ERR_FATAL_MASK \
+ ((1 << CPU_BOOT_ERR_DRAM_INIT_FAIL) | \
+ (1 << CPU_BOOT_ERR_PLL_FAIL) | \
+ (1 << CPU_BOOT_ERR_DEVICE_UNUSABLE_FAIL) | \
+ (1 << CPU_BOOT_ERR_BINNING_FAIL) | \
+ (1 << CPU_BOOT_ERR_DRAM_SKIPPED) | \
+ (1 << CPU_BOOT_ERR_ENG_ARC_MEM_SCRUB_FAIL) | \
+ (1 << CPU_BOOT_ERR_EEPROM_FAIL))
+
+/*
+ * CPU error bits in BOOT_ERROR registers
+ *
+ * CPU_BOOT_ERR0_DRAM_INIT_FAIL DRAM initialization failed.
+ * DRAM is not reliable to use.
+ *
+ * CPU_BOOT_ERR0_FIT_CORRUPTED FIT data integrity verification of the
+ * image provided by the host has failed.
+ *
+ * CPU_BOOT_ERR0_TS_INIT_FAIL Thermal Sensor initialization failed.
+ * Boot continues as usual, but keep in
+ * mind this is a warning.
+ *
+ * CPU_BOOT_ERR0_DRAM_SKIPPED DRAM initialization has been skipped.
+ * Skipping DRAM initialization has been
+ * requested (e.g. strap, command, etc.)
+ * and FW skipped the DRAM initialization.
+ * Host can initialize the DRAM.
+ *
+ * CPU_BOOT_ERR0_BMC_WAIT_SKIPPED Waiting for BMC data will be skipped.
+ * Meaning the BMC data might not be
+ * available until reset.
+ *
+ * CPU_BOOT_ERR0_NIC_DATA_NOT_RDY NIC data from BMC is not ready.
+ * BMC has not provided the NIC data yet.
+ * Once provided this bit will be cleared.
+ *
+ * CPU_BOOT_ERR0_NIC_FW_FAIL NIC FW loading failed.
+ * The NIC FW loading and initialization
+ * failed. This means NICs are not usable.
+ *
+ * CPU_BOOT_ERR0_SECURITY_NOT_RDY Chip security initialization has been
+ * started, but is not ready yet - chip
+ * cannot be accessed.
+ *
+ * CPU_BOOT_ERR0_SECURITY_FAIL Security related tasks have failed.
+ * The tasks are security init (root of
+ * trust), boot authentication (chain of
+ * trust), data packets authentication.
+ *
+ * CPU_BOOT_ERR0_EFUSE_FAIL Reading from eFuse failed.
+ * The PCI device ID might be wrong.
+ *
+ * CPU_BOOT_ERR0_PRI_IMG_VER_FAIL Verification of primary image failed.
+ *					It means that the ppboot checksum
+ * verification for the preboot primary
+ * image has failed to match expected
+ * checksum. Trying to program image again
+ * might solve this.
+ *
+ * CPU_BOOT_ERR0_SEC_IMG_VER_FAIL Verification of secondary image failed.
+ *					It means that the ppboot checksum
+ * verification for the preboot secondary
+ * image has failed to match expected
+ * checksum. Trying to program image again
+ * might solve this.
+ *
+ * CPU_BOOT_ERR0_PLL_FAIL PLL settings failed, meaning that one
+ * of the PLLs remains in REF_CLK
+ *
+ * CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL Device is unusable and customer support
+ * should be contacted.
+ *
+ * CPU_BOOT_ERR0_BOOT_FW_CRIT_ERR Critical error was detected during
+ * the execution of ppboot or preboot.
+ * for example: stack overflow.
+ *
+ * CPU_BOOT_ERR0_BINNING_FAIL Binning settings failed, meaning
+ * malfunctioning components might still be
+ * in use.
+ *
+ * CPU_BOOT_ERR0_TPM_FAIL TPM verification flow failed.
+ *
+ * CPU_BOOT_ERR0_TMP_THRESH_INIT_FAIL	Failed to set threshold for temperature
+ * sensor.
+ *
+ * CPU_BOOT_ERR_EEPROM_FAIL Failed reading EEPROM data. Defaults
+ * are used.
+ *
+ * CPU_BOOT_ERR_ENG_ARC_MEM_SCRUB_FAIL Failed scrubbing the Engines/ARCFarm
+ * memories. Boot disabled until reset.
+ *
+ * CPU_BOOT_ERR0_ENABLED Error registers enabled.
+ * This is a main indication that the
+ * running FW populates the error
+ * registers. Meaning the error bits are
+ * not garbage, but actual error statuses.
+ */
+#define CPU_BOOT_ERR0_DRAM_INIT_FAIL (1 << CPU_BOOT_ERR_DRAM_INIT_FAIL)
+#define CPU_BOOT_ERR0_FIT_CORRUPTED (1 << CPU_BOOT_ERR_FIT_CORRUPTED)
+#define CPU_BOOT_ERR0_TS_INIT_FAIL (1 << CPU_BOOT_ERR_TS_INIT_FAIL)
+#define CPU_BOOT_ERR0_DRAM_SKIPPED (1 << CPU_BOOT_ERR_DRAM_SKIPPED)
+#define CPU_BOOT_ERR0_BMC_WAIT_SKIPPED (1 << CPU_BOOT_ERR_BMC_WAIT_SKIPPED)
+#define CPU_BOOT_ERR0_NIC_DATA_NOT_RDY (1 << CPU_BOOT_ERR_NIC_DATA_NOT_RDY)
+#define CPU_BOOT_ERR0_NIC_FW_FAIL (1 << CPU_BOOT_ERR_NIC_FW_FAIL)
+#define CPU_BOOT_ERR0_SECURITY_NOT_RDY (1 << CPU_BOOT_ERR_SECURITY_NOT_RDY)
+#define CPU_BOOT_ERR0_SECURITY_FAIL (1 << CPU_BOOT_ERR_SECURITY_FAIL)
+#define CPU_BOOT_ERR0_EFUSE_FAIL (1 << CPU_BOOT_ERR_EFUSE_FAIL)
+#define CPU_BOOT_ERR0_PRI_IMG_VER_FAIL (1 << CPU_BOOT_ERR_PRI_IMG_VER_FAIL)
+#define CPU_BOOT_ERR0_SEC_IMG_VER_FAIL (1 << CPU_BOOT_ERR_SEC_IMG_VER_FAIL)
+#define CPU_BOOT_ERR0_PLL_FAIL (1 << CPU_BOOT_ERR_PLL_FAIL)
+#define CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL (1 << CPU_BOOT_ERR_DEVICE_UNUSABLE_FAIL)
+#define CPU_BOOT_ERR0_BOOT_FW_CRIT_ERR (1 << CPU_BOOT_ERR_BOOT_FW_CRIT_ERR)
+#define CPU_BOOT_ERR0_BINNING_FAIL (1 << CPU_BOOT_ERR_BINNING_FAIL)
+#define CPU_BOOT_ERR0_TPM_FAIL (1 << CPU_BOOT_ERR_TPM_FAIL)
+#define CPU_BOOT_ERR0_TMP_THRESH_INIT_FAIL (1 << CPU_BOOT_ERR_TMP_THRESH_INIT_FAIL)
+#define CPU_BOOT_ERR0_EEPROM_FAIL (1 << CPU_BOOT_ERR_EEPROM_FAIL)
+#define CPU_BOOT_ERR0_ENG_ARC_MEM_SCRUB_FAIL (1 << CPU_BOOT_ERR_ENG_ARC_MEM_SCRUB_FAIL)
+#define CPU_BOOT_ERR0_ENABLED (1 << CPU_BOOT_ERR_ENABLED)
+#define CPU_BOOT_ERR1_ENABLED (1 << CPU_BOOT_ERR_ENABLED)
+
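+/*
+ * Example (illustrative sketch): checking the first boot-error register for a
+ * fatal failure. The error bits are meaningful only once FW has set the
+ * ENABLED bit. The helper name is hypothetical.
+ */
+static inline bool cpu_boot_err0_is_fatal(u32 err0)
+{
+	if (!(err0 & CPU_BOOT_ERR0_ENABLED))
+		return false;	/* register not populated by FW */
+
+	return !!(err0 & CPU_BOOT_ERR_FATAL_MASK);
+}
+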
+enum cpu_boot_dev_sts {
+ CPU_BOOT_DEV_STS_SECURITY_EN = 0,
+ CPU_BOOT_DEV_STS_DEBUG_EN = 1,
+ CPU_BOOT_DEV_STS_WATCHDOG_EN = 2,
+ CPU_BOOT_DEV_STS_DRAM_INIT_EN = 3,
+ CPU_BOOT_DEV_STS_BMC_WAIT_EN = 4,
+ CPU_BOOT_DEV_STS_E2E_CRED_EN = 5,
+ CPU_BOOT_DEV_STS_HBM_CRED_EN = 6,
+ CPU_BOOT_DEV_STS_RL_EN = 7,
+ CPU_BOOT_DEV_STS_SRAM_SCR_EN = 8,
+ CPU_BOOT_DEV_STS_DRAM_SCR_EN = 9,
+ CPU_BOOT_DEV_STS_FW_HARD_RST_EN = 10,
+ CPU_BOOT_DEV_STS_PLL_INFO_EN = 11,
+ CPU_BOOT_DEV_STS_SP_SRAM_EN = 12,
+ CPU_BOOT_DEV_STS_CLK_GATE_EN = 13,
+ CPU_BOOT_DEV_STS_HBM_ECC_EN = 14,
+ CPU_BOOT_DEV_STS_PKT_PI_ACK_EN = 15,
+ CPU_BOOT_DEV_STS_FW_LD_COM_EN = 16,
+ CPU_BOOT_DEV_STS_FW_IATU_CONF_EN = 17,
+ CPU_BOOT_DEV_STS_FW_NIC_MAC_EN = 18,
+ CPU_BOOT_DEV_STS_DYN_PLL_EN = 19,
+ CPU_BOOT_DEV_STS_GIC_PRIVILEGED_EN = 20,
+ CPU_BOOT_DEV_STS_EQ_INDEX_EN = 21,
+ CPU_BOOT_DEV_STS_MULTI_IRQ_POLL_EN = 22,
+ CPU_BOOT_DEV_STS_FW_NIC_STAT_XPCS91_EN = 23,
+ CPU_BOOT_DEV_STS_FW_NIC_STAT_EXT_EN = 24,
+ CPU_BOOT_DEV_STS_IS_IDLE_CHECK_EN = 25,
+ CPU_BOOT_DEV_STS_MAP_HWMON_EN = 26,
+ CPU_BOOT_DEV_STS_ENABLED = 31,
+ CPU_BOOT_DEV_STS_SCND_EN = 63,
+ CPU_BOOT_DEV_STS_LAST = 64 /* we have 2 registers of 32 bits */
+};
+
+/*
+ * BOOT DEVICE STATUS bits in BOOT_DEVICE_STS registers
+ *
+ * CPU_BOOT_DEV_STS0_SECURITY_EN Security is Enabled.
+ * This is an indication for security
+ * enabled in FW, which means that
+ * all conditions for security are met:
+ * device is indicated as security enabled,
+ * registers are protected, and device
+ * uses keys for image verification.
+ * Initialized in: preboot
+ *
+ * CPU_BOOT_DEV_STS0_DEBUG_EN Debug is enabled.
+ * Enabled when JTAG or DEBUG is enabled
+ * in FW.
+ * Initialized in: preboot
+ *
+ * CPU_BOOT_DEV_STS0_WATCHDOG_EN Watchdog is enabled.
+ * Watchdog is enabled in FW.
+ * Initialized in: preboot
+ *
+ * CPU_BOOT_DEV_STS0_DRAM_INIT_EN DRAM initialization is enabled.
+ * DRAM initialization has been done in FW.
+ * Initialized in: u-boot
+ *
+ * CPU_BOOT_DEV_STS0_BMC_WAIT_EN Waiting for BMC data enabled.
+ * If set, it means that during boot,
+ * FW waited for BMC data.
+ * Initialized in: u-boot
+ *
+ * CPU_BOOT_DEV_STS0_E2E_CRED_EN E2E credits initialized.
+ * FW initialized E2E credits.
+ * Initialized in: u-boot
+ *
+ * CPU_BOOT_DEV_STS0_HBM_CRED_EN HBM credits initialized.
+ * FW initialized HBM credits.
+ * Initialized in: u-boot
+ *
+ * CPU_BOOT_DEV_STS0_RL_EN Rate limiter initialized.
+ * FW initialized rate limiter.
+ * Initialized in: u-boot
+ *
+ * CPU_BOOT_DEV_STS0_SRAM_SCR_EN SRAM scrambler enabled.
+ * FW initialized SRAM scrambler.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_DRAM_SCR_EN DRAM scrambler enabled.
+ * FW initialized DRAM scrambler.
+ * Initialized in: u-boot
+ *
+ * CPU_BOOT_DEV_STS0_FW_HARD_RST_EN FW hard reset procedure is enabled.
+ * FW has the hard reset procedure
+ * implemented. This means that FW will
+ * perform hard reset procedure on
+ * receiving the halt-machine event.
+ * Initialized in: preboot, u-boot, linux
+ *
+ * CPU_BOOT_DEV_STS0_PLL_INFO_EN FW retrieval of PLL info is enabled.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_SP_SRAM_EN SP SRAM is initialized and available
+ * for use.
+ * Initialized in: preboot
+ *
+ * CPU_BOOT_DEV_STS0_CLK_GATE_EN Clock Gating enabled.
+ * FW initialized Clock Gating.
+ * Initialized in: preboot
+ *
+ * CPU_BOOT_DEV_STS0_HBM_ECC_EN HBM ECC handling Enabled.
+ * FW handles HBM ECC indications.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN Packets ack value used in the armcpd
+ * is set to the PI counter.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_FW_LD_COM_EN Flexible FW loading communication
+ * protocol is enabled.
+ * Initialized in: preboot
+ *
+ * CPU_BOOT_DEV_STS0_FW_IATU_CONF_EN FW iATU configuration is enabled.
+ *					If this bit is set, the iATU has been
+ *					configured and is ready for use.
+ * Initialized in: ppboot
+ *
+ * CPU_BOOT_DEV_STS0_FW_NIC_MAC_EN NIC MAC channels init is done by FW and
+ * any access to them is done via the FW.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_DYN_PLL_EN Dynamic PLL configuration is enabled.
+ * FW sends to host a bitmap of supported
+ * PLLs.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_GIC_PRIVILEGED_EN	GIC access is permitted only from a
+ *					privileged entity. FW sets this status
+ *					bit for the host. If this bit is set,
+ *					the GIC cannot be accessed from the host.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_EQ_INDEX_EN Event Queue (EQ) index is a running
+ * index for each new event sent to host.
+ * This is used as a method in host to
+ * identify that the waiting event in
+ * queue is actually a new event which
+ * was not served before.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_MULTI_IRQ_POLL_EN Use multiple scratchpad interfaces to
+ * prevent IRQs overriding each other.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_FW_NIC_STAT_XPCS91_EN
+ * NIC STAT and XPCS91 access is restricted
+ * and is done via FW only.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_FW_NIC_STAT_EXT_EN
+ * NIC STAT get all is supported.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_IS_IDLE_CHECK_EN
+ *					F/W checks if the device is idle by reading a defined set
+ * of registers. It returns a bitmask of all the engines,
+ * where a bit is set if the engine is not idle.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_MAP_HWMON_EN
+ *					If set, the f/w supports proprietary
+ * HWMON enum mapping to cpucp enums.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_ENABLED Device status register enabled.
+ * This is a main indication that the
+ * running FW populates the device status
+ * register. Meaning the device status
+ * bits are not garbage, but actual
+ * statuses.
+ * Initialized in: preboot
+ *
+ */
+#define CPU_BOOT_DEV_STS0_SECURITY_EN (1 << CPU_BOOT_DEV_STS_SECURITY_EN)
+#define CPU_BOOT_DEV_STS0_DEBUG_EN (1 << CPU_BOOT_DEV_STS_DEBUG_EN)
+#define CPU_BOOT_DEV_STS0_WATCHDOG_EN (1 << CPU_BOOT_DEV_STS_WATCHDOG_EN)
+#define CPU_BOOT_DEV_STS0_DRAM_INIT_EN (1 << CPU_BOOT_DEV_STS_DRAM_INIT_EN)
+#define CPU_BOOT_DEV_STS0_BMC_WAIT_EN (1 << CPU_BOOT_DEV_STS_BMC_WAIT_EN)
+#define CPU_BOOT_DEV_STS0_E2E_CRED_EN (1 << CPU_BOOT_DEV_STS_E2E_CRED_EN)
+#define CPU_BOOT_DEV_STS0_HBM_CRED_EN (1 << CPU_BOOT_DEV_STS_HBM_CRED_EN)
+#define CPU_BOOT_DEV_STS0_RL_EN (1 << CPU_BOOT_DEV_STS_RL_EN)
+#define CPU_BOOT_DEV_STS0_SRAM_SCR_EN (1 << CPU_BOOT_DEV_STS_SRAM_SCR_EN)
+#define CPU_BOOT_DEV_STS0_DRAM_SCR_EN (1 << CPU_BOOT_DEV_STS_DRAM_SCR_EN)
+#define CPU_BOOT_DEV_STS0_FW_HARD_RST_EN (1 << CPU_BOOT_DEV_STS_FW_HARD_RST_EN)
+#define CPU_BOOT_DEV_STS0_PLL_INFO_EN (1 << CPU_BOOT_DEV_STS_PLL_INFO_EN)
+#define CPU_BOOT_DEV_STS0_SP_SRAM_EN (1 << CPU_BOOT_DEV_STS_SP_SRAM_EN)
+#define CPU_BOOT_DEV_STS0_CLK_GATE_EN (1 << CPU_BOOT_DEV_STS_CLK_GATE_EN)
+#define CPU_BOOT_DEV_STS0_HBM_ECC_EN (1 << CPU_BOOT_DEV_STS_HBM_ECC_EN)
+#define CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN (1 << CPU_BOOT_DEV_STS_PKT_PI_ACK_EN)
+#define CPU_BOOT_DEV_STS0_FW_LD_COM_EN (1 << CPU_BOOT_DEV_STS_FW_LD_COM_EN)
+#define CPU_BOOT_DEV_STS0_FW_IATU_CONF_EN (1 << CPU_BOOT_DEV_STS_FW_IATU_CONF_EN)
+#define CPU_BOOT_DEV_STS0_FW_NIC_MAC_EN (1 << CPU_BOOT_DEV_STS_FW_NIC_MAC_EN)
+#define CPU_BOOT_DEV_STS0_DYN_PLL_EN (1 << CPU_BOOT_DEV_STS_DYN_PLL_EN)
+#define CPU_BOOT_DEV_STS0_GIC_PRIVILEGED_EN (1 << CPU_BOOT_DEV_STS_GIC_PRIVILEGED_EN)
+#define CPU_BOOT_DEV_STS0_EQ_INDEX_EN (1 << CPU_BOOT_DEV_STS_EQ_INDEX_EN)
+#define CPU_BOOT_DEV_STS0_MULTI_IRQ_POLL_EN (1 << CPU_BOOT_DEV_STS_MULTI_IRQ_POLL_EN)
+#define CPU_BOOT_DEV_STS0_FW_NIC_STAT_XPCS91_EN (1 << CPU_BOOT_DEV_STS_FW_NIC_STAT_XPCS91_EN)
+#define CPU_BOOT_DEV_STS0_FW_NIC_STAT_EXT_EN (1 << CPU_BOOT_DEV_STS_FW_NIC_STAT_EXT_EN)
+#define CPU_BOOT_DEV_STS0_IS_IDLE_CHECK_EN (1 << CPU_BOOT_DEV_STS_IS_IDLE_CHECK_EN)
+#define CPU_BOOT_DEV_STS0_MAP_HWMON_EN (1 << CPU_BOOT_DEV_STS_MAP_HWMON_EN)
+#define CPU_BOOT_DEV_STS0_ENABLED (1 << CPU_BOOT_DEV_STS_ENABLED)
+#define CPU_BOOT_DEV_STS1_ENABLED (1 << CPU_BOOT_DEV_STS_ENABLED)
+
+enum cpu_boot_status {
+ CPU_BOOT_STATUS_NA = 0, /* Default value after reset of chip */
+ CPU_BOOT_STATUS_IN_WFE = 1,
+ CPU_BOOT_STATUS_DRAM_RDY = 2,
+ CPU_BOOT_STATUS_SRAM_AVAIL = 3,
+ CPU_BOOT_STATUS_IN_BTL = 4, /* BTL is H/W FSM */
+ CPU_BOOT_STATUS_IN_PREBOOT = 5,
+ CPU_BOOT_STATUS_IN_SPL, /* deprecated - not reported */
+ CPU_BOOT_STATUS_IN_UBOOT = 7,
+ CPU_BOOT_STATUS_DRAM_INIT_FAIL, /* deprecated - will be removed */
+ CPU_BOOT_STATUS_FIT_CORRUPTED, /* deprecated - will be removed */
+ /* U-Boot console prompt activated, commands are not processed */
+ CPU_BOOT_STATUS_UBOOT_NOT_READY = 10,
+ /* Finished NICs init, reported after DRAM and NICs */
+ CPU_BOOT_STATUS_NIC_FW_RDY = 11,
+ CPU_BOOT_STATUS_TS_INIT_FAIL, /* deprecated - will be removed */
+ CPU_BOOT_STATUS_DRAM_SKIPPED, /* deprecated - will be removed */
+ CPU_BOOT_STATUS_BMC_WAITING_SKIPPED, /* deprecated - will be removed */
+ /* Last boot loader progress status, ready to receive commands */
+ CPU_BOOT_STATUS_READY_TO_BOOT = 15,
+ /* Internal Boot finished, ready for boot-fit */
+ CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT = 16,
+ /* Internal Security has been initialized, device can be accessed */
+ CPU_BOOT_STATUS_SECURITY_READY = 17,
+	/* FW component is preparing to shut down and communication with host is not available */
+ CPU_BOOT_STATUS_FW_SHUTDOWN_PREP = 18,
+};
+
+enum kmd_msg {
+ KMD_MSG_NA = 0,
+ KMD_MSG_GOTO_WFE,
+ KMD_MSG_FIT_RDY,
+ KMD_MSG_SKIP_BMC,
+ RESERVED,
+ KMD_MSG_RST_DEV,
+ KMD_MSG_LAST
+};
+
+enum cpu_msg_status {
+ CPU_MSG_CLR = 0,
+ CPU_MSG_OK,
+ CPU_MSG_ERR,
+};
+
+/* communication registers mapping - consider ABI when changing */
+struct cpu_dyn_regs {
+ __le32 cpu_pq_base_addr_low;
+ __le32 cpu_pq_base_addr_high;
+ __le32 cpu_pq_length;
+ __le32 cpu_pq_init_status;
+ __le32 cpu_eq_base_addr_low;
+ __le32 cpu_eq_base_addr_high;
+ __le32 cpu_eq_length;
+ __le32 cpu_eq_ci;
+ __le32 cpu_cq_base_addr_low;
+ __le32 cpu_cq_base_addr_high;
+ __le32 cpu_cq_length;
+ __le32 cpu_pf_pq_pi;
+ __le32 cpu_boot_dev_sts0;
+ __le32 cpu_boot_dev_sts1;
+ __le32 cpu_boot_err0;
+ __le32 cpu_boot_err1;
+ __le32 cpu_boot_status;
+ __le32 fw_upd_sts;
+ __le32 fw_upd_cmd;
+ __le32 fw_upd_pending_sts;
+ __le32 fuse_ver_offset;
+ __le32 preboot_ver_offset;
+ __le32 uboot_ver_offset;
+ __le32 hw_state;
+ __le32 kmd_msg_to_cpu;
+ __le32 cpu_cmd_status_to_host;
+ __le32 gic_host_pi_upd_irq;
+ __le32 gic_tpc_qm_irq_ctrl;
+ __le32 gic_mme_qm_irq_ctrl;
+ __le32 gic_dma_qm_irq_ctrl;
+ __le32 gic_nic_qm_irq_ctrl;
+ __le32 gic_dma_core_irq_ctrl;
+ __le32 gic_host_halt_irq;
+ __le32 gic_host_ints_irq;
+ __le32 gic_host_soft_rst_irq;
+ __le32 gic_rot_qm_irq_ctrl;
+ __le32 cpu_rst_status;
+ __le32 eng_arc_irq_ctrl;
+ __le32 reserved1[20]; /* reserve for future use */
+};
+
+/* TODO: remove the desc magic after the code is updated to use message */
+/* HCDM - Habana Communications Descriptor Magic */
+#define HL_COMMS_DESC_MAGIC 0x4843444D
+#define HL_COMMS_DESC_VER 3
+
+/* HCMv - Habana Communications Message + header version */
+#define HL_COMMS_MSG_MAGIC_VALUE 0x48434D00
+#define HL_COMMS_MSG_MAGIC_MASK 0xFFFFFF00
+#define HL_COMMS_MSG_MAGIC_VER_MASK 0xFF
+
+#define HL_COMMS_MSG_MAGIC_VER(ver) (HL_COMMS_MSG_MAGIC_VALUE | \
+ ((ver) & HL_COMMS_MSG_MAGIC_VER_MASK))
+#define HL_COMMS_MSG_MAGIC_V0 HL_COMMS_DESC_MAGIC
+#define HL_COMMS_MSG_MAGIC_V1 HL_COMMS_MSG_MAGIC_VER(1)
+#define HL_COMMS_MSG_MAGIC_V2 HL_COMMS_MSG_MAGIC_VER(2)
+#define HL_COMMS_MSG_MAGIC_V3 HL_COMMS_MSG_MAGIC_VER(3)
+
+#define HL_COMMS_MSG_MAGIC HL_COMMS_MSG_MAGIC_V3
+
+#define HL_COMMS_MSG_MAGIC_VALIDATE_MAGIC(magic) \
+ (((magic) & HL_COMMS_MSG_MAGIC_MASK) == \
+ HL_COMMS_MSG_MAGIC_VALUE)
+
+#define HL_COMMS_MSG_MAGIC_VALIDATE_VERSION(magic, ver) \
+ (((magic) & HL_COMMS_MSG_MAGIC_VER_MASK) >= \
+ ((ver) & HL_COMMS_MSG_MAGIC_VER_MASK))
+
+#define HL_COMMS_MSG_MAGIC_VALIDATE(magic, ver) \
+ (HL_COMMS_MSG_MAGIC_VALIDATE_MAGIC((magic)) && \
+ HL_COMMS_MSG_MAGIC_VALIDATE_VERSION((magic), (ver)))
+
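+/*
+ * Example (illustrative sketch): validating a received comms message magic
+ * before trusting any other header field. The check accepts any message whose
+ * embedded version is at least the requested one; the helper name is
+ * hypothetical.
+ */
+static inline bool hl_comms_msg_magic_ok(u32 magic)
+{
+	return HL_COMMS_MSG_MAGIC_VALIDATE(magic, HL_COMMS_MSG_MAGIC_V1);
+}
+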
+enum comms_msg_type {
+ HL_COMMS_DESC_TYPE = 0,
+ HL_COMMS_RESET_CAUSE_TYPE = 1,
+ HL_COMMS_FW_CFG_SKIP_TYPE = 2,
+ HL_COMMS_BINNING_CONF_TYPE = 3,
+};
+
+/*
+ * Binning information shared between LKD and FW
+ * @tpc_mask_l - TPC binning information lower 64 bits
+ * @dec_mask - Decoder binning information
+ * @dram_mask - DRAM binning information
+ * @edma_mask - EDMA binning information
+ * @mme_mask_l - MME binning information lower 32 bits
+ * @mme_mask_h - MME binning information upper 32 bits
+ * @rot_mask - Rotator binning information
+ * @xbar_mask - xBAR binning information
+ * @reserved - reserved field for future binning info w/o ABI change
+ * @tpc_mask_h - TPC binning information upper 64 bits
+ * @nic_mask - NIC binning information
+ */
+struct lkd_fw_binning_info {
+ __le64 tpc_mask_l;
+ __le32 dec_mask;
+ __le32 dram_mask;
+ __le32 edma_mask;
+ __le32 mme_mask_l;
+ __le32 mme_mask_h;
+ __le32 rot_mask;
+ __le32 xbar_mask;
+ __le32 reserved0;
+ __le64 tpc_mask_h;
+ __le64 nic_mask;
+ __le32 reserved1[8];
+};
+
+/* TODO: remove this struct after the code is updated to use message */
+/* this is the comms descriptor header - meta data */
+struct comms_desc_header {
+ __le32 magic; /* magic for validation */
+ __le32 crc32; /* CRC32 of the descriptor w/o header */
+ __le16 size; /* size of the descriptor w/o header */
+ __u8 version; /* descriptor version */
+ __u8 reserved[5]; /* pad to 64 bit */
+};
+
+/* this is the comms message header - meta data */
+struct comms_msg_header {
+ __le32 magic; /* magic for validation */
+ __le32 crc32; /* CRC32 of the message w/o header */
+ __le16 size; /* size of the message w/o header */
+ __u8 version; /* message payload version */
+ __u8 type; /* message type */
+ __u8 reserved[4]; /* pad to 64 bit */
+};
+
+enum lkd_fw_ascii_msg_lvls {
+ LKD_FW_ASCII_MSG_ERR = 0,
+ LKD_FW_ASCII_MSG_WRN = 1,
+ LKD_FW_ASCII_MSG_INF = 2,
+ LKD_FW_ASCII_MSG_DBG = 3,
+};
+
+#define LKD_FW_ASCII_MSG_MAX_LEN 128
+#define LKD_FW_ASCII_MSG_MAX 4 /* consider ABI when changing */
+
+struct lkd_fw_ascii_msg {
+ __u8 valid;
+ __u8 msg_lvl;
+ __u8 reserved[6];
+ char msg[LKD_FW_ASCII_MSG_MAX_LEN];
+};
+
+/* this is the main FW descriptor - consider ABI when changing */
+struct lkd_fw_comms_desc {
+ struct comms_desc_header header;
+ struct cpu_dyn_regs cpu_dyn_regs;
+ char fuse_ver[VERSION_MAX_LEN];
+ char cur_fw_ver[VERSION_MAX_LEN];
+ /* can be used for 1 more version w/o ABI change */
+ char reserved0[VERSION_MAX_LEN];
+ __le64 img_addr; /* address for next FW component load */
+ struct lkd_fw_binning_info binning_info;
+ struct lkd_fw_ascii_msg ascii_msg[LKD_FW_ASCII_MSG_MAX];
+ __le32 rsvd_mem_size_mb; /* reserved memory size [MB] for FW/SVE */
+ char reserved1[4];
+};
+
+enum comms_reset_cause {
+ HL_RESET_CAUSE_UNKNOWN = 0,
+ HL_RESET_CAUSE_HEARTBEAT = 1,
+ HL_RESET_CAUSE_TDR = 2,
+};
+
+/* TODO: remove define after struct name is aligned on all projects */
+#define lkd_msg_comms lkd_fw_comms_msg
+
+/* this is the comms message descriptor */
+struct lkd_fw_comms_msg {
+ struct comms_msg_header header;
+	/* union for future expansions of new messages */
+ union {
+ struct {
+ struct cpu_dyn_regs cpu_dyn_regs;
+ char fuse_ver[VERSION_MAX_LEN];
+ char cur_fw_ver[VERSION_MAX_LEN];
+ /* can be used for 1 more version w/o ABI change */
+ char reserved0[VERSION_MAX_LEN];
+ /* address for next FW component load */
+ __le64 img_addr;
+ struct lkd_fw_binning_info binning_info;
+ struct lkd_fw_ascii_msg ascii_msg[LKD_FW_ASCII_MSG_MAX];
+ /* reserved memory size [MB] for FW/SVE */
+ __le32 rsvd_mem_size_mb;
+ char reserved1[4];
+ };
+ struct {
+ __u8 reset_cause;
+ };
+ struct {
+ __u8 fw_cfg_skip; /* 1 - skip, 0 - don't skip */
+ };
+ struct lkd_fw_binning_info binning_conf;
+ };
+};
+
+/*
+ * LKD commands:
+ *
+ * COMMS_NOOP Used to clear the command register and no actual
+ *				command is sent.
+ *
+ * COMMS_CLR_STS Clear status command - FW should clear the
+ * status register. Used for synchronization
+ * between the commands as part of the race free
+ * protocol.
+ *
+ * COMMS_RST_STATE Reset the current communication state which is
+ * kept by FW for proper responses.
+ * Should be used in the beginning of the
+ * communication cycle to clean any leftovers from
+ * previous communication attempts.
+ *
+ * COMMS_PREP_DESC Prepare descriptor for setting up the
+ * communication and other dynamic data:
+ * struct lkd_fw_comms_desc.
+ * This command has a parameter stating the next FW
+ * component size, so the FW can actually prepare a
+ * space for it and in the status response provide
+ * the descriptor offset. The Offset of the next FW
+ * data component is a part of the descriptor
+ * structure.
+ *
+ * COMMS_DATA_RDY The FW data has been uploaded and is ready for
+ * validation.
+ *
+ * COMMS_EXEC Execute the next FW component.
+ *
+ * COMMS_RST_DEV Reset the device.
+ *
+ * COMMS_GOTO_WFE Execute WFE command. Allowed only on non-secure
+ * devices.
+ *
+ * COMMS_SKIP_BMC Perform actions required for BMC-less servers.
+ * Do not wait for BMC response.
+ *
+ * COMMS_PREP_DESC_ELBI		Same as COMMS_PREP_DESC, except that the
+ *				memory space is allocated in an
+ *				ELBI-access-only address range.
+ *
+ */
+enum comms_cmd {
+ COMMS_NOOP = 0,
+ COMMS_CLR_STS = 1,
+ COMMS_RST_STATE = 2,
+ COMMS_PREP_DESC = 3,
+ COMMS_DATA_RDY = 4,
+ COMMS_EXEC = 5,
+ COMMS_RST_DEV = 6,
+ COMMS_GOTO_WFE = 7,
+ COMMS_SKIP_BMC = 8,
+ COMMS_PREP_DESC_ELBI = 10,
+ COMMS_INVLD_LAST
+};
+
+#define COMMS_COMMAND_SIZE_SHIFT 0
+#define COMMS_COMMAND_SIZE_MASK 0x1FFFFFF
+#define COMMS_COMMAND_CMD_SHIFT 27
+#define COMMS_COMMAND_CMD_MASK 0xF8000000
+
+/*
+ * LKD command to FW register structure
+ * @size - FW component size
+ * @cmd - command from enum comms_cmd
+ */
+struct comms_command {
+ union { /* bit fields are only for FW use */
+ struct {
+ u32 size :25; /* 32MB max. */
+ u32 reserved :2;
+ enum comms_cmd cmd :5; /* 32 commands */
+ };
+ __le32 val;
+ };
+};
+
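+/*
+ * Example (illustrative sketch): composing the LKD-to-FW command register
+ * value from a command and a component size, using the shift/mask pairs above
+ * rather than the FW-only bit fields. The helper name is hypothetical.
+ */
+static inline u32 comms_command_val(enum comms_cmd cmd, u32 size)
+{
+	return ((cmd << COMMS_COMMAND_CMD_SHIFT) & COMMS_COMMAND_CMD_MASK) |
+	       ((size << COMMS_COMMAND_SIZE_SHIFT) & COMMS_COMMAND_SIZE_MASK);
+}
+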
+/*
+ * FW status
+ *
+ * COMMS_STS_NOOP Used to clear the status register and no actual
+ * status is provided.
+ *
+ * COMMS_STS_ACK Command has been received and recognized.
+ *
+ * COMMS_STS_OK Command execution has finished successfully.
+ *
+ * COMMS_STS_ERR Command execution was unsuccessful and resulted
+ * in error.
+ *
+ * COMMS_STS_VALID_ERR FW validation has failed.
+ *
+ * COMMS_STS_TIMEOUT_ERR Command execution has timed out.
+ */
+enum comms_sts {
+ COMMS_STS_NOOP = 0,
+ COMMS_STS_ACK = 1,
+ COMMS_STS_OK = 2,
+ COMMS_STS_ERR = 3,
+ COMMS_STS_VALID_ERR = 4,
+ COMMS_STS_TIMEOUT_ERR = 5,
+ COMMS_STS_INVLD_LAST
+};
+
+/* RAM types for FW components loading - defines the base address */
+enum comms_ram_types {
+ COMMS_SRAM = 0,
+ COMMS_DRAM = 1,
+};
+
+#define COMMS_STATUS_OFFSET_SHIFT 0
+#define COMMS_STATUS_OFFSET_MASK 0x03FFFFFF
+#define COMMS_STATUS_OFFSET_ALIGN_SHIFT 2
+#define COMMS_STATUS_RAM_TYPE_SHIFT 26
+#define COMMS_STATUS_RAM_TYPE_MASK 0x0C000000
+#define COMMS_STATUS_STATUS_SHIFT 28
+#define COMMS_STATUS_STATUS_MASK 0xF0000000
+
+/*
+ * FW status to LKD register structure
+ * @offset - an offset from the base of the ram_type shifted right by
+ * 2 bits (always aligned to 32 bits).
+ * Allows a maximum addressable offset of 256MB from RAM base.
+ * Example: for real offset in RAM of 0x800000 (8MB), the value
+ * in offset field is (0x800000 >> 2) = 0x200000.
+ * @ram_type - the RAM type that should be used for offset from
+ * enum comms_ram_types
+ * @status - status from enum comms_sts
+ */
+struct comms_status {
+ union { /* bit fields are only for FW use */
+ struct {
+ u32 offset :26;
+ enum comms_ram_types ram_type :2;
+ enum comms_sts status :4; /* 16 statuses */
+ };
+ __le32 val;
+ };
+};
+
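+/*
+ * Example (illustrative sketch): decoding the FW status register and
+ * recovering the real RAM offset, which is stored shifted right by 2 bits.
+ * The helper name and out-parameters are hypothetical.
+ */
+static inline void comms_status_decode(u32 val, u32 *offset,
+				       enum comms_ram_types *ram_type,
+				       enum comms_sts *status)
+{
+	*offset = ((val & COMMS_STATUS_OFFSET_MASK) >>
+		   COMMS_STATUS_OFFSET_SHIFT) <<
+		   COMMS_STATUS_OFFSET_ALIGN_SHIFT;
+	*ram_type = (val & COMMS_STATUS_RAM_TYPE_MASK) >>
+		    COMMS_STATUS_RAM_TYPE_SHIFT;
+	*status = (val & COMMS_STATUS_STATUS_MASK) >> COMMS_STATUS_STATUS_SHIFT;
+}
+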
+#define NAME_MAX_LEN 32 /* bytes */
+struct hl_module_data {
+ __u8 name[NAME_MAX_LEN];
+ __u8 version[VERSION_MAX_LEN];
+};
+
+/**
+ * struct hl_component_versions - versions associated with hl component.
+ * @struct_size: size of the whole struct (including the dynamic size of
+ *	modules).
+ * @modules_offset: offset of the modules field in this struct.
+ * @component: version of the component itself.
+ * @fw_os: Firmware OS Version.
+ * @comp_name: Name of the component.
+ * @modules_counter: number of set bits in modules_mask.
+ * @reserved: reserved for future use.
+ * @modules: versions of the component's modules. An elaborated explanation
+ *	is given in struct cpucp_versions.
+ */
+struct hl_component_versions {
+ __le16 struct_size;
+ __le16 modules_offset;
+ __u8 component[VERSION_MAX_LEN];
+ __u8 fw_os[VERSION_MAX_LEN];
+ __u8 comp_name[NAME_MAX_LEN];
+ __u8 modules_counter;
+ __u8 reserved[3];
+ struct hl_module_data modules[];
+};
+
+/* Max size of the versions FIT */
+#define HL_FW_VERSIONS_FIT_SIZE 4096
+
+#endif /* HL_BOOT_IF_H */
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index 2f4dcc8d060e..3bb87bf6bc65 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -170,19 +170,19 @@ struct hdmi_avi_infoframe {
enum hdmi_infoframe_type type;
unsigned char version;
unsigned char length;
+ bool itc;
+ unsigned char pixel_repeat;
enum hdmi_colorspace colorspace;
enum hdmi_scan_mode scan_mode;
enum hdmi_colorimetry colorimetry;
enum hdmi_picture_aspect picture_aspect;
enum hdmi_active_aspect active_aspect;
- bool itc;
enum hdmi_extended_colorimetry extended_colorimetry;
enum hdmi_quantization_range quantization_range;
enum hdmi_nups nups;
unsigned char video_code;
enum hdmi_ycc_quantization_range ycc_quantization_range;
enum hdmi_content_type content_type;
- unsigned char pixel_repeat;
unsigned short top_bar;
unsigned short bottom_bar;
unsigned short left_bar;
diff --git a/include/linux/hid-roccat.h b/include/linux/hid-roccat.h
index 3214fb0815fc..753654fff07f 100644
--- a/include/linux/hid-roccat.h
+++ b/include/linux/hid-roccat.h
@@ -16,7 +16,7 @@
#ifdef __KERNEL__
-int roccat_connect(struct class *klass, struct hid_device *hid,
+int roccat_connect(const struct class *klass, struct hid_device *hid,
int report_size);
void roccat_disconnect(int minor);
int roccat_report_event(int minor, u8 const *data);
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 4e4c4fe36911..7c26db874ff0 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -341,6 +341,29 @@ struct hid_item {
*/
#define MAX_USBHID_BOOT_QUIRKS 4
+/**
+ * DOC: HID quirks
+ * | @HID_QUIRK_NOTOUCH:
+ * | @HID_QUIRK_IGNORE: ignore this device
+ * | @HID_QUIRK_NOGET:
+ * | @HID_QUIRK_HIDDEV_FORCE:
+ * | @HID_QUIRK_BADPAD:
+ * | @HID_QUIRK_MULTI_INPUT:
+ * | @HID_QUIRK_HIDINPUT_FORCE:
+ * | @HID_QUIRK_ALWAYS_POLL:
+ * | @HID_QUIRK_INPUT_PER_APP:
+ * | @HID_QUIRK_X_INVERT:
+ * | @HID_QUIRK_Y_INVERT:
+ * | @HID_QUIRK_SKIP_OUTPUT_REPORTS:
+ * | @HID_QUIRK_SKIP_OUTPUT_REPORT_ID:
+ * | @HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP:
+ * | @HID_QUIRK_HAVE_SPECIAL_DRIVER:
+ * | @HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE:
+ * | @HID_QUIRK_FULLSPEED_INTERVAL:
+ * | @HID_QUIRK_NO_INIT_REPORTS:
+ * | @HID_QUIRK_NO_IGNORE:
+ * | @HID_QUIRK_NO_INPUT_SYNC:
+ */
/* BIT(0) reserved for backward compatibility, was HID_QUIRK_INVERT */
#define HID_QUIRK_NOTOUCH BIT(1)
#define HID_QUIRK_IGNORE BIT(2)
@@ -360,6 +383,7 @@ struct hid_item {
#define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP BIT(18)
#define HID_QUIRK_HAVE_SPECIAL_DRIVER BIT(19)
#define HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE BIT(20)
+#define HID_QUIRK_NOINVERT BIT(21)
#define HID_QUIRK_FULLSPEED_INTERVAL BIT(28)
#define HID_QUIRK_NO_INIT_REPORTS BIT(29)
#define HID_QUIRK_NO_IGNORE BIT(30)
@@ -555,9 +579,9 @@ struct hid_input {
struct hid_report *report;
struct input_dev *input;
const char *name;
- bool registered;
struct list_head reports; /* the list of reports */
unsigned int application; /* application usage for this input */
+ bool registered;
};
enum hid_type {
@@ -597,6 +621,7 @@ struct hid_device { /* device report descriptor */
struct semaphore driver_input_lock; /* protects the current driver */
struct device dev; /* device */
struct hid_driver *driver;
+ void *devres_group_id; /* ID of probe devres group */
const struct hid_ll_driver *ll_driver;
struct mutex ll_open_lock;
@@ -654,6 +679,7 @@ struct hid_device { /* device report descriptor */
struct list_head debug_list;
spinlock_t debug_list_lock;
wait_queue_head_t debug_wait;
+ struct kref ref;
unsigned int id; /* system unique id */
@@ -662,6 +688,8 @@ struct hid_device { /* device report descriptor */
#endif /* CONFIG_BPF */
};
+void hiddev_free(struct kref *ref);
+
#define to_hid_device(pdev) \
container_of(pdev, struct hid_device, dev)
@@ -808,11 +836,11 @@ struct hid_driver {
void (*feature_mapping)(struct hid_device *hdev,
struct hid_field *field,
struct hid_usage *usage);
-#ifdef CONFIG_PM
+
int (*suspend)(struct hid_device *hdev, pm_message_t message);
int (*resume)(struct hid_device *hdev);
int (*reset_resume)(struct hid_device *hdev);
-#endif
+
/* private: */
struct device_driver driver;
};
@@ -884,7 +912,7 @@ extern bool hid_ignore(struct hid_device *);
extern int hid_add_device(struct hid_device *);
extern void hid_destroy_device(struct hid_device *);
-extern struct bus_type hid_bus_type;
+extern const struct bus_type hid_bus_type;
extern int __must_check __hid_register_driver(struct hid_driver *,
struct module *, const char *mod_name);
diff --git a/include/linux/hid_bpf.h b/include/linux/hid_bpf.h
index e9afb61e6ee0..7118ac28d468 100644
--- a/include/linux/hid_bpf.h
+++ b/include/linux/hid_bpf.h
@@ -77,17 +77,6 @@ enum hid_bpf_attach_flags {
int hid_bpf_device_event(struct hid_bpf_ctx *ctx);
int hid_bpf_rdesc_fixup(struct hid_bpf_ctx *ctx);
-/* Following functions are kfunc that we export to BPF programs */
-/* available everywhere in HID-BPF */
-__u8 *hid_bpf_get_data(struct hid_bpf_ctx *ctx, unsigned int offset, const size_t __sz);
-
-/* only available in syscall */
-int hid_bpf_attach_prog(unsigned int hid_id, int prog_fd, __u32 flags);
-int hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz,
- enum hid_report_type rtype, enum hid_class_request reqtype);
-struct hid_bpf_ctx *hid_bpf_allocate_context(unsigned int hid_id);
-void hid_bpf_release_context(struct hid_bpf_ctx *ctx);
-
/*
* Below is HID internal
*/
@@ -115,7 +104,7 @@ struct hid_bpf_ops {
size_t len, enum hid_report_type rtype,
enum hid_class_request reqtype);
struct module *owner;
- struct bus_type *bus_type;
+ const struct bus_type *bus_type;
};
extern struct hid_bpf_ops *hid_bpf_ops;
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 4de1dbcd3ef6..451c1dff0e87 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -439,6 +439,126 @@ static inline void memzero_page(struct page *page, size_t offset, size_t len)
kunmap_local(addr);
}
+static inline void memcpy_from_folio(char *to, struct folio *folio,
+ size_t offset, size_t len)
+{
+ VM_BUG_ON(offset + len > folio_size(folio));
+
+ do {
+ const char *from = kmap_local_folio(folio, offset);
+ size_t chunk = len;
+
+ if (folio_test_highmem(folio) &&
+ chunk > PAGE_SIZE - offset_in_page(offset))
+ chunk = PAGE_SIZE - offset_in_page(offset);
+ memcpy(to, from, chunk);
+ kunmap_local(from);
+
+ to += chunk;
+ offset += chunk;
+ len -= chunk;
+ } while (len > 0);
+}
+
+static inline void memcpy_to_folio(struct folio *folio, size_t offset,
+ const char *from, size_t len)
+{
+ VM_BUG_ON(offset + len > folio_size(folio));
+
+ do {
+ char *to = kmap_local_folio(folio, offset);
+ size_t chunk = len;
+
+ if (folio_test_highmem(folio) &&
+ chunk > PAGE_SIZE - offset_in_page(offset))
+ chunk = PAGE_SIZE - offset_in_page(offset);
+ memcpy(to, from, chunk);
+ kunmap_local(to);
+
+ from += chunk;
+ offset += chunk;
+ len -= chunk;
+ } while (len > 0);
+
+ flush_dcache_folio(folio);
+}
+
+/**
+ * folio_zero_tail - Zero the tail of a folio.
+ * @folio: The folio to zero.
+ * @offset: The byte offset in the folio to start zeroing at.
+ * @kaddr: The address the folio is currently mapped to.
+ *
+ * If you have already used kmap_local_folio() to map a folio, written
+ * some data to it and now need to zero the end of the folio (and flush
+ * the dcache), you can use this function. If you do not have the
+ * folio kmapped (e.g. the folio has been partially populated by DMA),
+ * use folio_zero_range() or folio_zero_segment() instead.
+ *
+ * Return: An address which can be passed to kunmap_local().
+ */
+static inline __must_check void *folio_zero_tail(struct folio *folio,
+ size_t offset, void *kaddr)
+{
+ size_t len = folio_size(folio) - offset;
+
+ if (folio_test_highmem(folio)) {
+ size_t max = PAGE_SIZE - offset_in_page(offset);
+
+ while (len > max) {
+ memset(kaddr, 0, max);
+ kunmap_local(kaddr);
+ len -= max;
+ offset += max;
+ max = PAGE_SIZE;
+ kaddr = kmap_local_folio(folio, offset);
+ }
+ }
+
+ memset(kaddr, 0, len);
+ flush_dcache_folio(folio);
+
+ return kaddr;
+}
+
+/**
+ * folio_fill_tail - Copy some data to a folio and pad with zeroes.
+ * @folio: The destination folio.
+ * @offset: The offset into @folio at which to start copying.
+ * @from: The data to copy.
+ * @len: How many bytes of data to copy.
+ *
+ * This function is most useful for filesystems which support inline data.
+ * When they want to copy data from the inode into the page cache, this
+ * function does everything for them. It supports large folios even on
+ * HIGHMEM configurations.
+ */
+static inline void folio_fill_tail(struct folio *folio, size_t offset,
+ const char *from, size_t len)
+{
+ char *to = kmap_local_folio(folio, offset);
+
+ VM_BUG_ON(offset + len > folio_size(folio));
+
+ if (folio_test_highmem(folio)) {
+ size_t max = PAGE_SIZE - offset_in_page(offset);
+
+ while (len > max) {
+ memcpy(to, from, max);
+ kunmap_local(to);
+ len -= max;
+ from += max;
+ offset += max;
+ max = PAGE_SIZE;
+ to = kmap_local_folio(folio, offset);
+ }
+ }
+
+ memcpy(to, from, len);
+ to = folio_zero_tail(folio, offset + len, to + len);
+ kunmap_local(to);
+}
+
/**
* memcpy_from_file_folio - Copy some bytes from a file folio.
* @to: The destination buffer.
@@ -507,10 +627,24 @@ static inline void folio_zero_range(struct folio *folio,
zero_user_segments(&folio->page, start, start + length, 0, 0);
}
-static inline void put_and_unmap_page(struct page *page, void *addr)
+/**
+ * folio_release_kmap - Unmap a folio and drop a refcount.
+ * @folio: The folio to release.
+ * @addr: The address previously returned by a call to kmap_local_folio().
+ *
+ * It is common, e.g. in directory handling, to kmap a folio. This function
+ * unmaps the folio and drops the refcount that was being held to keep the
+ * folio alive while we accessed it.
+ */
+static inline void folio_release_kmap(struct folio *folio, void *addr)
{
kunmap_local(addr);
- put_page(page);
+ folio_put(folio);
+}
+
+static inline void unmap_and_put_page(struct page *page, void *addr)
+{
+ folio_release_kmap(page_folio(page), addr);
}
#endif /* _LINUX_HIGHMEM_H */
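
As a usage sketch for the new folio_fill_tail() helper: an inline-data filesystem's ->read_folio could copy the inode-resident bytes and zero-pad the remainder in one call. The names example_read_inline/inline_buf/inline_len are invented for illustration; only folio_fill_tail() and folio_mark_uptodate() are existing APIs here.

    static void example_read_inline(struct folio *folio,
    				    const char *inline_buf, size_t inline_len)
    {
    	/* Copies inline_len bytes to offset 0, zeroes out to folio_size(),
    	 * and flushes the dcache via folio_zero_tail() internally. */
    	folio_fill_tail(folio, 0, inline_buf, inline_len);
    	folio_mark_uptodate(folio);
    }
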
diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
index a7d54d4d41fd..5f4c74facf6a 100644
--- a/include/linux/hisi_acc_qm.h
+++ b/include/linux/hisi_acc_qm.h
@@ -104,21 +104,17 @@
enum qm_stop_reason {
QM_NORMAL,
QM_SOFT_RESET,
- QM_FLR,
+ QM_DOWN,
};
enum qm_state {
- QM_INIT = 0,
- QM_START,
- QM_CLOSE,
+ QM_WORK = 0,
QM_STOP,
};
enum qp_state {
- QP_INIT = 1,
- QP_START,
+ QP_START = 1,
QP_STOP,
- QP_CLOSE,
};
enum qm_hw_ver {
@@ -144,6 +140,13 @@ enum qm_vf_state {
QM_NOT_READY,
};
+enum qm_misc_ctl_bits {
+ QM_DRIVER_REMOVING = 0x0,
+ QM_RST_SCHED,
+ QM_RESETTING,
+ QM_MODULE_PARAM,
+};
+
enum qm_cap_bits {
QM_SUPPORT_DB_ISOLATION = 0x0,
QM_SUPPORT_FUNC_QOS,
@@ -153,6 +156,11 @@ enum qm_cap_bits {
QM_SUPPORT_RPM,
};
+struct qm_dev_alg {
+ u64 alg_msk;
+ const char *alg;
+};
+
struct dfx_diff_registers {
u32 *regs;
u32 reg_offset;
@@ -258,6 +266,16 @@ struct hisi_qm_cap_info {
u32 v3_val;
};
+struct hisi_qm_cap_record {
+ u32 type;
+ u32 cap_val;
+};
+
+struct hisi_qm_cap_tables {
+ struct hisi_qm_cap_record *qm_cap_table;
+ struct hisi_qm_cap_record *dev_cap_table;
+};
+
struct hisi_qm_list {
struct mutex lock;
struct list_head list;
@@ -269,6 +287,7 @@ struct hisi_qm_poll_data {
struct hisi_qm *qm;
struct work_struct work;
u16 *qp_finish_id;
+ u16 eqe_num;
};
/**
@@ -285,6 +304,18 @@ struct qm_err_isolate {
struct list_head qm_hw_errs;
};
+struct qm_rsv_buf {
+ struct qm_sqc *sqc;
+ struct qm_cqc *cqc;
+ struct qm_eqc *eqc;
+ struct qm_aeqc *aeqc;
+ dma_addr_t sqc_dma;
+ dma_addr_t cqc_dma;
+ dma_addr_t eqc_dma;
+ dma_addr_t aeqc_dma;
+ struct qm_dma qcdma;
+};
+
struct hisi_qm {
enum qm_hw_ver ver;
enum qm_fun_type fun_type;
@@ -317,6 +348,7 @@ struct hisi_qm {
dma_addr_t cqc_dma;
dma_addr_t eqe_dma;
dma_addr_t aeqe_dma;
+ struct qm_rsv_buf xqc_buf;
struct hisi_qm_status status;
const struct hisi_qm_err_ini *err_ini;
@@ -344,7 +376,6 @@ struct hisi_qm {
struct work_struct rst_work;
struct work_struct cmd_process;
- const char *algs;
bool use_sva;
resource_size_t phys_base;
@@ -355,6 +386,8 @@ struct hisi_qm {
u32 mb_qos;
u32 type_rate;
struct qm_err_isolate isolate_data;
+
+ struct hisi_qm_cap_tables cap_tables;
};
struct hisi_qp_status {
@@ -471,6 +504,20 @@ static inline void hisi_qm_init_list(struct hisi_qm_list *qm_list)
mutex_init(&qm_list->lock);
}
+static inline void hisi_qm_add_list(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
+{
+ mutex_lock(&qm_list->lock);
+ list_add_tail(&qm->list, &qm_list->list);
+ mutex_unlock(&qm_list->lock);
+}
+
+static inline void hisi_qm_del_list(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
+{
+ mutex_lock(&qm_list->lock);
+ list_del(&qm->list);
+ mutex_unlock(&qm_list->lock);
+}
+
int hisi_qm_init(struct hisi_qm *qm);
void hisi_qm_uninit(struct hisi_qm *qm);
int hisi_qm_start(struct hisi_qm *qm);
@@ -516,8 +563,8 @@ int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num);
void hisi_qm_dev_shutdown(struct pci_dev *pdev);
void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
-int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
-void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
+int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard);
+void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard);
int hisi_qm_resume(struct device *dev);
int hisi_qm_suspend(struct device *dev);
void hisi_qm_pm_uninit(struct hisi_qm *qm);
@@ -528,6 +575,8 @@ void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset);
u32 hisi_qm_get_hw_info(struct hisi_qm *qm,
const struct hisi_qm_cap_info *info_table,
u32 index, bool is_read);
+int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *dev_algs,
+ u32 dev_algs_size);
/* Used by VFIO ACC live migration driver */
struct pci_driver *hisi_sec_get_pf_driver(void);
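
The new hisi_qm_set_algs() pairs with struct qm_dev_alg: a driver supplies a table mapping capability bits to algorithm names, and the core derives the supported set from the device's algorithm mask. A hedged sketch with invented bit positions and names (the trailing newline separators reflect how the resulting algorithm string appears to be assembled, but treat that detail as an assumption):

    static const struct qm_dev_alg example_dev_algs[] = {
    	{ .alg_msk = BIT_ULL(0), .alg = "cipher\n" },
    	{ .alg_msk = BIT_ULL(1), .alg = "digest\n" },
    };

    static int example_set_algs(struct hisi_qm *qm, u64 alg_cap_mask)
    {
    	return hisi_qm_set_algs(qm, alg_cap_mask, example_dev_algs,
    				ARRAY_SIZE(example_dev_algs));
    }
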
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 0ee140176f10..641c4567cfa7 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -13,13 +13,13 @@
#define _LINUX_HRTIMER_H
#include <linux/hrtimer_defs.h>
-#include <linux/rbtree.h>
+#include <linux/hrtimer_types.h>
#include <linux/init.h>
#include <linux/list.h>
-#include <linux/percpu.h>
+#include <linux/percpu-defs.h>
+#include <linux/rbtree.h>
#include <linux/seqlock.h>
#include <linux/timer.h>
-#include <linux/timerqueue.h>
struct hrtimer_clock_base;
struct hrtimer_cpu_base;
@@ -60,14 +60,6 @@ enum hrtimer_mode {
};
/*
- * Return values for the callback function
- */
-enum hrtimer_restart {
- HRTIMER_NORESTART, /* Timer is not restarted */
- HRTIMER_RESTART, /* Timer must be restarted */
-};
-
-/*
* Values to track state of the timer
*
* Possible states:
@@ -95,38 +87,6 @@ enum hrtimer_restart {
#define HRTIMER_STATE_ENQUEUED 0x01
/**
- * struct hrtimer - the basic hrtimer structure
- * @node: timerqueue node, which also manages node.expires,
- * the absolute expiry time in the hrtimers internal
- * representation. The time is related to the clock on
- * which the timer is based. Is setup by adding
- * slack to the _softexpires value. For non range timers
- * identical to _softexpires.
- * @_softexpires: the absolute earliest expiry time of the hrtimer.
- * The time which was given as expiry time when the timer
- * was armed.
- * @function: timer expiry callback function
- * @base: pointer to the timer base (per cpu and per clock)
- * @state: state information (See bit values above)
- * @is_rel: Set if the timer was armed relative
- * @is_soft: Set if hrtimer will be expired in soft interrupt context.
- * @is_hard: Set if hrtimer will be expired in hard interrupt context
- * even on RT.
- *
- * The hrtimer structure must be initialized by hrtimer_init()
- */
-struct hrtimer {
- struct timerqueue_node node;
- ktime_t _softexpires;
- enum hrtimer_restart (*function)(struct hrtimer *);
- struct hrtimer_clock_base *base;
- u8 state;
- u8 is_rel;
- u8 is_soft;
- u8 is_hard;
-};
-
-/**
* struct hrtimer_sleeper - simple sleeper structure
* @timer: embedded timer structure
* @task: task to wake up
@@ -197,6 +157,7 @@ enum hrtimer_base_type {
* @max_hang_time: Maximum time spent in hrtimer_interrupt
* @softirq_expiry_lock: Lock which is taken while softirq based hrtimer are
* expired
+ * @online: CPU is online from an hrtimers point of view
* @timer_waiters: A hrtimer_cancel() invocation waits for the timer
* callback to finish.
* @expires_next: absolute time of the next event, is required for remote
@@ -219,7 +180,8 @@ struct hrtimer_cpu_base {
unsigned int hres_active : 1,
in_hrtirq : 1,
hang_detected : 1,
- softirq_activated : 1;
+ softirq_activated : 1,
+ online : 1;
#ifdef CONFIG_HIGH_RES_TIMERS
unsigned int nr_events;
unsigned short nr_retries;
@@ -531,9 +493,9 @@ extern void sysrq_timer_list_show(void);
int hrtimers_prepare_cpu(unsigned int cpu);
#ifdef CONFIG_HOTPLUG_CPU
-int hrtimers_dead_cpu(unsigned int cpu);
+int hrtimers_cpu_dying(unsigned int cpu);
#else
-#define hrtimers_dead_cpu NULL
+#define hrtimers_cpu_dying NULL
#endif
#endif
diff --git a/include/linux/hrtimer_types.h b/include/linux/hrtimer_types.h
new file mode 100644
index 000000000000..ad66a3081735
--- /dev/null
+++ b/include/linux/hrtimer_types.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_HRTIMER_TYPES_H
+#define _LINUX_HRTIMER_TYPES_H
+
+#include <linux/types.h>
+#include <linux/timerqueue_types.h>
+
+struct hrtimer_clock_base;
+
+/*
+ * Return values for the callback function
+ */
+enum hrtimer_restart {
+ HRTIMER_NORESTART, /* Timer is not restarted */
+ HRTIMER_RESTART, /* Timer must be restarted */
+};
+
+/**
+ * struct hrtimer - the basic hrtimer structure
+ * @node: timerqueue node, which also manages node.expires,
+ * the absolute expiry time in the hrtimers internal
+ * representation. The time is related to the clock on
+ * which the timer is based. Is setup by adding
+ * slack to the _softexpires value. For non range timers
+ * identical to _softexpires.
+ * @_softexpires: the absolute earliest expiry time of the hrtimer.
+ * The time which was given as expiry time when the timer
+ * was armed.
+ * @function: timer expiry callback function
+ * @base: pointer to the timer base (per cpu and per clock)
+ * @state: state information (See bit values above)
+ * @is_rel: Set if the timer was armed relative
+ * @is_soft: Set if hrtimer will be expired in soft interrupt context.
+ * @is_hard: Set if hrtimer will be expired in hard interrupt context
+ * even on RT.
+ *
+ * The hrtimer structure must be initialized by hrtimer_init()
+ */
+struct hrtimer {
+ struct timerqueue_node node;
+ ktime_t _softexpires;
+ enum hrtimer_restart (*function)(struct hrtimer *);
+ struct hrtimer_clock_base *base;
+ u8 state;
+ u8 is_rel;
+ u8 is_soft;
+ u8 is_hard;
+};
+
+#endif /* _LINUX_HRTIMER_TYPES_H */
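
For context, a minimal hrtimer user under the unchanged API looks like the sketch below; it relies only on hrtimer_init(), hrtimer_start() and the enum hrtimer_restart contract that moves into this header. The function names are invented:

    static enum hrtimer_restart example_timer_fn(struct hrtimer *t)
    {
    	/* One-shot: do the work, then tell the core not to re-arm */
    	return HRTIMER_NORESTART;
    }

    static void example_arm(struct hrtimer *t)
    {
    	hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
    	t->function = example_timer_fn;
    	hrtimer_start(t, ms_to_ktime(10), HRTIMER_MODE_REL);
    }
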
diff --git a/include/linux/hsi/ssi_protocol.h b/include/linux/hsi/ssi_protocol.h
index 2d6f3cfa7dea..972434daa000 100644
--- a/include/linux/hsi/ssi_protocol.h
+++ b/include/linux/hsi/ssi_protocol.h
@@ -24,6 +24,7 @@ int ssip_slave_stop_tx(struct hsi_client *master);
void ssip_reset_event(struct hsi_client *master);
int ssip_slave_running(struct hsi_client *master);
+void ssi_waketest(struct hsi_client *cl, unsigned int enable);
#endif /* __LINUX_SSIP_SLAVE_H__ */
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 20284387b841..5adb86af35fc 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -25,9 +25,6 @@ static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
#endif
vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
-struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
- unsigned long addr, pmd_t *pmd,
- unsigned int flags);
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
pmd_t *pmd, unsigned long addr, unsigned long next);
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
@@ -70,6 +67,26 @@ extern struct kobj_attribute shmem_enabled_attr;
#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
+/*
+ * Mask of all large folio orders supported for anonymous THP; all orders up to
+ * and including PMD_ORDER, except order-0 (which is not "huge") and order-1
+ * (which is a limitation of the THP implementation).
+ */
+#define THP_ORDERS_ALL_ANON ((BIT(PMD_ORDER + 1) - 1) & ~(BIT(0) | BIT(1)))
+
+/*
+ * Mask of all large folio orders supported for file THP.
+ */
+#define THP_ORDERS_ALL_FILE (BIT(PMD_ORDER) | BIT(PUD_ORDER))
+
+/*
+ * Mask of all large folio orders supported for THP.
+ */
+#define THP_ORDERS_ALL (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE)
+
+#define thp_vma_allowable_order(vma, vm_flags, smaps, in_pf, enforce_sysfs, order) \
+ (!!thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf, enforce_sysfs, BIT(order)))
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE ((1UL) << HPAGE_PMD_SHIFT)
@@ -80,45 +97,105 @@ extern struct kobj_attribute shmem_enabled_attr;
#define HPAGE_PUD_MASK (~(HPAGE_PUD_SIZE - 1))
extern unsigned long transparent_hugepage_flags;
+extern unsigned long huge_anon_orders_always;
+extern unsigned long huge_anon_orders_madvise;
+extern unsigned long huge_anon_orders_inherit;
+
+static inline bool hugepage_global_enabled(void)
+{
+ return transparent_hugepage_flags &
+ ((1<<TRANSPARENT_HUGEPAGE_FLAG) |
+ (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG));
+}
+
+static inline bool hugepage_global_always(void)
+{
+ return transparent_hugepage_flags &
+ (1<<TRANSPARENT_HUGEPAGE_FLAG);
+}
+
+static inline bool hugepage_flags_enabled(void)
+{
+ /*
+ * We cover both the anon and the file-backed case here; we must return
+ * true if globally enabled, even when all anon sizes are set to never.
+ * So we don't need to look at huge_anon_orders_inherit.
+ */
+ return hugepage_global_enabled() ||
+ huge_anon_orders_always ||
+ huge_anon_orders_madvise;
+}
+
+static inline int highest_order(unsigned long orders)
+{
+ return fls_long(orders) - 1;
+}
-#define hugepage_flags_enabled() \
- (transparent_hugepage_flags & \
- ((1<<TRANSPARENT_HUGEPAGE_FLAG) | \
- (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
-#define hugepage_flags_always() \
- (transparent_hugepage_flags & \
- (1<<TRANSPARENT_HUGEPAGE_FLAG))
+static inline int next_order(unsigned long *orders, int prev)
+{
+ *orders &= ~BIT(prev);
+ return highest_order(*orders);
+}
/*
* Do the below checks:
* - For file vma, check if the linear page offset of vma is
- * HPAGE_PMD_NR aligned within the file. The hugepage is
- * guaranteed to be hugepage-aligned within the file, but we must
- * check that the PMD-aligned addresses in the VMA map to
- * PMD-aligned offsets within the file, else the hugepage will
- * not be PMD-mappable.
- * - For all vmas, check if the haddr is in an aligned HPAGE_PMD_SIZE
+ * order-aligned within the file. The hugepage is
+ * guaranteed to be order-aligned within the file, but we must
+ * check that the order-aligned addresses in the VMA map to
+ * order-aligned offsets within the file, else the hugepage will
+ * not be mappable.
+ * - For all vmas, check if the haddr is in an aligned hugepage
* area.
*/
-static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
- unsigned long addr)
+static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
+ unsigned long addr, int order)
{
+ unsigned long hpage_size = PAGE_SIZE << order;
unsigned long haddr;
/* Don't have to check pgoff for anonymous vma */
if (!vma_is_anonymous(vma)) {
if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
- HPAGE_PMD_NR))
+ hpage_size >> PAGE_SHIFT))
return false;
}
- haddr = addr & HPAGE_PMD_MASK;
+ haddr = ALIGN_DOWN(addr, hpage_size);
- if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
+ if (haddr < vma->vm_start || haddr + hpage_size > vma->vm_end)
return false;
return true;
}
+/*
+ * Filter the bitfield of input orders to the ones suitable for use in the vma.
+ * See thp_vma_suitable_order().
+ * All orders that pass the checks are returned as a bitfield.
+ */
+static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long orders)
+{
+ int order;
+
+ /*
+ * Iterate over orders, highest to lowest, removing orders that don't
+ * meet alignment requirements from the set. Exit loop at first order
+ * that meets requirements, since all lower orders must also meet
+ * requirements.
+ */
+
+ order = highest_order(orders);
+
+ while (orders) {
+ if (thp_vma_suitable_order(vma, addr, order))
+ break;
+ order = next_order(&orders, order);
+ }
+
+ return orders;
+}
+
static inline bool file_thp_enabled(struct vm_area_struct *vma)
{
struct inode *inode;
@@ -129,12 +206,55 @@ static inline bool file_thp_enabled(struct vm_area_struct *vma)
inode = vma->vm_file->f_inode;
return (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS)) &&
- (vma->vm_flags & VM_EXEC) &&
!inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
}
-bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
- bool smaps, bool in_pf, bool enforce_sysfs);
+unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
+ unsigned long vm_flags, bool smaps,
+ bool in_pf, bool enforce_sysfs,
+ unsigned long orders);
+
+/**
+ * thp_vma_allowable_orders - determine hugepage orders that are allowed for vma
+ * @vma: the vm area to check
+ * @vm_flags: use these vm_flags instead of vma->vm_flags
+ * @smaps: whether answer will be used for smaps file
+ * @in_pf: whether answer will be used by page fault handler
+ * @enforce_sysfs: whether sysfs config should be taken into account
+ * @orders: bitfield of all orders to consider
+ *
+ * Calculates the intersection of the requested hugepage orders and the allowed
+ * hugepage orders for the provided vma. Permitted orders are encoded as a set
+ * bit at the corresponding bit position (bit-2 corresponds to order-2, bit-3
+ * corresponds to order-3, etc). Order-0 is never considered a hugepage order.
+ *
+ * Return: bitfield of orders allowed for hugepage in the vma. 0 if no hugepage
+ * orders are allowed.
+ */
+static inline
+unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
+ unsigned long vm_flags, bool smaps,
+ bool in_pf, bool enforce_sysfs,
+ unsigned long orders)
+{
+ /* Optimization to check if required orders are enabled early. */
+ if (enforce_sysfs && vma_is_anonymous(vma)) {
+ unsigned long mask = READ_ONCE(huge_anon_orders_always);
+
+ if (vm_flags & VM_HUGEPAGE)
+ mask |= READ_ONCE(huge_anon_orders_madvise);
+ if (hugepage_global_always() ||
+ ((vm_flags & VM_HUGEPAGE) && hugepage_global_enabled()))
+ mask |= READ_ONCE(huge_anon_orders_inherit);
+
+ orders &= mask;
+ if (!orders)
+ return 0;
+ }
+
+ return __thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf,
+ enforce_sysfs, orders);
+}
#define transparent_hugepage_use_zero_page() \
(transparent_hugepage_flags & \
@@ -143,9 +263,7 @@ bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags);
-void prep_transhuge_page(struct page *page);
-void free_transhuge_page(struct page *page);
-
+void folio_prep_large_rmappable(struct folio *folio);
bool can_split_folio(struct folio *folio, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
@@ -272,20 +390,27 @@ static inline bool folio_test_pmd_mappable(struct folio *folio)
return false;
}
-static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
- unsigned long addr)
+static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
+ unsigned long addr, int order)
{
return false;
}
-static inline bool hugepage_vma_check(struct vm_area_struct *vma,
- unsigned long vm_flags, bool smaps,
- bool in_pf, bool enforce_sysfs)
+static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long orders)
{
- return false;
+ return 0;
+}
+
+static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
+ unsigned long vm_flags, bool smaps,
+ bool in_pf, bool enforce_sysfs,
+ unsigned long orders)
+{
+ return 0;
}
-static inline void prep_transhuge_page(struct page *page) {}
+static inline void folio_prep_large_rmappable(struct folio *folio) {}
#define transparent_hugepage_flags 0UL
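
Taken together, the order-mask helpers above support the following caller pattern. This sketch (invented function, page-fault-style flags assumed) mirrors the filtering loop in thp_vma_suitable_orders():

    static int example_pick_order(struct vm_area_struct *vma, unsigned long addr,
    			          unsigned long vm_flags)
    {
    	unsigned long orders;

    	/* smaps=false, in_pf=true, enforce_sysfs=true: fault-path semantics */
    	orders = thp_vma_allowable_orders(vma, vm_flags, false, true, true,
    					  THP_ORDERS_ALL_ANON);
    	orders = thp_vma_suitable_orders(vma, addr, orders);

    	/* Highest remaining bit is the largest usable order; 0 means none */
    	return orders ? highest_order(orders) : 0;
    }
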
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 6d041aa9f0fe..c1ee640d87b1 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -26,9 +26,11 @@ typedef struct { unsigned long pd; } hugepd_t;
#define __hugepd(x) ((hugepd_t) { (x) })
#endif
+void free_huge_folio(struct folio *folio);
+
#ifdef CONFIG_HUGETLB_PAGE
-#include <linux/mempolicy.h>
+#include <linux/pagemap.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>
@@ -58,6 +60,7 @@ struct resv_map {
long adds_in_progress;
struct list_head region_cache;
long region_cache_count;
+ struct rw_semaphore rw_sema;
#ifdef CONFIG_CGROUP_HUGETLB
/*
* On private mappings, the counter to uncharge reservations is stored
@@ -131,15 +134,12 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
struct vm_area_struct *, struct vm_area_struct *);
struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
- unsigned long address, unsigned int flags);
-long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
- struct page **, struct vm_area_struct **,
- unsigned long *, unsigned long *, long, unsigned int,
- int *);
+ unsigned long address, unsigned int flags,
+ unsigned int *page_mask);
void unmap_hugepage_range(struct vm_area_struct *,
unsigned long, unsigned long, struct page *,
zap_flags_t);
-void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+void __unmap_hugepage_range(struct mmu_gather *tlb,
struct vm_area_struct *vma,
unsigned long start, unsigned long end,
struct page *ref_page, zap_flags_t zap_flags);
@@ -168,7 +168,6 @@ int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
bool *migratable_cleared);
void folio_putback_active_hugetlb(struct folio *folio);
void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason);
-void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
@@ -247,6 +246,25 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
unsigned long *start, unsigned long *end);
+extern void __hugetlb_zap_begin(struct vm_area_struct *vma,
+ unsigned long *begin, unsigned long *end);
+extern void __hugetlb_zap_end(struct vm_area_struct *vma,
+ struct zap_details *details);
+
+static inline void hugetlb_zap_begin(struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end)
+{
+ if (is_vm_hugetlb_page(vma))
+ __hugetlb_zap_begin(vma, start, end);
+}
+
+static inline void hugetlb_zap_end(struct vm_area_struct *vma,
+ struct zap_details *details)
+{
+ if (is_vm_hugetlb_page(vma))
+ __hugetlb_zap_end(vma, details);
+}
+
void hugetlb_vma_lock_read(struct vm_area_struct *vma);
void hugetlb_vma_unlock_read(struct vm_area_struct *vma);
void hugetlb_vma_lock_write(struct vm_area_struct *vma);
@@ -262,6 +280,7 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
unsigned long cp_flags);
bool is_hugetlb_entry_migration(pte_t pte);
+bool is_hugetlb_entry_hwpoisoned(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
#else /* !CONFIG_HUGETLB_PAGE */
@@ -298,20 +317,23 @@ static inline void adjust_range_if_pmd_sharing_possible(
{
}
-static inline struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
- unsigned long address, unsigned int flags)
+static inline void hugetlb_zap_begin(
+ struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end)
{
- BUILD_BUG(); /* should never be compiled in if !CONFIG_HUGETLB_PAGE*/
}
-static inline long follow_hugetlb_page(struct mm_struct *mm,
- struct vm_area_struct *vma, struct page **pages,
- struct vm_area_struct **vmas, unsigned long *position,
- unsigned long *nr_pages, long i, unsigned int flags,
- int *nonblocking)
+static inline void hugetlb_zap_end(
+ struct vm_area_struct *vma,
+ struct zap_details *details)
{
- BUG();
- return 0;
+}
+
+static inline struct page *hugetlb_follow_page_mask(
+ struct vm_area_struct *vma, unsigned long address, unsigned int flags,
+ unsigned int *page_mask)
+{
+	BUILD_BUG(); /* should never be compiled in if !CONFIG_HUGETLB_PAGE */
}
static inline int copy_hugetlb_page_range(struct mm_struct *dst,
@@ -452,7 +474,7 @@ static inline long hugetlb_change_protection(
return 0;
}
-static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
struct vm_area_struct *vma, unsigned long start,
unsigned long end, struct page *ref_page,
zap_flags_t zap_flags)
@@ -523,7 +545,6 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
}
struct hugetlbfs_inode_info {
- struct shared_policy policy;
struct inode vfs_inode;
unsigned int seals;
};
@@ -727,8 +748,6 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
unsigned long addr, int avoid_reserve);
struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
nodemask_t *nmask, gfp_t gfp_mask);
-struct folio *alloc_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *vma,
- unsigned long address);
int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
pgoff_t idx);
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
@@ -757,26 +776,12 @@ static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio
return folio->_hugetlb_subpool;
}
-/*
- * hugetlb page subpool pointer located in hpage[2].hugetlb_subpool
- */
-static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
-{
- return hugetlb_folio_subpool(page_folio(hpage));
-}
-
static inline void hugetlb_set_folio_subpool(struct folio *folio,
struct hugepage_subpool *subpool)
{
folio->_hugetlb_subpool = subpool;
}
-static inline void hugetlb_set_page_subpool(struct page *hpage,
- struct hugepage_subpool *subpool)
-{
- hugetlb_set_folio_subpool(page_folio(hpage), subpool);
-}
-
static inline struct hstate *hstate_file(struct file *f)
{
return hstate_inode(file_inode(f));
@@ -824,7 +829,7 @@ static inline unsigned huge_page_shift(struct hstate *h)
static inline bool hstate_is_gigantic(struct hstate *h)
{
- return huge_page_order(h) > MAX_ORDER;
+ return huge_page_order(h) > MAX_PAGE_ORDER;
}
static inline unsigned int pages_per_huge_page(const struct hstate *h)
@@ -837,6 +842,12 @@ static inline unsigned int blocks_per_huge_page(struct hstate *h)
return huge_page_size(h) / 512;
}
+static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h,
+ struct address_space *mapping, pgoff_t idx)
+{
+ return filemap_lock_folio(mapping, idx << huge_page_order(h));
+}
+
#include <asm/hugetlb.h>
#ifndef is_hugepage_only_range
@@ -867,11 +878,6 @@ static inline struct hstate *folio_hstate(struct folio *folio)
return size_to_hstate(folio_size(folio));
}
-static inline struct hstate *page_hstate(struct page *page)
-{
- return folio_hstate(page_folio(page));
-}
-
static inline unsigned hstate_index_to_shift(unsigned index)
{
return hstates[index].order + PAGE_SHIFT;
@@ -1014,7 +1020,9 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t old_pte, pte_t pte)
{
- set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+ unsigned long psize = huge_page_size(hstate_vma(vma));
+
+ set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);
}
#endif
@@ -1023,6 +1031,11 @@ void hugetlb_register_node(struct node *node);
void hugetlb_unregister_node(struct node *node);
#endif
+/*
+ * Check if a given raw @page in a hugepage is HWPOISON.
+ */
+bool is_raw_hwpoison_page_in_hugepage(struct page *page);
+
#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
@@ -1031,7 +1044,8 @@ static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio
return NULL;
}
-static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
+static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h,
+ struct address_space *mapping, pgoff_t idx)
{
return NULL;
}
@@ -1056,13 +1070,6 @@ alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
return NULL;
}
-static inline struct folio *alloc_hugetlb_folio_vma(struct hstate *h,
- struct vm_area_struct *vma,
- unsigned long address)
-{
- return NULL;
-}
-
static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
return 0;
@@ -1088,11 +1095,6 @@ static inline struct hstate *folio_hstate(struct folio *folio)
return NULL;
}
-static inline struct hstate *page_hstate(struct page *page)
-{
- return NULL;
-}
-
static inline struct hstate *size_to_hstate(unsigned long size)
{
return NULL;
@@ -1200,11 +1202,15 @@ static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
+#ifdef CONFIG_MMU
+ return ptep_get(ptep);
+#else
return *ptep;
+#endif
}
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte)
+ pte_t *ptep, pte_t pte, unsigned long sz)
{
}
@@ -1262,6 +1268,8 @@ static inline bool __vma_shareable_lock(struct vm_area_struct *vma)
return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data;
}
+bool __vma_private_lock(struct vm_area_struct *vma);
+
/*
* Safe version of huge_pte_offset() to check the locks. See comments
* above huge_pte_offset().
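
The new zap helpers are meant to bracket a range zap, as in this invented caller. Judging from adjust_range_if_pmd_sharing_possible() above, the begin side may widen start/end and take the hugetlb VMA lock while the end side releases it, but the out-of-line bodies are not in this hunk, so treat those specifics as assumptions:

    static void example_zap(struct vm_area_struct *vma, unsigned long start,
    			    unsigned long end, struct zap_details *details)
    {
    	hugetlb_zap_begin(vma, &start, &end);
    	/* ... unmap/zap page tables for [start, end) here ... */
    	hugetlb_zap_end(vma, details);
    }
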
diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
index 3d82d91f49ac..e5d64b8b59c2 100644
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -22,13 +22,6 @@ struct resv_map;
struct file_region;
#ifdef CONFIG_CGROUP_HUGETLB
-/*
- * Minimum page order trackable by hugetlb cgroup.
- * At least 3 pages are necessary for all the tracking information.
- * The second tail page contains all of the hugetlb-specific fields.
- */
-#define HUGETLB_CGROUP_MIN_ORDER order_base_2(__NR_USED_SUBPAGE)
-
enum hugetlb_memory_event {
HUGETLB_MAX,
HUGETLB_NR_MEMORY_EVENTS,
@@ -68,8 +61,6 @@ static inline struct hugetlb_cgroup *
__hugetlb_cgroup_from_folio(struct folio *folio, bool rsvd)
{
VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
- if (folio_order(folio) < HUGETLB_CGROUP_MIN_ORDER)
- return NULL;
if (rsvd)
return folio->_hugetlb_cgroup_rsvd;
else
@@ -91,8 +82,6 @@ static inline void __set_hugetlb_cgroup(struct folio *folio,
struct hugetlb_cgroup *h_cg, bool rsvd)
{
VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
- if (folio_order(folio) < HUGETLB_CGROUP_MIN_ORDER)
- return;
if (rsvd)
folio->_hugetlb_cgroup_rsvd = h_cg;
else
diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h
index 7fbb45911273..db199d653dd1 100644
--- a/include/linux/hw_breakpoint.h
+++ b/include/linux/hw_breakpoint.h
@@ -90,9 +90,6 @@ extern int dbg_reserve_bp_slot(struct perf_event *bp);
extern int dbg_release_bp_slot(struct perf_event *bp);
extern int reserve_bp_slot(struct perf_event *bp);
extern void release_bp_slot(struct perf_event *bp);
-int arch_reserve_bp_slot(struct perf_event *bp);
-void arch_release_bp_slot(struct perf_event *bp);
-void arch_unregister_hw_breakpoint(struct perf_event *bp);
extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk);
diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
index 8a3115516a1b..136e9842120e 100644
--- a/include/linux/hw_random.h
+++ b/include/linux/hw_random.h
@@ -63,5 +63,6 @@ extern void hwrng_unregister(struct hwrng *rng);
extern void devm_hwrng_unregister(struct device *dve, struct hwrng *rng);
extern long hwrng_msleep(struct hwrng *rng, unsigned int msecs);
+extern long hwrng_yield(struct hwrng *rng);
#endif /* LINUX_HWRANDOM_H_ */
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index 492dd27a5dd8..8cd6a6b33593 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -44,6 +44,7 @@ enum hwmon_chip_attributes {
hwmon_chip_in_samples,
hwmon_chip_power_samples,
hwmon_chip_temp_samples,
+ hwmon_chip_beep_enable,
};
#define HWMON_C_TEMP_RESET_HISTORY BIT(hwmon_chip_temp_reset_history)
@@ -58,6 +59,7 @@ enum hwmon_chip_attributes {
#define HWMON_C_IN_SAMPLES BIT(hwmon_chip_in_samples)
#define HWMON_C_POWER_SAMPLES BIT(hwmon_chip_power_samples)
#define HWMON_C_TEMP_SAMPLES BIT(hwmon_chip_temp_samples)
+#define HWMON_C_BEEP_ENABLE BIT(hwmon_chip_beep_enable)
enum hwmon_temp_attributes {
hwmon_temp_enable,
@@ -87,6 +89,7 @@ enum hwmon_temp_attributes {
hwmon_temp_reset_history,
hwmon_temp_rated_min,
hwmon_temp_rated_max,
+ hwmon_temp_beep,
};
#define HWMON_T_ENABLE BIT(hwmon_temp_enable)
@@ -116,6 +119,7 @@ enum hwmon_temp_attributes {
#define HWMON_T_RESET_HISTORY BIT(hwmon_temp_reset_history)
#define HWMON_T_RATED_MIN BIT(hwmon_temp_rated_min)
#define HWMON_T_RATED_MAX BIT(hwmon_temp_rated_max)
+#define HWMON_T_BEEP BIT(hwmon_temp_beep)
enum hwmon_in_attributes {
hwmon_in_enable,
@@ -136,6 +140,7 @@ enum hwmon_in_attributes {
hwmon_in_crit_alarm,
hwmon_in_rated_min,
hwmon_in_rated_max,
+ hwmon_in_beep,
};
#define HWMON_I_ENABLE BIT(hwmon_in_enable)
@@ -156,6 +161,7 @@ enum hwmon_in_attributes {
#define HWMON_I_CRIT_ALARM BIT(hwmon_in_crit_alarm)
#define HWMON_I_RATED_MIN BIT(hwmon_in_rated_min)
#define HWMON_I_RATED_MAX BIT(hwmon_in_rated_max)
+#define HWMON_I_BEEP BIT(hwmon_in_beep)
enum hwmon_curr_attributes {
hwmon_curr_enable,
@@ -176,6 +182,7 @@ enum hwmon_curr_attributes {
hwmon_curr_crit_alarm,
hwmon_curr_rated_min,
hwmon_curr_rated_max,
+ hwmon_curr_beep,
};
#define HWMON_C_ENABLE BIT(hwmon_curr_enable)
@@ -196,6 +203,7 @@ enum hwmon_curr_attributes {
#define HWMON_C_CRIT_ALARM BIT(hwmon_curr_crit_alarm)
#define HWMON_C_RATED_MIN BIT(hwmon_curr_rated_min)
#define HWMON_C_RATED_MAX BIT(hwmon_curr_rated_max)
+#define HWMON_C_BEEP BIT(hwmon_curr_beep)
enum hwmon_power_attributes {
hwmon_power_enable,
@@ -312,6 +320,7 @@ enum hwmon_fan_attributes {
hwmon_fan_min_alarm,
hwmon_fan_max_alarm,
hwmon_fan_fault,
+ hwmon_fan_beep,
};
#define HWMON_F_ENABLE BIT(hwmon_fan_enable)
@@ -326,6 +335,7 @@ enum hwmon_fan_attributes {
#define HWMON_F_MIN_ALARM BIT(hwmon_fan_min_alarm)
#define HWMON_F_MAX_ALARM BIT(hwmon_fan_max_alarm)
#define HWMON_F_FAULT BIT(hwmon_fan_fault)
+#define HWMON_F_BEEP BIT(hwmon_fan_beep)
enum hwmon_pwm_attributes {
hwmon_pwm_input,
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index bfbc37ce223b..6ef0557b4bff 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -164,8 +164,28 @@ struct hv_ring_buffer {
u8 buffer[];
} __packed;
+
+/*
+ * If the requested ring buffer size is at least 8 times the size of the
+ * header, steal space from the ring buffer for the header. Otherwise, add
+ * space for the header so that it doesn't take too much of the ring buffer
+ * space.
+ *
+ * The factor of 8 is somewhat arbitrary. The goal is to prevent adding a
+ * relatively small header (4 Kbytes on x86) to a large-ish power-of-2 ring
+ * buffer size (such as 128 Kbytes) and so end up making a nearly twice as
+ * large allocation that will be almost half wasted. As a contrasting example,
+ * on ARM64 with 64 Kbyte page size, we don't want to take 64 Kbytes for the
+ * header from a 128 Kbyte allocation, leaving only 64 Kbytes for the ring.
+ * In this latter case, we must add 64 Kbytes for the header and not worry
+ * about what's wasted.
+ */
+#define VMBUS_HEADER_ADJ(payload_sz) \
+ ((payload_sz) >= 8 * sizeof(struct hv_ring_buffer) ? \
+ 0 : sizeof(struct hv_ring_buffer))
+
/* Calculate the proper size of a ringbuffer, it must be page-aligned */
-#define VMBUS_RING_SIZE(payload_sz) PAGE_ALIGN(sizeof(struct hv_ring_buffer) + \
+#define VMBUS_RING_SIZE(payload_sz) PAGE_ALIGN(VMBUS_HEADER_ADJ(payload_sz) + \
(payload_sz))
struct hv_ring_buffer_info {
@@ -348,7 +368,7 @@ struct vmtransfer_page_packet_header {
u8 sender_owns_set;
u8 reserved;
u32 range_cnt;
- struct vmtransfer_page_range ranges[1];
+ struct vmtransfer_page_range ranges[];
} __packed;
struct vmgpadl_packet_header {
@@ -665,8 +685,8 @@ struct vmbus_channel_initiate_contact {
u64 interrupt_page;
struct {
u8 msg_sint;
- u8 padding1[3];
- u32 padding2;
+ u8 msg_vtl;
+ u8 reserved[6];
};
};
u64 monitor_page1;
@@ -1239,9 +1259,6 @@ extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
u32 *buffer_actual_len,
u64 *requestid);
-
-extern void vmbus_ontimer(unsigned long data);
-
/* Base driver object */
struct hv_driver {
const char *name;
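
A quick check of the VMBUS_RING_SIZE() arithmetic, assuming a 4 KiB hv_ring_buffer header on x86 and a 64 KiB header on ARM64 with 64 KiB pages (the two cases named in the comment above):

    x86, payload 128 KiB:   128K >= 8 * 4K, so VMBUS_HEADER_ADJ = 0 and
                            VMBUS_RING_SIZE = PAGE_ALIGN(0 + 128K) = 128K
                            (the header is stolen from the payload)
    x86, payload 16 KiB:    16K < 32K, so VMBUS_HEADER_ADJ = 4K and
                            VMBUS_RING_SIZE = PAGE_ALIGN(4K + 16K) = 20K
    arm64/64K, payload 128 KiB: 128K < 8 * 64K, so VMBUS_HEADER_ADJ = 64K and
                            VMBUS_RING_SIZE = PAGE_ALIGN(64K + 128K) = 192K
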
diff --git a/include/linux/i2c-atr.h b/include/linux/i2c-atr.h
new file mode 100644
index 000000000000..4d5da161c225
--- /dev/null
+++ b/include/linux/i2c-atr.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * I2C Address Translator
+ *
+ * Copyright (c) 2019,2022 Luca Ceresoli <luca@lucaceresoli.net>
+ * Copyright (c) 2022,2023 Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+ *
+ * Based on i2c-mux.h
+ */
+
+#ifndef _LINUX_I2C_ATR_H
+#define _LINUX_I2C_ATR_H
+
+#include <linux/i2c.h>
+#include <linux/types.h>
+
+struct device;
+struct fwnode_handle;
+struct i2c_atr;
+
+/**
+ * struct i2c_atr_ops - Callbacks from ATR to the device driver.
+ * @attach_client: Notify the driver of a new device connected on a child
+ * bus, with the alias assigned to it. The driver must
+ * configure the hardware to use the alias.
+ * @detach_client: Notify the driver of a device getting disconnected. The
+ * driver must configure the hardware to stop using the
+ * alias.
+ *
+ * The @attach_client callback returns 0 on success, a negative error code otherwise.
+ */
+struct i2c_atr_ops {
+ int (*attach_client)(struct i2c_atr *atr, u32 chan_id,
+ const struct i2c_client *client, u16 alias);
+ void (*detach_client)(struct i2c_atr *atr, u32 chan_id,
+ const struct i2c_client *client);
+};
+
+/**
+ * i2c_atr_new() - Allocate and initialize an I2C ATR helper.
+ * @parent: The parent (upstream) adapter
+ * @dev: The device acting as an ATR
+ * @ops: Driver-specific callbacks
+ * @max_adapters: Maximum number of child adapters
+ *
+ * The new ATR helper is connected to the parent adapter but has no child
+ * adapters. Call i2c_atr_add_adapter() to add some.
+ *
+ * Call i2c_atr_delete() to remove.
+ *
+ * Return: pointer to the new ATR helper object, or ERR_PTR
+ */
+struct i2c_atr *i2c_atr_new(struct i2c_adapter *parent, struct device *dev,
+ const struct i2c_atr_ops *ops, int max_adapters);
+
+/**
+ * i2c_atr_delete - Delete an I2C ATR helper.
+ * @atr: I2C ATR helper to be deleted.
+ *
+ * Precondition: all the adapters added with i2c_atr_add_adapter() must be
+ * removed by calling i2c_atr_del_adapter().
+ */
+void i2c_atr_delete(struct i2c_atr *atr);
+
+/**
+ * i2c_atr_add_adapter - Create a child ("downstream") I2C bus.
+ * @atr: The I2C ATR
+ * @chan_id: Index of the new adapter (0 .. max_adapters-1). This value is
+ * passed to the callbacks in `struct i2c_atr_ops`.
+ * @adapter_parent: The device used as the parent of the new i2c adapter, or NULL
+ * to use the i2c-atr device as the parent.
+ * @bus_handle: The fwnode handle that points to the adapter's i2c
+ * peripherals, or NULL.
+ *
+ * After calling this function a new i2c bus will appear. Adding and removing
+ * devices on the downstream bus will result in calls to the
+ * &i2c_atr_ops->attach_client and &i2c_atr_ops->detach_client callbacks for the
+ * driver to assign an alias to the device.
+ *
+ * The adapter's fwnode is set to @bus_handle, or if @bus_handle is NULL the
+ * function looks for a child node whose 'reg' property matches the chan_id
+ * under the i2c-atr device's 'i2c-atr' node.
+ *
+ * Call i2c_atr_del_adapter() to remove the adapter.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int i2c_atr_add_adapter(struct i2c_atr *atr, u32 chan_id,
+ struct device *adapter_parent,
+ struct fwnode_handle *bus_handle);
+
+/**
+ * i2c_atr_del_adapter - Remove a child ("downstream") I2C bus added by
+ * i2c_atr_add_adapter(). If no I2C bus has been added
+ * this function is a no-op.
+ * @atr: The I2C ATR
+ * @chan_id: Index of the adapter to be removed (0 .. max_adapters-1)
+ */
+void i2c_atr_del_adapter(struct i2c_atr *atr, u32 chan_id);
+
+/**
+ * i2c_atr_set_driver_data - Set private driver data to the i2c-atr instance.
+ * @atr: The I2C ATR
+ * @data: Pointer to the data to store
+ */
+void i2c_atr_set_driver_data(struct i2c_atr *atr, void *data);
+
+/**
+ * i2c_atr_get_driver_data - Get the stored driver data.
+ * @atr: The I2C ATR
+ *
+ * Return: Pointer to the stored data
+ */
+void *i2c_atr_get_driver_data(struct i2c_atr *atr);
+
+#endif /* _LINUX_I2C_ATR_H */
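
To make the flow concrete, here is a hedged skeleton of an ATR user (think a serializer/deserializer that remaps downstream device addresses); every name is invented and error handling is trimmed:

    static int example_attach(struct i2c_atr *atr, u32 chan_id,
    			      const struct i2c_client *client, u16 alias)
    {
    	/* Program the hardware so upstream traffic for "alias" reaches
    	 * client->addr on downstream channel chan_id. */
    	return 0;
    }

    static void example_detach(struct i2c_atr *atr, u32 chan_id,
    			       const struct i2c_client *client)
    {
    	/* Tear down the hardware mapping for this client. */
    }

    static const struct i2c_atr_ops example_atr_ops = {
    	.attach_client = example_attach,
    	.detach_client = example_detach,
    };

    static int example_init(struct device *dev, struct i2c_adapter *parent)
    {
    	struct i2c_atr *atr;

    	atr = i2c_atr_new(parent, dev, &example_atr_ops, 1);
    	if (IS_ERR(atr))
    		return PTR_ERR(atr);

    	/* One downstream bus; parent device and fwnode default to the ATR */
    	return i2c_atr_add_adapter(atr, 0, NULL, NULL);
    }
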
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 13a1ce38cb0c..652ecb7abeda 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -23,7 +23,7 @@
#include <linux/swab.h> /* for swab16 */
#include <uapi/linux/i2c.h>
-extern struct bus_type i2c_bus_type;
+extern const struct bus_type i2c_bus_type;
extern struct device_type i2c_adapter_type;
extern struct device_type i2c_client_type;
@@ -237,7 +237,6 @@ enum i2c_driver_flags {
* struct i2c_driver - represent an I2C device driver
* @class: What kind of i2c device we instantiate (for detect)
* @probe: Callback for device binding
- * @probe_new: Transitional callback for device binding - do not use
* @remove: Callback for device unbinding
* @shutdown: Callback for device shutdown
* @alert: Alert callback, for example for the SMBus alert protocol
@@ -272,16 +271,8 @@ enum i2c_driver_flags {
struct i2c_driver {
unsigned int class;
- union {
/* Standard driver model interfaces */
- int (*probe)(struct i2c_client *client);
- /*
- * Legacy callback that was part of a conversion of .probe().
- * Today it has the same semantic as .probe(). Don't use for new
- * code.
- */
- int (*probe_new)(struct i2c_client *client);
- };
+ int (*probe)(struct i2c_client *client);
void (*remove)(struct i2c_client *client);
@@ -367,6 +358,8 @@ struct i2c_adapter *i2c_verify_adapter(struct device *dev);
const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id,
const struct i2c_client *client);
+const void *i2c_get_match_data(const struct i2c_client *client);
+
static inline struct i2c_client *kobj_to_i2c_client(struct kobject *kobj)
{
struct device * const dev = kobj_to_dev(kobj);
@@ -753,6 +746,8 @@ struct i2c_adapter {
struct irq_domain *host_notify_domain;
struct regulator *bus_regulator;
+
+ struct dentry *debugfs;
};
#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev)
@@ -857,7 +852,6 @@ static inline void i2c_mark_adapter_resumed(struct i2c_adapter *adap)
/* i2c adapter classes (bitmask) */
#define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */
-#define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */
#define I2C_CLASS_SPD (1<<7) /* Memory modules */
/* Warn users that the adapter doesn't support classes anymore */
#define I2C_CLASS_DEPRECATED (1<<8)
diff --git a/include/linux/i3c/device.h b/include/linux/i3c/device.h
index 90fa83464f00..e119f11948ef 100644
--- a/include/linux/i3c/device.h
+++ b/include/linux/i3c/device.h
@@ -54,6 +54,7 @@ enum i3c_hdr_mode {
* struct i3c_priv_xfer - I3C SDR private transfer
* @rnw: encodes the transfer direction. true for a read, false for a write
* @len: transfer length in bytes of the transfer
+ * @actual_len: actual number of bytes transferred by the controller
* @data: input/output buffer
* @data.in: input buffer. Must point to a DMA-able buffer
* @data.out: output buffer. Must point to a DMA-able buffer
@@ -62,6 +63,7 @@ enum i3c_hdr_mode {
struct i3c_priv_xfer {
u8 rnw;
u16 len;
+ u16 actual_len;
union {
void *in;
const void *out;
@@ -96,7 +98,7 @@ enum i3c_dcr {
/**
* struct i3c_device_info - I3C device information
- * @pid: Provisional ID
+ * @pid: Provisioned ID
* @bcr: Bus Characteristic Register
* @dcr: Device Characteristic Register
* @static_addr: static/I2C address
diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h
index 0b52da4f2346..0ca27dd86956 100644
--- a/include/linux/i3c/master.h
+++ b/include/linux/i3c/master.h
@@ -24,6 +24,12 @@
struct i2c_client;
+/* notifier actions. notifier call data is the struct i3c_bus */
+enum {
+ I3C_NOTIFY_BUS_ADD,
+ I3C_NOTIFY_BUS_REMOVE,
+};
+
struct i3c_master_controller;
struct i3c_bus;
struct i3c_device;
@@ -70,7 +76,6 @@ struct i2c_dev_boardinfo {
/**
* struct i2c_dev_desc - I2C device descriptor
* @common: common part of the I2C device descriptor
- * @boardinfo: pointer to the boardinfo attached to this I2C device
* @dev: I2C device object registered to the I2C framework
* @addr: I2C device address
* @lvr: LVR (Legacy Virtual Register) needed by the I3C core to know about
@@ -129,6 +134,7 @@ struct i3c_ibi_slot {
* rejected by the master
* @num_slots: number of IBI slots reserved for this device
* @enabled: reflect the IBI status
+ * @wq: workqueue used to execute IBI handlers.
* @handler: IBI handler specified at i3c_device_request_ibi() call time. This
* handler will be called from the controller workqueue, and as such
* is allowed to sleep (though it is recommended to process the IBI
@@ -151,6 +157,7 @@ struct i3c_device_ibi_info {
unsigned int max_payload_len;
unsigned int num_slots;
unsigned int enabled;
+ struct workqueue_struct *wq;
void (*handler)(struct i3c_device *dev,
const struct i3c_ibi_payload *payload);
};
@@ -166,7 +173,7 @@ struct i3c_device_ibi_info {
* assigned a dynamic address by the master. Will be used during
* bus initialization to assign it a specific dynamic address
* before starting DAA (Dynamic Address Assignment)
- * @pid: I3C Provisional ID exposed by the device. This is a unique identifier
+ * @pid: I3C Provisioned ID exposed by the device. This is a unique identifier
* that may be used to attach boardinfo to i3c_dev_desc when the device
* does not have a static address
* @of_node: optional DT node in case the device has been described in the DT
@@ -426,6 +433,8 @@ struct i3c_bus {
* for a future IBI
* This method is mandatory only if ->request_ibi is not
* NULL.
+ * @enable_hotjoin: enable hot join event detection.
+ * @disable_hotjoin: disable hot join event detection.
*/
struct i3c_master_controller_ops {
int (*bus_init)(struct i3c_master_controller *master);
@@ -452,6 +461,8 @@ struct i3c_master_controller_ops {
int (*disable_ibi)(struct i3c_dev_desc *dev);
void (*recycle_ibi_slot)(struct i3c_dev_desc *dev,
struct i3c_ibi_slot *slot);
+ int (*enable_hotjoin)(struct i3c_master_controller *master);
+ int (*disable_hotjoin)(struct i3c_master_controller *master);
};
/**
@@ -465,11 +476,12 @@ struct i3c_master_controller_ops {
* @ops: master operations. See &struct i3c_master_controller_ops
* @secondary: true if the master is a secondary master
* @init_done: true when the bus initialization is done
+ * @hotjoin: true if the master supports hotjoin
* @boardinfo.i3c: list of I3C boardinfo objects
* @boardinfo.i2c: list of I2C boardinfo objects
* @boardinfo: board-level information attached to devices connected on the bus
* @bus: I3C bus exposed by this master
- * @wq: workqueue used to execute IBI handlers. Can also be used by master
+ * @wq: workqueue which can be used by master
* drivers if they need to postpone operations that need to take place
* in a thread context. Typical examples are Hot Join processing which
* requires taking the bus lock in maintenance, which in turn, can only
@@ -487,6 +499,7 @@ struct i3c_master_controller {
const struct i3c_master_controller_ops *ops;
unsigned int secondary : 1;
unsigned int init_done : 1;
+	unsigned int hotjoin : 1;
struct {
struct list_head i3c;
struct list_head i2c;
@@ -543,6 +556,8 @@ int i3c_master_register(struct i3c_master_controller *master,
const struct i3c_master_controller_ops *ops,
bool secondary);
void i3c_master_unregister(struct i3c_master_controller *master);
+int i3c_master_enable_hotjoin(struct i3c_master_controller *master);
+int i3c_master_disable_hotjoin(struct i3c_master_controller *master);
/**
* i3c_dev_get_master_data() - get master private data attached to an I3C
@@ -652,4 +667,9 @@ void i3c_master_queue_ibi(struct i3c_dev_desc *dev, struct i3c_ibi_slot *slot);
struct i3c_ibi_slot *i3c_master_get_free_ibi_slot(struct i3c_dev_desc *dev);
+void i3c_for_each_bus_locked(int (*fn)(struct i3c_bus *bus, void *data),
+ void *data);
+int i3c_register_notifier(struct notifier_block *nb);
+int i3c_unregister_notifier(struct notifier_block *nb);
+
#endif /* I3C_MASTER_H */
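
A sketch of a consumer of the new bus notifier interface, where the notifier call data is the struct i3c_bus as the comment above states; the function and variable names are invented:

    static int example_i3c_bus_notify(struct notifier_block *nb,
    				      unsigned long action, void *data)
    {
    	struct i3c_bus *bus = data;

    	switch (action) {
    	case I3C_NOTIFY_BUS_ADD:
    		/* A new bus was registered; start tracking it */
    		break;
    	case I3C_NOTIFY_BUS_REMOVE:
    		/* Drop any state associated with "bus" */
    		break;
    	}

    	return NOTIFY_OK;
    }

    static struct notifier_block example_i3c_nb = {
    	.notifier_call = example_i3c_bus_notify,
    };

    /* In module init: i3c_register_notifier(&example_i3c_nb); */
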
diff --git a/include/linux/i8042.h b/include/linux/i8042.h
index 0261e2fb3636..95b07f8b77fe 100644
--- a/include/linux/i8042.h
+++ b/include/linux/i8042.h
@@ -3,6 +3,7 @@
#define _LINUX_I8042_H
+#include <linux/errno.h>
#include <linux/types.h>
/*
diff --git a/include/linux/i8254.h b/include/linux/i8254.h
new file mode 100644
index 000000000000..a675c309232b
--- /dev/null
+++ b/include/linux/i8254.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) William Breathitt Gray */
+#ifndef _I8254_H_
+#define _I8254_H_
+
+struct device;
+struct regmap;
+
+/**
+ * struct i8254_regmap_config - Configuration for the register map of an i8254
+ * @parent: parent device
+ * @map: regmap for the i8254
+ */
+struct i8254_regmap_config {
+ struct device *parent;
+ struct regmap *map;
+};
+
+int devm_i8254_regmap_register(struct device *dev, const struct i8254_regmap_config *config);
+
+#endif /* _I8254_H_ */
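
A hedged example of registering an i8254 through this interface. The regmap is assumed to have been created by the caller beforehand (e.g. via devm_regmap_init_mmio() over the device's register window); the function name is invented:

    static int example_register_i8254(struct device *dev, struct regmap *map)
    {
    	const struct i8254_regmap_config cfg = {
    		.parent = dev,
    		.map = map,
    	};

    	return devm_i8254_regmap_register(dev, &cfg);
    }
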
diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h
index db0f4fcfdaf4..e3b3b0fa2a8f 100644
--- a/include/linux/icmpv6.h
+++ b/include/linux/icmpv6.h
@@ -85,12 +85,10 @@ extern void icmpv6_param_prob_reason(struct sk_buff *skb,
struct flowi6;
struct in6_addr;
-extern void icmpv6_flow_init(struct sock *sk,
- struct flowi6 *fl6,
- u8 type,
- const struct in6_addr *saddr,
- const struct in6_addr *daddr,
- int oif);
+
+void icmpv6_flow_init(const struct sock *sk, struct flowi6 *fl6, u8 type,
+ const struct in6_addr *saddr,
+ const struct in6_addr *daddr, int oif);
static inline void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos)
{
diff --git a/include/linux/idr.h b/include/linux/idr.h
index a0dce14090a9..da5f5fa4a3a6 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -200,7 +200,7 @@ static inline void idr_preload_end(void)
*/
#define idr_for_each_entry_ul(idr, entry, tmp, id) \
for (tmp = 0, id = 0; \
- tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
+ ((entry) = tmp <= id ? idr_get_next_ul(idr, &(id)) : NULL) != NULL; \
tmp = id, ++id)
/**
@@ -224,10 +224,12 @@ static inline void idr_preload_end(void)
* @id: Entry ID.
*
* Continue to iterate over entries, continuing after the current position.
+ * After normal termination @entry is left with the value NULL. This
+ * is convenient for a "not found" value.
*/
#define idr_for_each_entry_continue_ul(idr, entry, tmp, id) \
for (tmp = id; \
- tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
+ ((entry) = tmp <= id ? idr_get_next_ul(idr, &(id)) : NULL) != NULL; \
tmp = id, ++id)
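
With the reworked exit condition, @entry is guaranteed to be NULL after normal termination, so a lookup loop no longer needs a separate "found" flag; a small sketch (my_idr, my_obj and target are the caller's data, assumed here):

struct my_obj *entry;
unsigned long tmp, id;

idr_for_each_entry_ul(&my_idr, entry, tmp, id) {
	if (entry->key == target)
		break;		/* entry stays non-NULL on early break */
}
if (!entry)
	pr_debug("no object with key %lu\n", target);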
/*
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index c4cf296e7eaf..83c4d060a559 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -172,11 +172,11 @@
#define IEEE80211_SN_MODULO (IEEE80211_MAX_SN + 1)
-/* PV1 Layout 11ah 9.8.3.1 */
+/* PV1 Layout IEEE 802.11-2020 9.8.3.1 */
#define IEEE80211_PV1_FCTL_VERS 0x0003
#define IEEE80211_PV1_FCTL_FTYPE 0x001c
#define IEEE80211_PV1_FCTL_STYPE 0x00e0
-#define IEEE80211_PV1_FCTL_TODS 0x0100
+#define IEEE80211_PV1_FCTL_FROMDS 0x0100
#define IEEE80211_PV1_FCTL_MOREFRAGS 0x0200
#define IEEE80211_PV1_FCTL_PM 0x0400
#define IEEE80211_PV1_FCTL_MOREDATA 0x0800
@@ -307,6 +307,13 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
#define IEEE80211_TRIGGER_TYPE_BQRP 0x6
#define IEEE80211_TRIGGER_TYPE_NFRP 0x7
+/* UL-bandwidth within common_info of trigger frame */
+#define IEEE80211_TRIGGER_ULBW_MASK 0xc0000
+#define IEEE80211_TRIGGER_ULBW_20MHZ 0x0
+#define IEEE80211_TRIGGER_ULBW_40MHZ 0x1
+#define IEEE80211_TRIGGER_ULBW_80MHZ 0x2
+#define IEEE80211_TRIGGER_ULBW_160_80P80MHZ 0x3
+
struct ieee80211_hdr {
__le16 frame_control;
__le16 duration_id;
@@ -836,9 +843,14 @@ enum ieee80211_preq_target_flags {
};
/**
- * struct ieee80211_quiet_ie
+ * struct ieee80211_quiet_ie - Quiet element
+ * @count: Quiet Count
+ * @period: Quiet Period
+ * @duration: Quiet Duration
+ * @offset: Quiet Offset
*
- * This structure refers to "Quiet information element"
+ * This structure represents the payload of the "Quiet element" as
+ * described in IEEE Std 802.11-2020 section 9.4.2.22.
*/
struct ieee80211_quiet_ie {
u8 count;
@@ -848,9 +860,15 @@ struct ieee80211_quiet_ie {
} __packed;
/**
- * struct ieee80211_msrment_ie
+ * struct ieee80211_msrment_ie - Measurement element
+ * @token: Measurement Token
+ * @mode: Measurement Report Mode
+ * @type: Measurement Type
+ * @request: Measurement Request or Measurement Report
*
- * This structure refers to "Measurement Request/Report information element"
+ * This structure represents the payload of both the "Measurement
+ * Request element" and the "Measurement Report element" as described
+ * in IEEE Std 802.11-2020 sections 9.4.2.20 and 9.4.2.21.
*/
struct ieee80211_msrment_ie {
u8 token;
@@ -860,9 +878,14 @@ struct ieee80211_msrment_ie {
} __packed;
/**
- * struct ieee80211_channel_sw_ie
+ * struct ieee80211_channel_sw_ie - Channel Switch Announcement element
+ * @mode: Channel Switch Mode
+ * @new_ch_num: New Channel Number
+ * @count: Channel Switch Count
*
- * This structure refers to "Channel Switch Announcement information element"
+ * This structure represents the payload of the "Channel Switch
+ * Announcement element" as described in IEEE Std 802.11-2020 section
+ * 9.4.2.18.
*/
struct ieee80211_channel_sw_ie {
u8 mode;
@@ -871,9 +894,14 @@ struct ieee80211_channel_sw_ie {
} __packed;
/**
- * struct ieee80211_ext_chansw_ie
+ * struct ieee80211_ext_chansw_ie - Extended Channel Switch Announcement element
+ * @mode: Channel Switch Mode
+ * @new_operating_class: New Operating Class
+ * @new_ch_num: New Channel Number
+ * @count: Channel Switch Count
*
- * This structure represents the "Extended Channel Switch Announcement element"
+ * This structure represents the "Extended Channel Switch Announcement
+ * element" as described in IEEE Std 802.11-2020 section 9.4.2.52.
*/
struct ieee80211_ext_chansw_ie {
u8 mode;
@@ -894,8 +922,14 @@ struct ieee80211_sec_chan_offs_ie {
/**
* struct ieee80211_mesh_chansw_params_ie - mesh channel switch parameters IE
+ * @mesh_ttl: Time To Live
+ * @mesh_flags: Flags
+ * @mesh_reason: Reason Code
+ * @mesh_pre_value: Precedence Value
*
- * This structure represents the "Mesh Channel Switch Paramters element"
+ * This structure represents the payload of the "Mesh Channel Switch
+ * Parameters element" as described in IEEE Std 802.11-2020 section
+ * 9.4.2.102.
*/
struct ieee80211_mesh_chansw_params_ie {
u8 mesh_ttl;
@@ -906,6 +940,13 @@ struct ieee80211_mesh_chansw_params_ie {
/**
* struct ieee80211_wide_bw_chansw_ie - wide bandwidth channel switch IE
+ * @new_channel_width: New Channel Width
+ * @new_center_freq_seg0: New Channel Center Frequency Segment 0
+ * @new_center_freq_seg1: New Channel Center Frequency Segment 1
+ *
+ * This structure represents the payload of the "Wide Bandwidth
+ * Channel Switch element" as described in IEEE Std 802.11-2020
+ * section 9.4.2.160.
*/
struct ieee80211_wide_bw_chansw_ie {
u8 new_channel_width;
@@ -913,22 +954,42 @@ struct ieee80211_wide_bw_chansw_ie {
} __packed;
/**
- * struct ieee80211_tim
+ * struct ieee80211_tim_ie - Traffic Indication Map information element
+ * @dtim_count: DTIM Count
+ * @dtim_period: DTIM Period
+ * @bitmap_ctrl: Bitmap Control
+ * @required_octet: "Syntactic sugar" to force the struct size to the
+ * minimum valid size when carried in a non-S1G PPDU
+ * @virtual_map: Partial Virtual Bitmap
*
- * This structure refers to "Traffic Indication Map information element"
+ * This structure represents the payload of the "TIM element" as
+ * described in IEEE Std 802.11-2020 section 9.4.2.5. Note that this
+ * definition is only applicable when the element is carried in a
+ * non-S1G PPDU. When the TIM is carried in an S1G PPDU, the Bitmap
+ * Control and Partial Virtual Bitmap may not be present.
*/
struct ieee80211_tim_ie {
u8 dtim_count;
u8 dtim_period;
u8 bitmap_ctrl;
- /* variable size: 1 - 251 bytes */
- u8 virtual_map[1];
+ union {
+ u8 required_octet;
+ DECLARE_FLEX_ARRAY(u8, virtual_map);
+ };
} __packed;
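
The union keeps sizeof(struct ieee80211_tim_ie) at the 4-octet minimum while letting parsers index the bitmap as a flexible array; consumers such as ieee80211_check_tim() (declared further down in this header) are unaffected. A hedged sketch, where tim, tim_len and aid are assumed to come from the caller's RX path:

if (tim_len >= sizeof(*tim) &&	/* fixed part plus at least one octet */
    ieee80211_check_tim(tim, tim_len, aid))
	set_bit(MY_STA_BUFFERED, &flags);	/* hypothetical flag */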
/**
- * struct ieee80211_meshconf_ie
+ * struct ieee80211_meshconf_ie - Mesh Configuration element
+ * @meshconf_psel: Active Path Selection Protocol Identifier
+ * @meshconf_pmetric: Active Path Selection Metric Identifier
+ * @meshconf_congest: Congestion Control Mode Identifier
+ * @meshconf_synch: Synchronization Method Identifier
+ * @meshconf_auth: Authentication Protocol Identifier
+ * @meshconf_form: Mesh Formation Info
+ * @meshconf_cap: Mesh Capability (see &enum mesh_config_capab_flags)
*
- * This structure refers to "Mesh Configuration information element"
+ * This structure represents the payload of the "Mesh Configuration
+ * element" as described in IEEE Std 802.11-2020 section 9.4.2.97.
*/
struct ieee80211_meshconf_ie {
u8 meshconf_psel;
@@ -950,6 +1011,9 @@ struct ieee80211_meshconf_ie {
* is ongoing
* @IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL: STA is in deep sleep mode or has
* neighbors in deep sleep mode
+ *
+ * Enumerates the "Mesh Capability" as described in IEEE Std
+ * 802.11-2020 section 9.4.2.97.7.
*/
enum mesh_config_capab_flags {
IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS = 0x01,
@@ -960,7 +1024,7 @@ enum mesh_config_capab_flags {
#define IEEE80211_MESHCONF_FORM_CONNECTED_TO_GATE 0x1
-/**
+/*
* mesh channel switch parameters element's flag indicator
*
*/
@@ -969,9 +1033,17 @@ enum mesh_config_capab_flags {
#define WLAN_EID_CHAN_SWITCH_PARAM_REASON BIT(2)
/**
- * struct ieee80211_rann_ie
+ * struct ieee80211_rann_ie - RANN (root announcement) element
+ * @rann_flags: Flags
+ * @rann_hopcount: Hop Count
+ * @rann_ttl: Element TTL
+ * @rann_addr: Root Mesh STA Address
+ * @rann_seq: HWMP Sequence Number
+ * @rann_interval: Interval
+ * @rann_metric: Metric
*
- * This structure refers to "Root Announcement information element"
+ * This structure represents the payload of the "RANN element" as
+ * described in IEEE Std 802.11-2020 section 9.4.2.111.
*/
struct ieee80211_rann_ie {
u8 rann_flags;
@@ -993,7 +1065,7 @@ enum ieee80211_ht_chanwidth_values {
};
/**
- * enum ieee80211_opmode_bits - VHT operating mode field bits
+ * enum ieee80211_vht_opmode_bits - VHT operating mode field bits
* @IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK: channel width mask
* @IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ: 20 MHz channel width
* @IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ: 40 MHz channel width
@@ -1042,9 +1114,12 @@ enum ieee80211_s1g_chanwidth {
#define WLAN_USER_POSITION_LEN 16
/**
- * struct ieee80211_tpc_report_ie
+ * struct ieee80211_tpc_report_ie - TPC Report element
+ * @tx_power: Transmit Power
+ * @link_margin: Link Margin
*
- * This structure refers to "TPC Report element"
+ * This structure represents the payload of the "TPC Report element" as
+ * described in IEEE Std 802.11-2020 section 9.4.2.16.
*/
struct ieee80211_tpc_report_ie {
u8 tx_power;
@@ -1062,9 +1137,14 @@ struct ieee80211_addba_ext_ie {
} __packed;
/**
- * struct ieee80211_s1g_bcn_compat_ie
+ * struct ieee80211_s1g_bcn_compat_ie - S1G Beacon Compatibility element
+ * @compat_info: Compatibility Information
+ * @beacon_int: Beacon Interval
+ * @tsf_completion: TSF Completion
*
- * S1G Beacon Compatibility element
+ * This structure represents the payload of the "S1G Beacon
+ * Compatibility element" as described in IEEE Std 802.11-2020 section
+ * 9.4.2.196.
*/
struct ieee80211_s1g_bcn_compat_ie {
__le16 compat_info;
@@ -1073,9 +1153,15 @@ struct ieee80211_s1g_bcn_compat_ie {
} __packed;
/**
- * struct ieee80211_s1g_oper_ie
+ * struct ieee80211_s1g_oper_ie - S1G Operation element
+ * @ch_width: S1G Operation Information Channel Width
+ * @oper_class: S1G Operation Information Operating Class
+ * @primary_ch: S1G Operation Information Primary Channel Number
+ * @oper_ch: S1G Operation Information Channel Center Frequency
+ * @basic_mcs_nss: Basic S1G-MCS and NSS Set
*
- * S1G Operation element
+ * This structure represents the payload of the "S1G Operation
+ * element" as described in IEEE Std 802.11-2020 section 9.4.2.212.
*/
struct ieee80211_s1g_oper_ie {
u8 ch_width;
@@ -1086,9 +1172,13 @@ struct ieee80211_s1g_oper_ie {
} __packed;
/**
- * struct ieee80211_aid_response_ie
+ * struct ieee80211_aid_response_ie - AID Response element
+ * @aid: AID/Group AID
+ * @switch_count: AID Switch Count
+ * @response_int: AID Response Interval
*
- * AID Response element
+ * This structure represents the payload of the "AID Response element"
+ * as described in IEEE Std 802.11-2020 section 9.4.2.194.
*/
struct ieee80211_aid_response_ie {
__le16 aid;
@@ -1163,6 +1253,30 @@ struct ieee80211_twt_setup {
u8 params[];
} __packed;
+#define IEEE80211_TTLM_MAX_CNT 2
+#define IEEE80211_TTLM_CONTROL_DIRECTION 0x03
+#define IEEE80211_TTLM_CONTROL_DEF_LINK_MAP 0x04
+#define IEEE80211_TTLM_CONTROL_SWITCH_TIME_PRESENT 0x08
+#define IEEE80211_TTLM_CONTROL_EXPECTED_DUR_PRESENT 0x10
+#define IEEE80211_TTLM_CONTROL_LINK_MAP_SIZE 0x20
+
+#define IEEE80211_TTLM_DIRECTION_DOWN 0
+#define IEEE80211_TTLM_DIRECTION_UP 1
+#define IEEE80211_TTLM_DIRECTION_BOTH 2
+
+/**
+ * struct ieee80211_ttlm_elem - TID-To-Link Mapping element
+ *
+ * Defined in section 9.4.2.314 in P802.11be_D4
+ *
+ * @control: the first part of control field
+ * @optional: the second part of control field
+ */
+struct ieee80211_ttlm_elem {
+ u8 control;
+ u8 optional[];
+} __packed;
+
struct ieee80211_mgmt {
__le16 frame_control;
__le16 duration;
@@ -1349,8 +1463,11 @@ struct ieee80211_mgmt {
/* Supported rates membership selectors */
#define BSS_MEMBERSHIP_SELECTOR_HT_PHY 127
#define BSS_MEMBERSHIP_SELECTOR_VHT_PHY 126
-#define BSS_MEMBERSHIP_SELECTOR_HE_PHY 122
+#define BSS_MEMBERSHIP_SELECTOR_GLK 125
+#define BSS_MEMBERSHIP_SELECTOR_EPS 124
#define BSS_MEMBERSHIP_SELECTOR_SAE_H2E 123
+#define BSS_MEMBERSHIP_SELECTOR_HE_PHY 122
+#define BSS_MEMBERSHIP_SELECTOR_EHT_PHY 121
/* mgmt header + 1 byte category code */
#define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u)
@@ -1486,7 +1603,7 @@ struct ieee80211_tdls_data {
/*
* Peer-to-Peer IE attribute related definitions.
*/
-/**
+/*
* enum ieee80211_p2p_attr_id - identifies type of peer-to-peer attribute.
*/
enum ieee80211_p2p_attr_id {
@@ -1536,11 +1653,17 @@ struct ieee80211_p2p_noa_attr {
#define IEEE80211_P2P_OPPPS_CTWINDOW_MASK 0x7F
/**
- * struct ieee80211_bar - HT Block Ack Request
+ * struct ieee80211_bar - Block Ack Request frame format
+ * @frame_control: Frame Control
+ * @duration: Duration
+ * @ra: RA
+ * @ta: TA
+ * @control: BAR Control
+ * @start_seq_num: Starting Sequence Number (see Figure 9-37)
*
- * This structure refers to "HT BlockAckReq" as
- * described in 802.11n draft section 7.2.1.7.1
- */
+ * This structure represents the "BlockAckReq frame format"
+ * as described in IEEE Std 802.11-2020 section 9.3.1.7.
+ */
struct ieee80211_bar {
__le16 frame_control;
__le16 duration;
@@ -1560,13 +1683,17 @@ struct ieee80211_bar {
#define IEEE80211_HT_MCS_MASK_LEN 10
/**
- * struct ieee80211_mcs_info - MCS information
+ * struct ieee80211_mcs_info - Supported MCS Set field
* @rx_mask: RX mask
* @rx_highest: highest supported RX rate. If set represents
* the highest supported RX data rate in units of 1 Mbps.
* If this field is 0 this value should not be used to
* consider the highest RX data rate supported.
* @tx_params: TX parameters
+ * @reserved: Reserved bits
+ *
+ * This structure represents the "Supported MCS Set field" as
+ * described in IEEE Std 802.11-2020 section 9.4.2.55.4.
*/
struct ieee80211_mcs_info {
u8 rx_mask[IEEE80211_HT_MCS_MASK_LEN];
@@ -1585,6 +1712,8 @@ struct ieee80211_mcs_info {
#define IEEE80211_HT_MCS_TX_MAX_STREAMS 4
#define IEEE80211_HT_MCS_TX_UNEQUAL_MODULATION 0x10
+#define IEEE80211_HT_MCS_CHAINS(mcs) ((mcs) == 32 ? 1 : (1 + ((mcs) >> 3)))
+
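The new helper maps an HT MCS index to its required chain count: each block of eight MCS indices adds one spatial stream, while MCS 32 (the 40 MHz duplicate mode) is single-stream. A quick compile-time spot check of the arithmetic:

BUILD_BUG_ON(IEEE80211_HT_MCS_CHAINS(7) != 1);	/* MCS 0-7: one chain */
BUILD_BUG_ON(IEEE80211_HT_MCS_CHAINS(15) != 2);	/* MCS 8-15: two chains */
BUILD_BUG_ON(IEEE80211_HT_MCS_CHAINS(32) != 1);	/* duplicate mode */
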
/*
* 802.11n D5.0 20.3.5 / 20.6 says:
* - indices 0 to 7 and 32 are single spatial stream
@@ -1597,10 +1726,16 @@ struct ieee80211_mcs_info {
(IEEE80211_HT_MCS_UNEQUAL_MODULATION_START / 8)
/**
- * struct ieee80211_ht_cap - HT capabilities
+ * struct ieee80211_ht_cap - HT capabilities element
+ * @cap_info: HT Capability Information
+ * @ampdu_params_info: A-MPDU Parameters
+ * @mcs: Supported MCS Set
+ * @extended_ht_cap_info: HT Extended Capabilities
+ * @tx_BF_cap_info: Transmit Beamforming Capabilities
+ * @antenna_selection_info: ASEL Capability
*
- * This structure is the "HT capabilities element" as
- * described in 802.11n D5.0 7.3.2.57
+ * This structure represents the payload of the "HT Capabilities
+ * element" as described in IEEE Std 802.11-2020 section 9.4.2.55.
*/
struct ieee80211_ht_cap {
__le16 cap_info;
@@ -1688,9 +1823,14 @@ enum ieee80211_min_mpdu_spacing {
/**
* struct ieee80211_ht_operation - HT operation IE
+ * @primary_chan: Primary Channel
+ * @ht_param: HT Operation Information parameters
+ * @operation_mode: HT Operation Information operation mode
+ * @stbc_param: HT Operation Information STBC params
+ * @basic_set: Basic HT-MCS Set
*
- * This structure is the "HT operation element" as
- * described in 802.11n-2009 7.3.2.57
+ * This structure represents the payload of the "HT Operation
+ * element" as described in IEEE Std 802.11-2020 section 9.4.2.56.
*/
struct ieee80211_ht_operation {
u8 primary_chan;
@@ -1859,9 +1999,12 @@ struct ieee80211_vht_operation {
/**
* struct ieee80211_he_cap_elem - HE capabilities element
+ * @mac_cap_info: HE MAC Capabilities Information
+ * @phy_cap_info: HE PHY Capabilities Information
*
- * This structure is the "HE capabilities element" fixed fields as
- * described in P802.11ax_D4.0 section 9.4.2.242.2 and 9.4.2.242.3
+ * This structure represents the fixed fields of the payload of the
+ * "HE capabilities element" as described in IEEE Std 802.11ax-2021
+ * sections 9.4.2.248.2 and 9.4.2.248.3.
*/
struct ieee80211_he_cap_elem {
u8 mac_cap_info[6];
@@ -1920,35 +2063,45 @@ struct ieee80211_he_mcs_nss_supp {
} __packed;
/**
- * struct ieee80211_he_operation - HE capabilities element
+ * struct ieee80211_he_operation - HE Operation element
+ * @he_oper_params: HE Operation Parameters + BSS Color Information
+ * @he_mcs_nss_set: Basic HE-MCS And NSS Set
+ * @optional: Optional fields VHT Operation Information, Max Co-Hosted
+ * BSSID Indicator, and 6 GHz Operation Information
*
- * This structure is the "HE operation element" fields as
- * described in P802.11ax_D4.0 section 9.4.2.243
+ * This structure represents the payload of the "HE Operation
+ * element" as described in IEEE Std 802.11ax-2021 section 9.4.2.249.
*/
struct ieee80211_he_operation {
__le32 he_oper_params;
__le16 he_mcs_nss_set;
- /* Optional 0,1,3,4,5,7 or 8 bytes: depends on @he_oper_params */
u8 optional[];
} __packed;
/**
- * struct ieee80211_he_spr - HE spatial reuse element
+ * struct ieee80211_he_spr - Spatial Reuse Parameter Set element
+ * @he_sr_control: SR Control
+ * @optional: Optional fields Non-SRG OBSS PD Max Offset, SRG OBSS PD
+ * Min Offset, SRG OBSS PD Max Offset, SRG BSS Color
+ * Bitmap, and SRG Partial BSSID Bitmap
*
- * This structure is the "HE spatial reuse element" element as
- * described in P802.11ax_D4.0 section 9.4.2.241
+ * This structure represents the payload of the "Spatial Reuse
+ * Parameter Set element" as described in IEEE Std 802.11ax-2021
+ * section 9.4.2.252.
*/
struct ieee80211_he_spr {
u8 he_sr_control;
- /* Optional 0 to 19 bytes: depends on @he_sr_control */
u8 optional[];
} __packed;
/**
* struct ieee80211_he_mu_edca_param_ac_rec - MU AC Parameter Record field
+ * @aifsn: ACI/AIFSN
+ * @ecw_min_max: ECWmin/ECWmax
+ * @mu_edca_timer: MU EDCA Timer
*
- * This structure is the "MU AC Parameter Record" fields as
- * described in P802.11ax_D4.0 section 9.4.2.245
+ * This structure represents the "MU AC Parameter Record" as described
+ * in IEEE Std 802.11ax-2021 section 9.4.2.251, Figure 9-788p.
*/
struct ieee80211_he_mu_edca_param_ac_rec {
u8 aifsn;
@@ -1958,9 +2111,14 @@ struct ieee80211_he_mu_edca_param_ac_rec {
/**
* struct ieee80211_mu_edca_param_set - MU EDCA Parameter Set element
+ * @mu_qos_info: QoS Info
+ * @ac_be: MU AC_BE Parameter Record
+ * @ac_bk: MU AC_BK Parameter Record
+ * @ac_vi: MU AC_VI Parameter Record
+ * @ac_vo: MU AC_VO Parameter Record
*
- * This structure is the "MU EDCA Parameter Set element" fields as
- * described in P802.11ax_D4.0 section 9.4.2.245
+ * This structure represents the payload of the "MU EDCA Parameter Set
+ * element" as described in IEEE Std 802.11ax-2021 section 9.4.2.251.
*/
struct ieee80211_mu_edca_param_set {
u8 mu_qos_info;
@@ -1993,12 +2151,18 @@ struct ieee80211_mu_edca_param_set {
* @rx_tx_mcs13_max_nss: indicates the maximum number of spatial streams
* supported for reception and the maximum number of spatial streams
* supported for transmission for MCS 12 - 13.
+ * @rx_tx_max_nss: array of the previous fields for easier loop access
*/
struct ieee80211_eht_mcs_nss_supp_20mhz_only {
- u8 rx_tx_mcs7_max_nss;
- u8 rx_tx_mcs9_max_nss;
- u8 rx_tx_mcs11_max_nss;
- u8 rx_tx_mcs13_max_nss;
+ union {
+ struct {
+ u8 rx_tx_mcs7_max_nss;
+ u8 rx_tx_mcs9_max_nss;
+ u8 rx_tx_mcs11_max_nss;
+ u8 rx_tx_mcs13_max_nss;
+ };
+ u8 rx_tx_max_nss[4];
+ };
};
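
The anonymous union preserves the existing field names while adding rx_tx_max_nss[] for iteration. A sketch of a driver scanning for the highest advertised RX NSS, assuming supp points at one of these structs and IEEE80211_EHT_MCS_NSS_RX is the low-nibble mask defined elsewhere in this header:

u8 i, max_nss = 0;

for (i = 0; i < ARRAY_SIZE(supp->rx_tx_max_nss); i++)
	max_nss = max(max_nss,
		      u8_get_bits(supp->rx_tx_max_nss[i],
				  IEEE80211_EHT_MCS_NSS_RX));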
/**
@@ -2018,11 +2182,17 @@ struct ieee80211_eht_mcs_nss_supp_20mhz_only {
* @rx_tx_mcs13_max_nss: indicates the maximum number of spatial streams
* supported for reception and the maximum number of spatial streams
* supported for transmission for MCS 12 - 13.
+ * @rx_tx_max_nss: array of the previous fields for easier loop access
*/
struct ieee80211_eht_mcs_nss_supp_bw {
- u8 rx_tx_mcs9_max_nss;
- u8 rx_tx_mcs11_max_nss;
- u8 rx_tx_mcs13_max_nss;
+ union {
+ struct {
+ u8 rx_tx_mcs9_max_nss;
+ u8 rx_tx_mcs11_max_nss;
+ u8 rx_tx_mcs13_max_nss;
+ };
+ u8 rx_tx_max_nss[3];
+ };
};
/**
@@ -2075,7 +2245,7 @@ struct ieee80211_eht_cap_elem {
*/
struct ieee80211_eht_operation {
u8 params;
- __le32 basic_mcs_nss;
+ struct ieee80211_eht_mcs_nss_supp_20mhz_only basic_mcs_nss;
u8 optional[];
} __packed;
@@ -2162,9 +2332,9 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
* enum ieee80211_ap_reg_power - regulatory power for a Access Point
*
* @IEEE80211_REG_UNSET_AP: Access Point has no regulatory power mode
- * @IEEE80211_REG_LPI: Indoor Access Point
- * @IEEE80211_REG_SP: Standard power Access Point
- * @IEEE80211_REG_VLP: Very low power Access Point
+ * @IEEE80211_REG_LPI_AP: Indoor Access Point
+ * @IEEE80211_REG_SP_AP: Standard power Access Point
+ * @IEEE80211_REG_VLP_AP: Very low power Access Point
* @IEEE80211_REG_AP_POWER_AFTER_LAST: internal
* @IEEE80211_REG_AP_POWER_MAX: maximum value
*/
@@ -2550,9 +2720,10 @@ static inline bool ieee80211_he_capa_size_ok(const u8 *data, u8 len)
#define IEEE80211_6GHZ_CTRL_REG_LPI_AP 0
#define IEEE80211_6GHZ_CTRL_REG_SP_AP 1
+#define IEEE80211_6GHZ_CTRL_REG_VLP_AP 2
/**
- * ieee80211_he_6ghz_oper - HE 6 GHz operation Information field
+ * struct ieee80211_he_6ghz_oper - HE 6 GHz operation Information field
* @primary: primary channel
* @control: control flags
* @ccfs0: channel center frequency segment 0
@@ -2599,9 +2770,13 @@ enum ieee80211_tx_power_intrpt_type {
};
/**
- * struct ieee80211_tx_pwr_env
+ * struct ieee80211_tx_pwr_env - Transmit Power Envelope
+ * @tx_power_info: Transmit Power Information field
+ * @tx_power: Maximum Transmit Power field
*
- * This structure represents the "Transmit Power Envelope element"
+ * This structure represents the payload of the "Transmit Power
+ * Envelope element" as described in IEEE Std 802.11ax-2021 section
+ * 9.4.2.161.
*/
struct ieee80211_tx_pwr_env {
u8 tx_power_info;
@@ -2656,12 +2831,14 @@ ieee80211_he_oper_size(const u8 *he_oper_ie)
static inline const struct ieee80211_he_6ghz_oper *
ieee80211_he_6ghz_oper(const struct ieee80211_he_operation *he_oper)
{
- const u8 *ret = (const void *)&he_oper->optional;
+ const u8 *ret;
u32 he_oper_params;
if (!he_oper)
return NULL;
+ ret = (const void *)&he_oper->optional;
+
he_oper_params = le32_to_cpu(he_oper->he_oper_params);
if (!(he_oper_params & IEEE80211_HE_OPERATION_6GHZ_OP_INFO))
@@ -2856,6 +3033,7 @@ ieee80211_he_spr_size(const u8 *he_spr_ie)
/* Maximum number of supported EHT LTF is split */
#define IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK 0xc0
+#define IEEE80211_EHT_PHY_CAP5_SUPP_EXTRA_EHT_LTF 0x40
#define IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK 0x07
#define IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK 0x78
@@ -2997,6 +3175,28 @@ ieee80211_eht_oper_size_ok(const u8 *data, u8 len)
return len >= needed;
}
+#define IEEE80211_BW_IND_DIS_SUBCH_PRESENT BIT(1)
+
+struct ieee80211_bandwidth_indication {
+ u8 params;
+ struct ieee80211_eht_operation_info info;
+} __packed;
+
+static inline bool
+ieee80211_bandwidth_indication_size_ok(const u8 *data, u8 len)
+{
+ const struct ieee80211_bandwidth_indication *bwi = (const void *)data;
+
+ if (len < sizeof(*bwi))
+ return false;
+
+ if (bwi->params & IEEE80211_BW_IND_DIS_SUBCH_PRESENT &&
+ len < sizeof(*bwi) + 2)
+ return false;
+
+ return true;
+}
+
#define LISTEN_INT_USF GENMASK(15, 14)
#define LISTEN_INT_UI GENMASK(13, 0)
@@ -3454,6 +3654,8 @@ enum ieee80211_eid_ext {
WLAN_EID_EXT_EHT_OPERATION = 106,
WLAN_EID_EXT_EHT_MULTI_LINK = 107,
WLAN_EID_EXT_EHT_CAPABILITY = 108,
+ WLAN_EID_EXT_TID_TO_LINK_MAPPING = 109,
+ WLAN_EID_EXT_BANDWIDTH_INDICATION = 135,
};
/* Action category code */
@@ -4221,6 +4423,36 @@ static inline bool ieee80211_is_public_action(struct ieee80211_hdr *hdr,
}
/**
+ * ieee80211_is_protected_dual_of_public_action - check if skb contains a
+ * protected dual of public action management frame
+ * @skb: the skb containing the frame, length will be checked
+ *
+ * Return: true if the skb contains a protected dual of public action
+ * management frame, false otherwise.
+ */
+static inline bool
+ieee80211_is_protected_dual_of_public_action(struct sk_buff *skb)
+{
+ u8 action;
+
+ if (!ieee80211_is_public_action((void *)skb->data, skb->len) ||
+ skb->len < IEEE80211_MIN_ACTION_SIZE + 1)
+ return false;
+
+ action = *(u8 *)(skb->data + IEEE80211_MIN_ACTION_SIZE);
+
+ return action != WLAN_PUB_ACTION_20_40_BSS_COEX &&
+ action != WLAN_PUB_ACTION_DSE_REG_LOC_ANN &&
+ action != WLAN_PUB_ACTION_MSMT_PILOT &&
+ action != WLAN_PUB_ACTION_TDLS_DISCOVER_RES &&
+ action != WLAN_PUB_ACTION_LOC_TRACK_NOTI &&
+ action != WLAN_PUB_ACTION_FTM_REQUEST &&
+ action != WLAN_PUB_ACTION_FTM_RESPONSE &&
+ action != WLAN_PUB_ACTION_FILS_DISCOVERY &&
+ action != WLAN_PUB_ACTION_VENDOR_SPECIFIC;
+}
+
+/**
* _ieee80211_is_group_privacy_action - check if frame is a group addressed
* privacy action frame
* @hdr: the frame
@@ -4291,12 +4523,11 @@ static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim,
/**
* ieee80211_get_tdls_action - get tdls packet action (or -1, if not tdls packet)
* @skb: the skb containing the frame, length will not be checked
- * @hdr_size: the size of the ieee80211_hdr that starts at skb->data
*
* This function assumes the frame is a data frame, and that the network header
* is in the correct place.
*/
-static inline int ieee80211_get_tdls_action(struct sk_buff *skb, u32 hdr_size)
+static inline int ieee80211_get_tdls_action(struct sk_buff *skb)
{
if (!skb_is_nonlinear(skb) &&
skb->len > (skb_network_offset(skb) + 2)) {
@@ -4462,7 +4693,7 @@ static inline bool for_each_element_completed(const struct element *element,
return (const u8 *)element == (const u8 *)data + datalen;
}
-/**
+/*
* RSNX Capabilities:
* bits 0-3: Field length (n-1)
*/
@@ -4477,8 +4708,8 @@ static inline bool for_each_element_completed(const struct element *element,
#define IEEE80211_AP_INFO_TBTT_HDR_FILTERED 0x04
#define IEEE80211_AP_INFO_TBTT_HDR_COLOC 0x08
#define IEEE80211_AP_INFO_TBTT_HDR_COUNT 0xF0
-#define IEEE80211_TBTT_INFO_OFFSET_BSSID_BSS_PARAM 9
-#define IEEE80211_TBTT_INFO_OFFSET_BSSID_SSSID_BSS_PARAM 13
+#define IEEE80211_TBTT_INFO_TYPE_TBTT 0
+#define IEEE80211_TBTT_INFO_TYPE_MLD 1
#define IEEE80211_RNR_TBTT_PARAMS_OCT_RECOMMENDED 0x01
#define IEEE80211_RNR_TBTT_PARAMS_SAME_SSID 0x02
@@ -4488,6 +4719,9 @@ static inline bool for_each_element_completed(const struct element *element,
#define IEEE80211_RNR_TBTT_PARAMS_PROBE_ACTIVE 0x20
#define IEEE80211_RNR_TBTT_PARAMS_COLOC_AP 0x40
+#define IEEE80211_RNR_TBTT_PARAMS_PSD_NO_LIMIT 127
+#define IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED -128
+
struct ieee80211_neighbor_ap_info {
u8 tbtt_info_hdr;
u8 tbtt_info_len;
@@ -4502,6 +4736,42 @@ enum ieee80211_range_params_max_total_ltf {
IEEE80211_RANGE_PARAMS_MAX_TOTAL_LTF_UNSPECIFIED,
};
+/*
+ * reduced neighbor report, based on Draft P802.11be_D3.0,
+ * section 9.4.2.170.2.
+ */
+struct ieee80211_rnr_mld_params {
+ u8 mld_id;
+ __le16 params;
+} __packed;
+
+#define IEEE80211_RNR_MLD_PARAMS_LINK_ID 0x000F
+#define IEEE80211_RNR_MLD_PARAMS_BSS_CHANGE_COUNT 0x0FF0
+#define IEEE80211_RNR_MLD_PARAMS_UPDATES_INCLUDED 0x1000
+#define IEEE80211_RNR_MLD_PARAMS_DISABLED_LINK 0x2000
+
+/* Format of the TBTT information element if it has 7, 8 or 9 bytes */
+struct ieee80211_tbtt_info_7_8_9 {
+ u8 tbtt_offset;
+ u8 bssid[ETH_ALEN];
+
+ /* The following element is optional, structure may not grow */
+ u8 bss_params;
+ s8 psd_20;
+} __packed;
+
+/* Format of the TBTT information element if it has >= 11 bytes */
+struct ieee80211_tbtt_info_ge_11 {
+ u8 tbtt_offset;
+ u8 bssid[ETH_ALEN];
+ __le32 short_ssid;
+
+ /* The following elements are optional, structure may grow */
+ u8 bss_params;
+ s8 psd_20;
+ struct ieee80211_rnr_mld_params mld_params;
+} __packed;
+
/* multi-link device */
#define IEEE80211_MLD_MAX_NUM_LINKS 15
@@ -4529,6 +4799,14 @@ struct ieee80211_multi_link_elem {
#define IEEE80211_MED_SYNC_DELAY_SYNC_OFDM_ED_THRESH 0x0f00
#define IEEE80211_MED_SYNC_DELAY_SYNC_MAX_NUM_TXOPS 0xf000
+/*
+ * Described in P802.11be_D3.0
+ * dot11MSDTimerDuration should default to 5484 (i.e. 171.375)
+ * dot11MSDOFDMEDthreshold defaults to -72 (i.e. 0)
+ * dot11MSDTXOPMAX defaults to 1
+ */
+#define IEEE80211_MED_SYNC_DELAY_DEFAULT 0x10ac
+
#define IEEE80211_EML_CAP_EMLSR_SUPP 0x0001
#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY 0x000e
#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_0US 0
@@ -4611,15 +4889,12 @@ static inline u8 ieee80211_mle_common_size(const u8 *data)
case IEEE80211_ML_CONTROL_TYPE_BASIC:
case IEEE80211_ML_CONTROL_TYPE_PREQ:
case IEEE80211_ML_CONTROL_TYPE_TDLS:
+ case IEEE80211_ML_CONTROL_TYPE_RECONF:
/*
* The length is the first octet pointed by mle->variable so no
* need to add anything
*/
break;
- case IEEE80211_ML_CONTROL_TYPE_RECONF:
- if (control & IEEE80211_MLC_RECONF_PRES_MLD_MAC_ADDR)
- common += ETH_ALEN;
- return common;
case IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS:
if (control & IEEE80211_MLC_PRIO_ACCESS_PRES_AP_MLD_MAC_ADDR)
common += ETH_ALEN;
@@ -4633,6 +4908,95 @@ static inline u8 ieee80211_mle_common_size(const u8 *data)
}
/**
+ * ieee80211_mle_get_bss_param_ch_cnt - returns the BSS parameter change count
+ * @mle: the basic multi link element
+ *
+ * The element is assumed to be of the correct type (BASIC) and big enough,
+ * this must be checked using ieee80211_mle_type_ok().
+ *
+ * If the BSS parameter change count value can't be found (the presence bit
+ * for it is clear), 0 will be returned.
+ */
+static inline u8
+ieee80211_mle_get_bss_param_ch_cnt(const struct ieee80211_multi_link_elem *mle)
+{
+ u16 control = le16_to_cpu(mle->control);
+ const u8 *common = mle->variable;
+
+	/* common now points at the beginning of ieee80211_mle_basic_common_info */
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+
+ if (!(control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT))
+ return 0;
+
+ if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
+ common += 1;
+
+ return *common;
+}
+
+/**
+ * ieee80211_mle_get_eml_med_sync_delay - returns the medium sync delay
+ * @data: pointer to the multi link EHT IE
+ *
+ * The element is assumed to be of the correct type (BASIC) and big enough,
+ * this must be checked using ieee80211_mle_type_ok().
+ *
+ * If the medium synchronization is not present, then the default value is
+ * returned.
+ */
+static inline u16 ieee80211_mle_get_eml_med_sync_delay(const u8 *data)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control = le16_to_cpu(mle->control);
+ const u8 *common = mle->variable;
+
+	/* common now points at the beginning of ieee80211_mle_basic_common_info */
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+
+ if (!(control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY))
+ return IEEE80211_MED_SYNC_DELAY_DEFAULT;
+
+ if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT)
+ common += 1;
+
+ return get_unaligned_le16(common);
+}
+
+/**
+ * ieee80211_mle_get_eml_cap - returns the EML capability
+ * @data: pointer to the multi link EHT IE
+ *
+ * The element is assumed to be of the correct type (BASIC) and big enough,
+ * this must be checked using ieee80211_mle_type_ok().
+ *
+ * If the EML capability is not present, 0 will be returned.
+ */
+static inline u16 ieee80211_mle_get_eml_cap(const u8 *data)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control = le16_to_cpu(mle->control);
+ const u8 *common = mle->variable;
+
+	/* common now points at the beginning of ieee80211_mle_basic_common_info */
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+
+ if (!(control & IEEE80211_MLC_BASIC_PRES_EML_CAPA))
+ return 0;
+
+ if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY)
+ common += 2;
+
+ return get_unaligned_le16(common);
+}
+
+/**
* ieee80211_mle_size_ok - validate multi-link element size
* @data: pointer to the element data
* @len: length of the containing element
@@ -4700,6 +5064,28 @@ static inline bool ieee80211_mle_size_ok(const u8 *data, size_t len)
return mle->variable[0] >= common;
}
+/**
+ * ieee80211_mle_type_ok - validate multi-link element type and size
+ * @data: pointer to the element data
+ * @type: expected type of the element
+ * @len: length of the containing element
+ */
+static inline bool ieee80211_mle_type_ok(const u8 *data, u8 type, size_t len)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control;
+
+ if (!ieee80211_mle_size_ok(data, len))
+ return false;
+
+ control = le16_to_cpu(mle->control);
+
+ if (u16_get_bits(control, IEEE80211_ML_CONTROL_TYPE) == type)
+ return true;
+
+ return false;
+}
+
enum ieee80211_mle_subelems {
IEEE80211_MLE_SUBELEM_PER_STA_PROFILE = 0,
IEEE80211_MLE_SUBELEM_FRAGMENT = 254,
@@ -4722,11 +5108,13 @@ struct ieee80211_mle_per_sta_profile {
} __packed;
/**
- * ieee80211_mle_sta_prof_size_ok - validate multi-link element sta profile size
+ * ieee80211_mle_basic_sta_prof_size_ok - validate basic multi-link element sta
+ * profile size
* @data: pointer to the sub element data
* @len: length of the containing sub element
*/
-static inline bool ieee80211_mle_sta_prof_size_ok(const u8 *data, size_t len)
+static inline bool ieee80211_mle_basic_sta_prof_size_ok(const u8 *data,
+ size_t len)
{
const struct ieee80211_mle_per_sta_profile *prof = (const void *)data;
u16 control;
@@ -4746,21 +5134,126 @@ static inline bool ieee80211_mle_sta_prof_size_ok(const u8 *data, size_t len)
info_len += 8;
if (control & IEEE80211_MLE_STA_CONTROL_DTIM_INFO_PRESENT)
info_len += 2;
- if (control & IEEE80211_MLE_STA_CONTROL_BSS_PARAM_CHANGE_CNT_PRESENT)
- info_len += 1;
-
if (control & IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE &&
- control & IEEE80211_MLE_STA_CONTROL_NSTR_BITMAP_SIZE) {
+ control & IEEE80211_MLE_STA_CONTROL_NSTR_LINK_PAIR_PRESENT) {
if (control & IEEE80211_MLE_STA_CONTROL_NSTR_BITMAP_SIZE)
info_len += 2;
else
info_len += 1;
}
+ if (control & IEEE80211_MLE_STA_CONTROL_BSS_PARAM_CHANGE_CNT_PRESENT)
+ info_len += 1;
return prof->sta_info_len >= info_len &&
fixed + prof->sta_info_len <= len;
}
+/**
+ * ieee80211_mle_basic_sta_prof_bss_param_ch_cnt - get per-STA profile BSS
+ * parameter change count
+ * @prof: the per-STA profile, having been checked with
+ * ieee80211_mle_basic_sta_prof_size_ok() for the correct length
+ *
+ * Return: The BSS parameter change count value if present, 0 otherwise.
+ */
+static inline u8
+ieee80211_mle_basic_sta_prof_bss_param_ch_cnt(const struct ieee80211_mle_per_sta_profile *prof)
+{
+ u16 control = le16_to_cpu(prof->control);
+ const u8 *pos = prof->variable;
+
+ if (!(control & IEEE80211_MLE_STA_CONTROL_BSS_PARAM_CHANGE_CNT_PRESENT))
+ return 0;
+
+ if (control & IEEE80211_MLE_STA_CONTROL_STA_MAC_ADDR_PRESENT)
+ pos += 6;
+ if (control & IEEE80211_MLE_STA_CONTROL_BEACON_INT_PRESENT)
+ pos += 2;
+ if (control & IEEE80211_MLE_STA_CONTROL_TSF_OFFS_PRESENT)
+ pos += 8;
+ if (control & IEEE80211_MLE_STA_CONTROL_DTIM_INFO_PRESENT)
+ pos += 2;
+ if (control & IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE &&
+ control & IEEE80211_MLE_STA_CONTROL_NSTR_LINK_PAIR_PRESENT) {
+ if (control & IEEE80211_MLE_STA_CONTROL_NSTR_BITMAP_SIZE)
+ pos += 2;
+ else
+ pos += 1;
+ }
+
+ return *pos;
+}
+
+#define IEEE80211_MLE_STA_RECONF_CONTROL_LINK_ID 0x000f
+#define IEEE80211_MLE_STA_RECONF_CONTROL_COMPLETE_PROFILE 0x0010
+#define IEEE80211_MLE_STA_RECONF_CONTROL_STA_MAC_ADDR_PRESENT 0x0020
+#define IEEE80211_MLE_STA_RECONF_CONTROL_AP_REM_TIMER_PRESENT 0x0040
+#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_UPDATE_TYPE 0x0780
+#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_PARAMS_PRESENT 0x0800
+
+/**
+ * ieee80211_mle_reconf_sta_prof_size_ok - validate reconfiguration multi-link
+ * element sta profile size.
+ * @data: pointer to the sub element data
+ * @len: length of the containing sub element
+ */
+static inline bool ieee80211_mle_reconf_sta_prof_size_ok(const u8 *data,
+ size_t len)
+{
+ const struct ieee80211_mle_per_sta_profile *prof = (const void *)data;
+ u16 control;
+ u8 fixed = sizeof(*prof);
+ u8 info_len = 1;
+
+ if (len < fixed)
+ return false;
+
+ control = le16_to_cpu(prof->control);
+
+ if (control & IEEE80211_MLE_STA_RECONF_CONTROL_STA_MAC_ADDR_PRESENT)
+ info_len += ETH_ALEN;
+ if (control & IEEE80211_MLE_STA_RECONF_CONTROL_AP_REM_TIMER_PRESENT)
+ info_len += 2;
+ if (control & IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_PARAMS_PRESENT)
+ info_len += 2;
+
+ return prof->sta_info_len >= info_len &&
+ fixed + prof->sta_info_len - 1 <= len;
+}
+
+static inline bool ieee80211_tid_to_link_map_size_ok(const u8 *data, size_t len)
+{
+ const struct ieee80211_ttlm_elem *t2l = (const void *)data;
+ u8 control, fixed = sizeof(*t2l), elem_len = 0;
+
+ if (len < fixed)
+ return false;
+
+ control = t2l->control;
+
+ if (control & IEEE80211_TTLM_CONTROL_SWITCH_TIME_PRESENT)
+ elem_len += 2;
+ if (control & IEEE80211_TTLM_CONTROL_EXPECTED_DUR_PRESENT)
+ elem_len += 3;
+
+ if (!(control & IEEE80211_TTLM_CONTROL_DEF_LINK_MAP)) {
+ u8 bm_size;
+
+ elem_len += 1;
+ if (len < fixed + elem_len)
+ return false;
+
+ if (control & IEEE80211_TTLM_CONTROL_LINK_MAP_SIZE)
+ bm_size = 1;
+ else
+ bm_size = 2;
+
+ elem_len += hweight8(t2l->optional[0]) * bm_size;
+ }
+
+ return len >= fixed + elem_len;
+}
+
#define for_each_mle_subelement(_elem, _data, _len) \
if (ieee80211_mle_size_ok(_data, _len)) \
for_each_element(_elem, \
diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h
index 1ed52441972f..10a1e81434cb 100644
--- a/include/linux/if_arp.h
+++ b/include/linux/if_arp.h
@@ -53,6 +53,10 @@ static inline bool dev_is_mac_header_xmit(const struct net_device *dev)
case ARPHRD_NONE:
case ARPHRD_RAWIP:
case ARPHRD_PIMREG:
+ /* PPP adds its l2 header automatically in ppp_start_xmit().
+ * This makes it look like an l3 device to __bpf_redirect() and tcf_mirred_init().
+ */
+ case ARPHRD_PPP:
return false;
default:
return true;
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index 8de6b6e67829..cdc684e04a2f 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -162,8 +162,8 @@ struct team_option {
bool per_port;
unsigned int array_size; /* != 0 means the option is array */
enum team_option_type type;
- int (*init)(struct team *team, struct team_option_inst_info *info);
- int (*getter)(struct team *team, struct team_gsetter_ctx *ctx);
+ void (*init)(struct team *team, struct team_option_inst_info *info);
+ void (*getter)(struct team *team, struct team_gsetter_ctx *ctx);
int (*setter)(struct team *team, struct team_gsetter_ctx *ctx);
};
@@ -189,6 +189,8 @@ struct team {
struct net_device *dev; /* associated netdevice */
struct team_pcpu_stats __percpu *pcpu_stats;
+ const struct header_ops *header_ops_cache;
+
struct mutex lock; /* used for overall locking, e.g. port lists write */
/*
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 6ba71957851e..c1645c86eed9 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -408,7 +408,7 @@ static inline int __vlan_insert_tag(struct sk_buff *skb,
* @mac_len: MAC header length including outer vlan headers
*
* Inserts the VLAN tag into @skb as part of the payload at offset mac_len
- * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
+ * Returns a VLAN tagged skb. This might change skb->head.
*
* Following the skb_unshare() example, in case of error, the calling function
* doesn't have to worry about freeing the original skb.
@@ -437,7 +437,7 @@ static inline struct sk_buff *vlan_insert_inner_tag(struct sk_buff *skb,
* @vlan_tci: VLAN TCI to insert
*
* Inserts the VLAN tag into @skb as part of the payload
- * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
+ * Returns a VLAN tagged skb. This might change skb->head.
*
* Following the skb_unshare() example, in case of error, the calling function
* doesn't have to worry about freeing the original skb.
@@ -457,7 +457,7 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
* @vlan_tci: VLAN TCI to insert
*
* Inserts the VLAN tag into @skb as part of the payload
- * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
+ * Returns a VLAN tagged skb. This might change skb->head.
*
* Following the skb_unshare() example, in case of error, the calling function
* doesn't have to worry about freeing the original skb.
@@ -540,7 +540,7 @@ static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
if (!eth_type_vlan(veth->h_vlan_proto))
- return -EINVAL;
+ return -ENODATA;
*vlan_tci = ntohs(veth->h_vlan_TCI);
return 0;
@@ -561,7 +561,7 @@ static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
return 0;
} else {
*vlan_tci = 0;
- return -EINVAL;
+ return -ENODATA;
}
}
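
Returning -ENODATA here lets callers separate "no tag present" from genuine errors; a sketch of the usual fallback pattern (what combined helpers in this header build on):

u16 tci;

if (__vlan_hwaccel_get_tag(skb, &tci) == -ENODATA &&
    __vlan_get_tag(skb, &tci) == -ENODATA)
	tci = 0;	/* genuinely untagged frame */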
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index ebf4349a53af..5171231f70a8 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -39,7 +39,7 @@ struct ip_sf_socklist {
unsigned int sl_max;
unsigned int sl_count;
struct rcu_head rcu;
- __be32 sl_addr[];
+ __be32 sl_addr[] __counted_by(sl_max);
};
#define IP_SFBLOCK 10 /* allocate this many at once */
diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
index 7852f6c9a714..719cf9cc6e1a 100644
--- a/include/linux/iio/adc/ad_sigma_delta.h
+++ b/include/linux/iio/adc/ad_sigma_delta.h
@@ -8,6 +8,8 @@
#ifndef __AD_SIGMA_DELTA_H__
#define __AD_SIGMA_DELTA_H__
+#include <linux/iio/iio.h>
+
enum ad_sigma_delta_mode {
AD_SD_MODE_CONTINUOUS = 0,
AD_SD_MODE_SINGLE = 1,
@@ -99,7 +101,7 @@ struct ad_sigma_delta {
* 'rx_buf' is up to 32 bits per sample + 64 bit timestamp,
* rounded to 16 bytes to take into account padding.
*/
- uint8_t tx_buf[4] ____cacheline_aligned;
+ uint8_t tx_buf[4] __aligned(IIO_DMA_MINALIGN);
uint8_t rx_buf[16] __aligned(8);
};
diff --git a/include/linux/iio/adc/adi-axi-adc.h b/include/linux/iio/adc/adi-axi-adc.h
index 52620e5b8052..b7904992d561 100644
--- a/include/linux/iio/adc/adi-axi-adc.h
+++ b/include/linux/iio/adc/adi-axi-adc.h
@@ -41,6 +41,7 @@ struct adi_axi_adc_chip_info {
* @reg_access IIO debugfs_reg_access hook for the client ADC
* @read_raw IIO read_raw hook for the client ADC
* @write_raw IIO write_raw hook for the client ADC
+ * @read_avail IIO read_avail hook for the client ADC
*/
struct adi_axi_adc_conv {
const struct adi_axi_adc_chip_info *chip_info;
@@ -54,6 +55,9 @@ struct adi_axi_adc_conv {
int (*write_raw)(struct adi_axi_adc_conv *conv,
struct iio_chan_spec const *chan,
int val, int val2, long mask);
+ int (*read_avail)(struct adi_axi_adc_conv *conv,
+ struct iio_chan_spec const *chan,
+ const int **val, int *type, int *length, long mask);
};
struct adi_axi_adc_conv *devm_adi_axi_adc_conv_register(struct device *dev,
diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h
index 6564bdcdac66..18d3702fa95d 100644
--- a/include/linux/iio/buffer-dma.h
+++ b/include/linux/iio/buffer-dma.h
@@ -19,14 +19,12 @@ struct device;
/**
* enum iio_block_state - State of a struct iio_dma_buffer_block
- * @IIO_BLOCK_STATE_DEQUEUED: Block is not queued
* @IIO_BLOCK_STATE_QUEUED: Block is on the incoming queue
* @IIO_BLOCK_STATE_ACTIVE: Block is currently being processed by the DMA
* @IIO_BLOCK_STATE_DONE: Block is on the outgoing queue
* @IIO_BLOCK_STATE_DEAD: Block has been marked as to be freed
*/
enum iio_block_state {
- IIO_BLOCK_STATE_DEQUEUED,
IIO_BLOCK_STATE_QUEUED,
IIO_BLOCK_STATE_ACTIVE,
IIO_BLOCK_STATE_DONE,
@@ -73,12 +71,15 @@ struct iio_dma_buffer_block {
* @active_block: Block being used in read()
* @pos: Read offset in the active block
* @block_size: Size of each block
+ * @next_dequeue: index of next block that will be dequeued
*/
struct iio_dma_buffer_queue_fileio {
struct iio_dma_buffer_block *blocks[2];
struct iio_dma_buffer_block *active_block;
size_t pos;
size_t block_size;
+
+ unsigned int next_dequeue;
};
/**
@@ -93,7 +94,6 @@ struct iio_dma_buffer_queue_fileio {
* list and typically also a list of active blocks in the part that handles
* the DMA controller
* @incoming: List of buffers on the incoming queue
- * @outgoing: List of buffers on the outgoing queue
* @active: Whether the buffer is currently active
* @fileio: FileIO state
*/
@@ -105,7 +105,6 @@ struct iio_dma_buffer_queue {
struct mutex lock;
spinlock_t list_lock;
struct list_head incoming;
- struct list_head outgoing;
bool active;
diff --git a/include/linux/iio/common/inv_sensors_timestamp.h b/include/linux/iio/common/inv_sensors_timestamp.h
new file mode 100644
index 000000000000..a47d304d1ba7
--- /dev/null
+++ b/include/linux/iio/common/inv_sensors_timestamp.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2020 Invensense, Inc.
+ */
+
+#ifndef INV_SENSORS_TIMESTAMP_H_
+#define INV_SENSORS_TIMESTAMP_H_
+
+/**
+ * struct inv_sensors_timestamp_chip - chip internal properties
+ * @clock_period: internal clock period in ns
+ * @jitter: acceptable jitter in per-mille
+ * @init_period: chip initial period at reset in ns
+ */
+struct inv_sensors_timestamp_chip {
+ uint32_t clock_period;
+ uint32_t jitter;
+ uint32_t init_period;
+};
+
+/**
+ * struct inv_sensors_timestamp_interval - timestamps interval
+ * @lo: interval lower bound
+ * @up: interval upper bound
+ */
+struct inv_sensors_timestamp_interval {
+ int64_t lo;
+ int64_t up;
+};
+
+/**
+ * struct inv_sensors_timestamp_acc - accumulator for computing an estimation
+ * @val: current estimation of the value, the mean of all values
+ * @idx: current index of the next free place in values table
+ * @values: table of all measured values, used for computing the mean
+ */
+struct inv_sensors_timestamp_acc {
+ uint32_t val;
+ size_t idx;
+ uint32_t values[32];
+};
+
+/**
+ * struct inv_sensors_timestamp - timestamp management states
+ * @chip: chip internal characteristics
+ * @min_period: minimal acceptable clock period
+ * @max_period: maximal acceptable clock period
+ * @it: interrupt interval timestamps
+ * @timestamp: store last timestamp for computing next data timestamp
+ * @mult: current internal period multiplier
+ * @new_mult: new set internal period multiplier (not yet effective)
+ * @period: measured current period of the sensor
+ * @chip_period: accumulator for computing internal chip period
+ */
+struct inv_sensors_timestamp {
+ struct inv_sensors_timestamp_chip chip;
+ uint32_t min_period;
+ uint32_t max_period;
+ struct inv_sensors_timestamp_interval it;
+ int64_t timestamp;
+ uint32_t mult;
+ uint32_t new_mult;
+ uint32_t period;
+ struct inv_sensors_timestamp_acc chip_period;
+};
+
+void inv_sensors_timestamp_init(struct inv_sensors_timestamp *ts,
+ const struct inv_sensors_timestamp_chip *chip);
+
+int inv_sensors_timestamp_update_odr(struct inv_sensors_timestamp *ts,
+ uint32_t period, bool fifo);
+
+void inv_sensors_timestamp_interrupt(struct inv_sensors_timestamp *ts,
+ uint32_t fifo_period, size_t fifo_nb,
+ size_t sensor_nb, int64_t timestamp);
+
+static inline int64_t inv_sensors_timestamp_pop(struct inv_sensors_timestamp *ts)
+{
+ ts->timestamp += ts->period;
+ return ts->timestamp;
+}
+
+void inv_sensors_timestamp_apply_odr(struct inv_sensors_timestamp *ts,
+ uint32_t fifo_period, size_t fifo_nb,
+ unsigned int fifo_no);
+
+static inline void inv_sensors_timestamp_reset(struct inv_sensors_timestamp *ts)
+{
+ const struct inv_sensors_timestamp_interval interval_init = {0LL, 0LL};
+
+ ts->it = interval_init;
+ ts->timestamp = 0;
+}
+
+#endif
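
The intended call flow, sketched for a hypothetical driver state st embedding one of these structs (one sensor per FIFO, per the sensor_nb argument; my_push_sample is a placeholder):

static void my_fifo_drain(struct my_state *st, size_t nb, int64_t irq_ts)
{
	size_t i;

	/* anchor the interval on the interrupt timestamp... */
	inv_sensors_timestamp_interrupt(&st->ts, st->fifo_period, nb, 1, irq_ts);

	/* ...then pop one interpolated timestamp per FIFO sample */
	for (i = 0; i < nb; i++)
		my_push_sample(st, i, inv_sensors_timestamp_pop(&st->ts));
}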
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index f5f3ee57bc70..f9ae5cdd884f 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -22,6 +22,7 @@
#include <linux/platform_data/st_sensors_pdata.h>
#define LSM9DS0_IMU_DEV_NAME "lsm9ds0"
+#define LSM303D_IMU_DEV_NAME "lsm303d"
/*
* Buffer size max case: 2bytes per channel, 3 channels in total +
@@ -257,9 +258,9 @@ struct st_sensor_data {
bool hw_irq_trigger;
s64 hw_timestamp;
- char buffer_data[ST_SENSORS_MAX_BUFFER_SIZE] ____cacheline_aligned;
-
struct mutex odr_lock;
+
+ char buffer_data[ST_SENSORS_MAX_BUFFER_SIZE] __aligned(IIO_DMA_MINALIGN);
};
#ifdef CONFIG_IIO_BUFFER
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index 6802596b017c..e9910b41d48e 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -201,8 +201,9 @@ struct iio_dev
* @chan: The channel being queried.
* @val: Value read back.
*
- * Note raw reads from iio channels are in adc counts and hence
- * scale will need to be applied if standard units required.
+ * Note, if standard units are required, raw reads from iio channels
+ * need the offset (default 0) and scale (default 1) to be applied
+ * as (raw + offset) * scale.
*/
int iio_read_channel_raw(struct iio_channel *chan,
int *val);
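
Where processed values are needed, the (raw + offset) * scale conversion does not have to be open-coded; a consumer sketch, with dev and chan assumed to be the caller's context and the final argument an extra multiplier (1000 here, e.g. millivolts to microvolts):

int raw, uv, ret;

ret = iio_read_channel_raw(chan, &raw);
if (ret < 0)
	return ret;

/* applies (raw + offset) * scale, then the extra multiplier */
ret = iio_convert_raw_to_processed(chan, raw, &uv, 1000);
if (ret < 0)
	return ret;

dev_dbg(dev, "input: %d microvolts\n", uv);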
@@ -212,8 +213,9 @@ int iio_read_channel_raw(struct iio_channel *chan,
* @chan: The channel being queried.
* @val: Value read back.
*
- * Note raw reads from iio channels are in adc counts and hence
- * scale will need to be applied if standard units required.
+ * Note, if standard units are required, raw reads from iio channels
+ * need the offset (default 0) and scale (default 1) to be applied
+ * as (raw + offset) * scale.
*
* In contrast to the normal iio_read_channel_raw, this function
* returns the average of multiple reads.
@@ -281,8 +283,9 @@ int iio_read_channel_attribute(struct iio_channel *chan, int *val,
* @chan: The channel being queried.
* @val: Value being written.
*
- * Note raw writes to iio channels are in dac counts and hence
- * scale will need to be applied if standard units required.
+ * Note that for raw writes to iio channels, if the value provided is
+ * in standard units, the effect of the scale and offset must be removed
+ * as (value / scale) - offset.
*/
int iio_write_channel_raw(struct iio_channel *chan, int val);
@@ -292,12 +295,25 @@ int iio_write_channel_raw(struct iio_channel *chan, int val);
* @chan: The channel being queried.
* @val: Value read back.
*
- * Note raw reads from iio channels are in adc counts and hence
- * scale will need to be applied if standard units are required.
+ * Note, if standard units are required, raw reads from iio channels
+ * need the offset (default 0) and scale (default 1) to be applied
+ * as (raw + offset) * scale.
*/
int iio_read_max_channel_raw(struct iio_channel *chan, int *val);
/**
+ * iio_read_min_channel_raw() - read minimum available raw value from a given
+ * channel, i.e. the minimum possible value.
+ * @chan: The channel being queried.
+ * @val: Value read back.
+ *
+ * Note, if standard units are required, raw reads from iio channels
+ * need the offset (default 0) and scale (default 1) to be applied
+ * as (raw + offset) * scale.
+ */
+int iio_read_min_channel_raw(struct iio_channel *chan, int *val);
+
+/**
* iio_read_avail_channel_raw() - read available raw values from a given channel
* @chan: The channel being queried.
* @vals: Available values read back.
@@ -308,8 +324,9 @@ int iio_read_max_channel_raw(struct iio_channel *chan, int *val);
* For ranges, three vals are always returned; min, step and max.
* For lists, all the possible values are enumerated.
*
- * Note raw available values from iio channels are in adc counts and
- * hence scale will need to be applied if standard units are required.
+ * Note, if standard units are required, raw available values from iio
+ * channels need the offset (default 0) and scale (default 1) to be applied
+ * as (raw + offset) * scale.
*/
int iio_read_avail_channel_raw(struct iio_channel *chan,
const int **vals, int *length);
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 81413cd3a3e7..c5b36d2c1e73 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -221,6 +221,9 @@ struct iio_event_spec {
* @extend_name: Allows labeling of channel attributes with an
* informative name. Note this has no effect on codes etc,
* unlike modifiers.
+ * This field is deprecated in favour of providing
+ * iio_info->read_label() to override the label, which
+ * unlike @extend_name does not affect sysfs filenames.
* @datasheet_name: A name used in in-kernel mapping of channels. It should
* correspond to the first name that the channel is referred
* to by in the datasheet (e.g. IND), or the nearest
@@ -424,18 +427,14 @@ struct iio_trigger; /* forward declaration */
* @write_event_config: set if the event is enabled.
* @read_event_value: read a configuration value associated with the event.
* @write_event_value: write a configuration value for the event.
+ * @read_event_label: function to request label name for a specified label,
+ * for better event identification.
* @validate_trigger: function to validate the trigger when the
* current trigger gets changed.
* @update_scan_mode: function to configure device and scan buffer when
* channels have changed
* @debugfs_reg_access: function to read or write register value of device
- * @of_xlate: function pointer to obtain channel specifier index.
- * When #iio-cells is greater than '0', the driver could
- * provide a custom of_xlate function that reads the
- * *args* and returns the appropriate index in registered
- * IIO channels array.
* @fwnode_xlate: fwnode based function pointer to obtain channel specifier index.
- * Functionally the same as @of_xlate.
* @hwfifo_set_watermark: function pointer to set the current hardware
* fifo watermark level; see hwfifo_* entries in
* Documentation/ABI/testing/sysfs-bus-iio for details on
@@ -508,6 +507,12 @@ struct iio_info {
enum iio_event_direction dir,
enum iio_event_info info, int val, int val2);
+ int (*read_event_label)(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ char *label);
+
int (*validate_trigger)(struct iio_dev *indio_dev,
struct iio_trigger *trig);
int (*update_scan_mode)(struct iio_dev *indio_dev,
@@ -553,7 +558,9 @@ struct iio_buffer_setup_ops {
* and owner
* @buffer: [DRIVER] any buffer present
* @scan_bytes: [INTERN] num bytes captured to be fed to buffer demux
- * @available_scan_masks: [DRIVER] optional array of allowed bitmasks
+ * @available_scan_masks: [DRIVER] optional array of allowed bitmasks. Sort the
+ * array in order of preference, the most preferred
+ * masks first.
* @masklength: [INTERN] the length of the mask established from
* channels
* @active_scan_mask: [INTERN] union of all scan masks requested by buffers
@@ -722,7 +729,7 @@ static inline void *iio_device_get_drvdata(const struct iio_dev *indio_dev)
* must not share cachelines with the rest of the structure, thus making
* them safe for use with non-coherent DMA.
*/
-#define IIO_DMA_MINALIGN ARCH_KMALLOC_MINALIGN
+#define IIO_DMA_MINALIGN ARCH_DMA_MINALIGN
struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv);
/* The information at the returned address is guaranteed to be cacheline aligned */
diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h
index dc9ea299e088..8898966bc0f0 100644
--- a/include/linux/iio/imu/adis.h
+++ b/include/linux/iio/imu/adis.h
@@ -11,6 +11,7 @@
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
+#include <linux/iio/iio.h>
#include <linux/iio/types.h>
#define ADIS_WRITE_REG(reg) ((0x80 | (reg)))
@@ -131,7 +132,7 @@ struct adis {
unsigned long irq_flag;
void *buffer;
- u8 tx[10] ____cacheline_aligned;
+ u8 tx[10] __aligned(IIO_DMA_MINALIGN);
u8 rx[4];
};
diff --git a/include/linux/iio/sw_device.h b/include/linux/iio/sw_device.h
index eff1e6b2595c..0f7fe7b522e3 100644
--- a/include/linux/iio/sw_device.h
+++ b/include/linux/iio/sw_device.h
@@ -51,9 +51,6 @@ void iio_unregister_sw_device_type(struct iio_sw_device_type *dt);
struct iio_sw_device *iio_sw_device_create(const char *, const char *);
void iio_sw_device_destroy(struct iio_sw_device *);
-int iio_sw_device_type_configfs_register(struct iio_sw_device_type *dt);
-void iio_sw_device_type_configfs_unregister(struct iio_sw_device_type *dt);
-
static inline
void iio_swd_group_init_type_name(struct iio_sw_device *d,
const char *name,
diff --git a/include/linux/iio/sw_trigger.h b/include/linux/iio/sw_trigger.h
index 47de2443e984..bc77f88df303 100644
--- a/include/linux/iio/sw_trigger.h
+++ b/include/linux/iio/sw_trigger.h
@@ -51,9 +51,6 @@ void iio_unregister_sw_trigger_type(struct iio_sw_trigger_type *tt);
struct iio_sw_trigger *iio_sw_trigger_create(const char *, const char *);
void iio_sw_trigger_destroy(struct iio_sw_trigger *);
-int iio_sw_trigger_type_configfs_register(struct iio_sw_trigger_type *tt);
-void iio_sw_trigger_type_configfs_unregister(struct iio_sw_trigger_type *tt);
-
static inline
void iio_swt_group_init_type_name(struct iio_sw_trigger *t,
const char *name,
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h
index 51f52c5c6092..bce3b1788199 100644
--- a/include/linux/iio/trigger.h
+++ b/include/linux/iio/trigger.h
@@ -171,6 +171,7 @@ void iio_trigger_free(struct iio_trigger *trig);
*/
bool iio_trigger_using_own(struct iio_dev *indio_dev);
+int iio_validate_own_trigger(struct iio_dev *idev, struct iio_trigger *trig);
int iio_trigger_validate_own_device(struct iio_trigger *trig,
struct iio_dev *indio_dev);
diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h
index 82faa98c719a..d89982c98368 100644
--- a/include/linux/iio/types.h
+++ b/include/linux/iio/types.h
@@ -19,6 +19,8 @@ enum iio_event_info {
IIO_EV_INFO_TIMEOUT,
IIO_EV_INFO_RESET_TIMEOUT,
IIO_EV_INFO_TAP2_MIN_DELAY,
+ IIO_EV_INFO_RUNNING_PERIOD,
+ IIO_EV_INFO_RUNNING_COUNT,
};
#define IIO_VAL_INT 1
@@ -66,6 +68,7 @@ enum iio_chan_info_enum {
IIO_CHAN_INFO_THERMOCOUPLE_TYPE,
IIO_CHAN_INFO_CALIBAMBIENT,
IIO_CHAN_INFO_ZEROPOINT,
+ IIO_CHAN_INFO_TROUGH,
};
#endif /* _IIO_TYPES_H_ */
diff --git a/include/linux/indirect_call_wrapper.h b/include/linux/indirect_call_wrapper.h
index c1c76a70a6ce..adb83a42a6b9 100644
--- a/include/linux/indirect_call_wrapper.h
+++ b/include/linux/indirect_call_wrapper.h
@@ -11,7 +11,7 @@
* @__VA_ARGS__: arguments for @f
*
* Avoid retpoline overhead for known builtin, checking @f vs each of them and
- * eventually invoking directly the builtin function. The functions are check
+ * eventually invoking the builtin function directly. The functions are checked
* in the given order. Fallback to the indirect call.
*/
#define INDIRECT_CALL_1(f, f1, ...) \
diff --git a/include/linux/init.h b/include/linux/init.h
index c5fe6d26f5b1..3fa3f6241350 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -89,9 +89,6 @@
__latent_entropy
#define __meminitdata __section(".meminit.data")
#define __meminitconst __section(".meminit.rodata")
-#define __memexit __section(".memexit.text") __exitused __cold notrace
-#define __memexitdata __section(".memexit.data")
-#define __memexitconst __section(".memexit.rodata")
/* For assembly routines */
#define __HEAD .section ".head.text","ax"
@@ -152,6 +149,23 @@ extern unsigned int reset_devices;
void setup_arch(char **);
void prepare_namespace(void);
void __init init_rootfs(void);
+
+void init_IRQ(void);
+void time_init(void);
+void poking_init(void);
+void pgtable_cache_init(void);
+
+extern initcall_entry_t __initcall_start[];
+extern initcall_entry_t __initcall0_start[];
+extern initcall_entry_t __initcall1_start[];
+extern initcall_entry_t __initcall2_start[];
+extern initcall_entry_t __initcall3_start[];
+extern initcall_entry_t __initcall4_start[];
+extern initcall_entry_t __initcall5_start[];
+extern initcall_entry_t __initcall6_start[];
+extern initcall_entry_t __initcall7_start[];
+extern initcall_entry_t __initcall_end[];
+
extern struct file_system_type rootfs_fs_type;
#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_STRICT_MODULE_RWX)
@@ -165,6 +179,13 @@ extern void (*late_time_init)(void);
extern bool initcall_debug;
+#ifdef MODULE
+extern struct module __this_module;
+#define THIS_MODULE (&__this_module)
+#else
+#define THIS_MODULE ((struct module *)0)
+#endif
+
#endif
#ifndef MODULE
@@ -309,6 +330,8 @@ struct obs_kernel_param {
int early;
};
+extern const struct obs_kernel_param __setup_start[], __setup_end[];
+
/*
* Only for really core code. See moduleparam.h for the normal way.
*
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 40fc5813cf93..bccb3f1f6262 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -37,13 +37,6 @@ extern struct cred init_cred;
#define INIT_TASK_COMM "swapper"
-/* Attach to the init_task data structure for proper alignment */
-#ifdef CONFIG_ARCH_TASK_STRUCT_ON_STACK
-#define __init_task_data __section(".data..init_task")
-#else
-#define __init_task_data /**/
-#endif
-
/* Attach to the thread_info data structure for proper alignment */
#define __init_thread_info __section(".data..init_thread_info")
diff --git a/include/linux/input.h b/include/linux/input.h
index 49790c1bd2c4..de6503c0edb8 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -562,7 +562,7 @@ struct ff_device {
int max_effects;
struct ff_effect *effects;
- struct file *effect_owners[];
+ struct file *effect_owners[] __counted_by(max_effects);
};
int input_ff_create(struct input_dev *dev, unsigned int max_effects);
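__counted_by() ties a flexible array to its element-count member so FORTIFY_SOURCE and UBSAN can bounds-check accesses at runtime. One consequence, shown in this hypothetical allocation sketch, is that the counter must be assigned before the array is indexed:

	struct ff_device *ff;

	ff = kzalloc(struct_size(ff, effect_owners, max_effects), GFP_KERNEL);
	if (!ff)
		return -ENOMEM;
	/* Set the counter first; only then is effect_owners[i] in bounds. */
	ff->max_effects = max_effects;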
diff --git a/include/linux/input/as5011.h b/include/linux/input/as5011.h
index 5fba52a56cd6..5705d5de3aea 100644
--- a/include/linux/input/as5011.h
+++ b/include/linux/input/as5011.h
@@ -7,7 +7,6 @@
*/
struct as5011_platform_data {
- unsigned int button_gpio;
unsigned int axis_irq; /* irq number */
unsigned long axis_irqflags;
char xp, xn; /* threshold for x axis */
diff --git a/include/linux/input/mt.h b/include/linux/input/mt.h
index 3b8580bd33c1..2cf89a538b18 100644
--- a/include/linux/input/mt.h
+++ b/include/linux/input/mt.h
@@ -47,7 +47,7 @@ struct input_mt {
unsigned int flags;
unsigned int frame;
int *red;
- struct input_mt_slot slots[];
+ struct input_mt_slot slots[] __counted_by(num_slots);
};
static inline void input_mt_set_value(struct input_mt_slot *slot,
diff --git a/include/linux/input/navpoint.h b/include/linux/input/navpoint.h
index d464ffb4db52..5192ae3f5ec1 100644
--- a/include/linux/input/navpoint.h
+++ b/include/linux/input/navpoint.h
@@ -5,5 +5,4 @@
struct navpoint_platform_data {
int port; /* PXA SSP port for pxa_ssp_request() */
- int gpio; /* GPIO for power on/off */
};
diff --git a/include/linux/instruction_pointer.h b/include/linux/instruction_pointer.h
index cda1f706eaeb..aa0b3ffea935 100644
--- a/include/linux/instruction_pointer.h
+++ b/include/linux/instruction_pointer.h
@@ -2,7 +2,12 @@
#ifndef _LINUX_INSTRUCTION_POINTER_H
#define _LINUX_INSTRUCTION_POINTER_H
+#include <asm/linkage.h>
+
#define _RET_IP_ (unsigned long)__builtin_return_address(0)
+
+#ifndef _THIS_IP_
#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })
+#endif
#endif /* _LINUX_INSTRUCTION_POINTER_H */
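_RET_IP_ and _THIS_IP_ evaluate to the caller's return address and the current instruction address, respectively; they are commonly handed to tracing and lock-diagnostic code. A hypothetical sketch:

static void trace_entry(void)
{
	/* %pS resolves an address to symbol+offset */
	pr_debug("called from %pS, currently at %pS\n",
		 (void *)_RET_IP_, (void *)_THIS_IP_);
}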
diff --git a/include/media/dvb_math.h b/include/linux/int_log.h
index 8690ec42954d..0a6f58c38b61 100644
--- a/include/media/dvb_math.h
+++ b/include/linux/int_log.h
@@ -1,22 +1,12 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
- * dvb-math provides some complex fixed-point math
- * operations shared between the dvb related stuff
+ * Provides fixed-point logarithm operations.
*
* Copyright (C) 2006 Christoph Pfister (christophpfister@gmail.com)
- *
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
*/
-#ifndef __DVB_MATH_H
-#define __DVB_MATH_H
+#ifndef __LINUX_INT_LOG_H
+#define __LINUX_INT_LOG_H
#include <linux/types.h>
diff --git a/include/linux/intel-ish-client-if.h b/include/linux/intel-ish-client-if.h
index f45f13304add..771622650247 100644
--- a/include/linux/intel-ish-client-if.h
+++ b/include/linux/intel-ish-client-if.h
@@ -94,6 +94,9 @@ int ishtp_cl_link(struct ishtp_cl *cl);
void ishtp_cl_unlink(struct ishtp_cl *cl);
int ishtp_cl_disconnect(struct ishtp_cl *cl);
int ishtp_cl_connect(struct ishtp_cl *cl);
+int ishtp_cl_establish_connection(struct ishtp_cl *cl, const guid_t *uuid,
+ int tx_size, int rx_size, bool reset);
+void ishtp_cl_destroy_connection(struct ishtp_cl *cl, bool reset);
int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length);
int ishtp_cl_flush_queues(struct ishtp_cl *cl);
int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb);
diff --git a/include/linux/intel_rapl.h b/include/linux/intel_rapl.h
index 9f4b6f5b822f..33f21bd85dbf 100644
--- a/include/linux/intel_rapl.h
+++ b/include/linux/intel_rapl.h
@@ -14,6 +14,12 @@
#include <linux/powercap.h>
#include <linux/cpuhotplug.h>
+enum rapl_if_type {
+ RAPL_IF_MSR, /* RAPL I/F using MSR registers */
+ RAPL_IF_MMIO, /* RAPL I/F using MMIO registers */
+ RAPL_IF_TPMI, /* RAPL I/F using TPMI registers */
+};
+
enum rapl_domain_type {
RAPL_DOMAIN_PACKAGE, /* entire package/socket */
RAPL_DOMAIN_PP0, /* core power plane */
@@ -30,17 +36,23 @@ enum rapl_domain_reg_id {
RAPL_DOMAIN_REG_POLICY,
RAPL_DOMAIN_REG_INFO,
RAPL_DOMAIN_REG_PL4,
+ RAPL_DOMAIN_REG_UNIT,
+ RAPL_DOMAIN_REG_PL2,
RAPL_DOMAIN_REG_MAX,
};
struct rapl_domain;
enum rapl_primitives {
- ENERGY_COUNTER,
POWER_LIMIT1,
POWER_LIMIT2,
POWER_LIMIT4,
+ ENERGY_COUNTER,
FW_LOCK,
+ FW_HIGH_LOCK,
+ PL1_LOCK,
+ PL2_LOCK,
+ PL4_LOCK,
PL1_ENABLE, /* power limit 1, aka long term */
PL1_CLAMP, /* allow frequency to go below OS request */
@@ -74,12 +86,13 @@ struct rapl_domain_data {
unsigned long timestamp;
};
-#define NR_POWER_LIMITS (3)
+#define NR_POWER_LIMITS (POWER_LIMIT4 + 1)
+
struct rapl_power_limit {
struct powercap_zone_constraint *constraint;
- int prim_id; /* primitive ID used to enable */
struct rapl_domain *domain;
const char *name;
+ bool locked;
u64 last_power_limit;
};
@@ -87,21 +100,29 @@ struct rapl_package;
#define RAPL_DOMAIN_NAME_LENGTH 16
+union rapl_reg {
+ void __iomem *mmio;
+ u32 msr;
+ u64 val;
+};
+
struct rapl_domain {
char name[RAPL_DOMAIN_NAME_LENGTH];
enum rapl_domain_type id;
- u64 regs[RAPL_DOMAIN_REG_MAX];
+ union rapl_reg regs[RAPL_DOMAIN_REG_MAX];
struct powercap_zone power_zone;
struct rapl_domain_data rdd;
struct rapl_power_limit rpl[NR_POWER_LIMITS];
u64 attr_map; /* track capabilities */
unsigned int state;
- unsigned int domain_energy_unit;
+ unsigned int power_unit;
+ unsigned int energy_unit;
+ unsigned int time_unit;
struct rapl_package *rp;
};
struct reg_action {
- u64 reg;
+ union rapl_reg reg;
u64 mask;
u64 value;
int err;
@@ -121,16 +142,20 @@ struct reg_action {
* registers.
* @write_raw: Callback for writing RAPL interface specific
* registers.
+ * @defaults: internal pointer to interface default settings
+ * @rpi: internal pointer to interface primitive info
*/
struct rapl_if_priv {
+ enum rapl_if_type type;
struct powercap_control_type *control_type;
- struct rapl_domain *platform_rapl_domain;
enum cpuhp_state pcap_rapl_online;
- u64 reg_unit;
- u64 regs[RAPL_DOMAIN_MAX][RAPL_DOMAIN_REG_MAX];
+ union rapl_reg reg_unit;
+ union rapl_reg regs[RAPL_DOMAIN_MAX][RAPL_DOMAIN_REG_MAX];
int limits[RAPL_DOMAIN_MAX];
- int (*read_raw)(int cpu, struct reg_action *ra);
- int (*write_raw)(int cpu, struct reg_action *ra);
+ int (*read_raw)(int id, struct reg_action *ra);
+ int (*write_raw)(int id, struct reg_action *ra);
+ void *defaults;
+ void *rpi;
};
/* maximum rapl package domain name: package-%d-die-%d */
@@ -140,9 +165,6 @@ struct rapl_package {
	unsigned int id;	/* logical die id, equals the physical die id on 1-die systems */
unsigned int nr_domains;
unsigned long domain_map; /* bit map of active domains */
- unsigned int power_unit;
- unsigned int energy_unit;
- unsigned int time_unit;
struct rapl_domain *domains; /* array of domains, sized at runtime */
struct powercap_zone *power_zone; /* keep track of parent zone */
unsigned long power_limit_irq; /* keep track of package power limit
@@ -156,8 +178,8 @@ struct rapl_package {
struct rapl_if_priv *priv;
};
-struct rapl_package *rapl_find_package_domain(int cpu, struct rapl_if_priv *priv);
-struct rapl_package *rapl_add_package(int cpu, struct rapl_if_priv *priv);
+struct rapl_package *rapl_find_package_domain(int id, struct rapl_if_priv *priv, bool id_is_cpu);
+struct rapl_package *rapl_add_package(int id, struct rapl_if_priv *priv, bool id_is_cpu);
void rapl_remove_package(struct rapl_package *rp);
#endif /* __INTEL_RAPL_H__ */
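The new union rapl_reg lets one register description carry an MSR index, an MMIO pointer, or a raw TPMI value, with rapl_if_priv->type selecting which member is live. A hedged sketch of a read_raw callback for an MMIO interface; the function name is hypothetical and a 64-bit platform is assumed for readq():

static int my_mmio_read_raw(int id, struct reg_action *ra)
{
	if (!ra->reg.mmio)
		return -EINVAL;
	/* Read the 64-bit register and keep only the requested bits. */
	ra->value = readq(ra->reg.mmio) & ra->mask;
	return 0;
}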
diff --git a/include/linux/intel_tpmi.h b/include/linux/intel_tpmi.h
index f505788c05da..a3529b962be6 100644
--- a/include/linux/intel_tpmi.h
+++ b/include/linux/intel_tpmi.h
@@ -6,6 +6,25 @@
#ifndef _INTEL_TPMI_H_
#define _INTEL_TPMI_H_
+#include <linux/bitfield.h>
+
+#define TPMI_VERSION_INVALID 0xff
+#define TPMI_MINOR_VERSION(val) FIELD_GET(GENMASK(4, 0), val)
+#define TPMI_MAJOR_VERSION(val) FIELD_GET(GENMASK(7, 5), val)
+
+/*
+ * List of supported TPMI IDs.
+ * Some TPMI IDs are not used by Linux, so the numbers are not consecutive.
+ */
+enum intel_tpmi_id {
+ TPMI_ID_RAPL = 0, /* Running Average Power Limit */
+ TPMI_ID_PEM = 1, /* Power and Perf excursion Monitor */
+ TPMI_ID_UNCORE = 2, /* Uncore Frequency Scaling */
+ TPMI_ID_SST = 5, /* Speed Select Technology */
+ TPMI_CONTROL_ID = 0x80, /* Special ID for getting feature status */
+ TPMI_INFO_ID = 0x81, /* Special ID for PCI BDF and Package ID information */
+};
+
/**
* struct intel_tpmi_plat_info - Platform information for a TPMI device instance
* @package_id: CPU Package id
@@ -26,5 +45,6 @@ struct intel_tpmi_plat_info {
struct intel_tpmi_plat_info *tpmi_get_platform_data(struct auxiliary_device *auxdev);
struct resource *tpmi_get_resource_at_index(struct auxiliary_device *auxdev, int index);
int tpmi_get_resource_count(struct auxiliary_device *auxdev);
-
+int tpmi_get_feature_status(struct auxiliary_device *auxdev, int feature_id, bool *read_blocked,
+ bool *write_blocked);
#endif
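The version macros unpack a single byte: bits 4:0 hold the minor version, bits 7:5 the major version. A worked example, assuming a hypothetical register read produced the value:

	u8 ver = 0x45;		/* 0b010_00101 */

	if (ver == TPMI_VERSION_INVALID)
		return -ENODEV;
	/* major = bits 7:5 = 2, minor = bits 4:0 = 5, so this prints "TPMI v2.5" */
	pr_info("TPMI v%lu.%lu\n", TPMI_MAJOR_VERSION(ver), TPMI_MINOR_VERSION(ver));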
diff --git a/include/linux/interconnect-clk.h b/include/linux/interconnect-clk.h
new file mode 100644
index 000000000000..0cd80112bea5
--- /dev/null
+++ b/include/linux/interconnect-clk.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023, Linaro Ltd.
+ */
+
+#ifndef __LINUX_INTERCONNECT_CLK_H
+#define __LINUX_INTERCONNECT_CLK_H
+
+struct device;
+
+struct icc_clk_data {
+ struct clk *clk;
+ const char *name;
+};
+
+struct icc_provider *icc_clk_register(struct device *dev,
+ unsigned int first_id,
+ unsigned int num_clocks,
+ const struct icc_clk_data *data);
+void icc_clk_unregister(struct icc_provider *provider);
+
+#endif
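icc_clk_register() wraps plain clocks as a minimal interconnect provider, one node per entry. A hedged usage sketch; the clock handles and FIRST_NODE_ID are hypothetical:

	static const struct icc_clk_data clk_data[] = {
		{ .clk = bus_a_clk, .name = "bus_a" },
		{ .clk = bus_b_clk, .name = "bus_b" },
	};
	struct icc_provider *provider;

	provider = icc_clk_register(dev, FIRST_NODE_ID,
				    ARRAY_SIZE(clk_data), clk_data);
	if (IS_ERR(provider))
		return PTR_ERR(provider);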
diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h
index e6d8aca6886d..7ba183f221f1 100644
--- a/include/linux/interconnect-provider.h
+++ b/include/linux/interconnect-provider.h
@@ -33,7 +33,7 @@ struct icc_node_data {
*/
struct icc_onecell_data {
unsigned int num_nodes;
- struct icc_node *nodes[];
+ struct icc_node *nodes[] __counted_by(num_nodes);
};
struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec,
diff --git a/include/linux/interconnect.h b/include/linux/interconnect.h
index 2b0e784ba771..97ac253df62c 100644
--- a/include/linux/interconnect.h
+++ b/include/linux/interconnect.h
@@ -40,8 +40,6 @@ struct icc_bulk_data {
#if IS_ENABLED(CONFIG_INTERCONNECT)
-struct icc_path *icc_get(struct device *dev, const int src_id,
- const int dst_id);
struct icc_path *of_icc_get(struct device *dev, const char *name);
struct icc_path *devm_of_icc_get(struct device *dev, const char *name);
int devm_of_icc_bulk_get(struct device *dev, int num_paths, struct icc_bulk_data *paths);
@@ -61,12 +59,6 @@ void icc_bulk_disable(int num_paths, const struct icc_bulk_data *paths);
#else
-static inline struct icc_path *icc_get(struct device *dev, const int src_id,
- const int dst_id)
-{
- return NULL;
-}
-
static inline struct icc_path *of_icc_get(struct device *dev,
const char *name)
{
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index a92bce40b04b..76121c2bb4f8 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -566,11 +566,15 @@ enum
*
* _ RCU:
* 1) rcutree_migrate_callbacks() migrates the queue.
- * 2) rcu_report_dead() reports the final quiescent states.
+ * 2) rcutree_report_cpu_dead() reports the final quiescent states.
*
* _ IRQ_POLL: irq_poll_cpu_dead() migrates the queue
+ *
+ * _ (HR)TIMER_SOFTIRQ: (hr)timers_dead_cpu() migrates the queue
*/
-#define SOFTIRQ_HOTPLUG_SAFE_MASK (BIT(RCU_SOFTIRQ) | BIT(IRQ_POLL_SOFTIRQ))
+#define SOFTIRQ_HOTPLUG_SAFE_MASK (BIT(TIMER_SOFTIRQ) | BIT(IRQ_POLL_SOFTIRQ) |\
+ BIT(HRTIMER_SOFTIRQ) | BIT(RCU_SOFTIRQ))
+
/* map softirq index to softirq name. update 'softirq_to_name' in
* kernel/softirq.c when adding a new softirq.
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
index 1b7a44b35616..86cf1f7ae389 100644
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -100,6 +100,30 @@ struct io_pgtable_cfg {
const struct iommu_flush_ops *tlb;
struct device *iommu_dev;
+ /**
+ * @alloc: Custom page allocator.
+ *
+ * Optional hook used to allocate page tables. If this function is NULL,
+ * @free must be NULL too.
+ *
+ * Memory returned should be zeroed and suitable for dma_map_single() and
+ * virt_to_phys().
+ *
+ * Not all formats support custom page allocators. Before considering
+ * passing a non-NULL value, make sure the chosen page format supports
+ * this feature.
+ */
+ void *(*alloc)(void *cookie, size_t size, gfp_t gfp);
+
+ /**
+ * @free: Custom page de-allocator.
+ *
+ * Optional hook used to free page tables allocated with the @alloc
+ * hook. Must be non-NULL if @alloc is not NULL, must be NULL
+ * otherwise.
+ */
+ void (*free)(void *cookie, void *pages, size_t size);
+
/* Low-level data specific to the table format */
union {
struct {
@@ -166,6 +190,10 @@ struct io_pgtable_ops {
struct iommu_iotlb_gather *gather);
phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
unsigned long iova);
+ int (*read_and_clear_dirty)(struct io_pgtable_ops *ops,
+ unsigned long iova, size_t size,
+ unsigned long flags,
+ struct iommu_dirty_bitmap *dirty);
};
/**
@@ -238,15 +266,25 @@ io_pgtable_tlb_add_page(struct io_pgtable *iop,
}
/**
+ * enum io_pgtable_caps - IO page table backend capabilities.
+ */
+enum io_pgtable_caps {
+ /** @IO_PGTABLE_CAP_CUSTOM_ALLOCATOR: Backend accepts custom page table allocators. */
+ IO_PGTABLE_CAP_CUSTOM_ALLOCATOR = BIT(0),
+};
+
+/**
* struct io_pgtable_init_fns - Alloc/free a set of page tables for a
* particular format.
*
* @alloc: Allocate a set of page tables described by cfg.
* @free: Free the page tables associated with iop.
+ * @caps: Combination of @io_pgtable_caps flags encoding the backend capabilities.
*/
struct io_pgtable_init_fns {
struct io_pgtable *(*alloc)(struct io_pgtable_cfg *cfg, void *cookie);
void (*free)(struct io_pgtable *iop);
+ u32 caps;
};
extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
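A minimal sketch of the @alloc/@free pair described above, assuming a backend that advertises IO_PGTABLE_CAP_CUSTOM_ALLOCATOR; buddy-allocator pages satisfy the stated contract of being zeroed and suitable for dma_map_single() and virt_to_phys(). The function names are hypothetical:

static void *my_pgtable_alloc(void *cookie, size_t size, gfp_t gfp)
{
	return (void *)__get_free_pages(gfp | __GFP_ZERO, get_order(size));
}

static void my_pgtable_free(void *cookie, void *pages, size_t size)
{
	free_pages((unsigned long)pages, get_order(size));
}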
diff --git a/include/linux/io.h b/include/linux/io.h
index 308f4f0cfb93..7304f2a69960 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -68,6 +68,11 @@ void *devm_memremap(struct device *dev, resource_size_t offset,
size_t size, unsigned long flags);
void devm_memunmap(struct device *dev, void *addr);
+/* architectures can override this */
+pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
+ unsigned long size, pgprot_t prot);
+
+
#ifdef CONFIG_PCI
/*
* The PCI specifications (Rev 3.0, 3.2.5 "Transaction Ordering and
diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
index 7fe31b2cd02f..68ed6697fece 100644
--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -6,53 +6,13 @@
#include <linux/xarray.h>
#include <uapi/linux/io_uring.h>
-enum io_uring_cmd_flags {
- IO_URING_F_COMPLETE_DEFER = 1,
- IO_URING_F_UNLOCKED = 2,
- /* the request is executed from poll, it should not be freed */
- IO_URING_F_MULTISHOT = 4,
- /* executed by io-wq */
- IO_URING_F_IOWQ = 8,
- /* int's last bit, sign checks are usually faster than a bit test */
- IO_URING_F_NONBLOCK = INT_MIN,
-
- /* ctx state flags, for URING_CMD */
- IO_URING_F_SQE128 = (1 << 8),
- IO_URING_F_CQE32 = (1 << 9),
- IO_URING_F_IOPOLL = (1 << 10),
-};
-
-struct io_uring_cmd {
- struct file *file;
- const struct io_uring_sqe *sqe;
- union {
- /* callback to defer completions to task context */
- void (*task_work_cb)(struct io_uring_cmd *cmd, unsigned);
- /* used for polled completion */
- void *cookie;
- };
- u32 cmd_op;
- u32 flags;
- u8 pdu[32]; /* available inline for free use */
-};
-
-static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe)
-{
- return sqe->cmd;
-}
-
#if defined(CONFIG_IO_URING)
-int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
- struct iov_iter *iter, void *ioucmd);
-void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2,
- unsigned issue_flags);
-void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
- void (*task_work_cb)(struct io_uring_cmd *, unsigned));
-struct sock *io_uring_get_socket(struct file *file);
void __io_uring_cancel(bool cancel_all);
void __io_uring_free(struct task_struct *tsk);
void io_uring_unreg_ringfd(void);
const char *io_uring_get_opcode(u8 opcode);
+int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags);
+bool io_is_uring_fops(struct file *file);
static inline void io_uring_files_cancel(void)
{
@@ -72,23 +32,6 @@ static inline void io_uring_free(struct task_struct *tsk)
__io_uring_free(tsk);
}
#else
-static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
- struct iov_iter *iter, void *ioucmd)
-{
- return -EOPNOTSUPP;
-}
-static inline void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret,
- ssize_t ret2, unsigned issue_flags)
-{
-}
-static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
- void (*task_work_cb)(struct io_uring_cmd *, unsigned))
-{
-}
-static inline struct sock *io_uring_get_socket(struct file *file)
-{
- return NULL;
-}
static inline void io_uring_task_cancel(void)
{
}
@@ -102,6 +45,15 @@ static inline const char *io_uring_get_opcode(u8 opcode)
{
return "";
}
+static inline int io_uring_cmd_sock(struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
+{
+ return -EOPNOTSUPP;
+}
+static inline bool io_is_uring_fops(struct file *file)
+{
+ return false;
+}
#endif
#endif
diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h
new file mode 100644
index 000000000000..e453a997c060
--- /dev/null
+++ b/include/linux/io_uring/cmd.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _LINUX_IO_URING_CMD_H
+#define _LINUX_IO_URING_CMD_H
+
+#include <uapi/linux/io_uring.h>
+#include <linux/io_uring_types.h>
+
+/* only top 8 bits of sqe->uring_cmd_flags for kernel internal use */
+#define IORING_URING_CMD_CANCELABLE (1U << 30)
+
+struct io_uring_cmd {
+ struct file *file;
+ const struct io_uring_sqe *sqe;
+ /* callback to defer completions to task context */
+ void (*task_work_cb)(struct io_uring_cmd *cmd, unsigned);
+ u32 cmd_op;
+ u32 flags;
+ u8 pdu[32]; /* available inline for free use */
+};
+
+static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe)
+{
+ return sqe->cmd;
+}
+
+#if defined(CONFIG_IO_URING)
+int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
+ struct iov_iter *iter, void *ioucmd);
+void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2,
+ unsigned issue_flags);
+void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
+ void (*task_work_cb)(struct io_uring_cmd *, unsigned),
+ unsigned flags);
+
+void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
+ unsigned int issue_flags);
+
+#else
+static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
+ struct iov_iter *iter, void *ioucmd)
+{
+ return -EOPNOTSUPP;
+}
+static inline void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret,
+ ssize_t ret2, unsigned issue_flags)
+{
+}
+static inline void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
+ void (*task_work_cb)(struct io_uring_cmd *, unsigned),
+ unsigned flags)
+{
+}
+static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
+{
+}
+#endif
+
+/* users must follow the IOU_F_TWQ_LAZY_WAKE semantics */
+static inline void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd,
+ void (*task_work_cb)(struct io_uring_cmd *, unsigned))
+{
+ __io_uring_cmd_do_in_task(ioucmd, task_work_cb, IOU_F_TWQ_LAZY_WAKE);
+}
+
+static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
+ void (*task_work_cb)(struct io_uring_cmd *, unsigned))
+{
+ __io_uring_cmd_do_in_task(ioucmd, task_work_cb, 0);
+}
+
+static inline struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd)
+{
+ return cmd_to_io_kiocb(cmd)->task;
+}
+
+#endif /* _LINUX_IO_URING_CMD_H */
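A hedged sketch of the task-work pattern this new header exposes: a producer running in IRQ context punts completion to task context, where io_uring_cmd_done() posts the CQE. Function names are hypothetical:

static void my_cmd_tw(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	io_uring_cmd_done(cmd, 0, 0, issue_flags);
}

static void my_complete_from_irq(struct io_uring_cmd *cmd)
{
	/* Not safe to post a CQE here; defer to the task. */
	io_uring_cmd_complete_in_task(cmd, my_cmd_tw);
}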
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 1b2a20a42413..854ad67a5f70 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -7,6 +7,37 @@
#include <linux/llist.h>
#include <uapi/linux/io_uring.h>
+enum {
+ /*
+ * A hint to not wake right away but delay until there are enough
+ * tw's queued to match the number of CQEs the task is waiting for.
+ *
+ * Must not be used with requests generating more than one CQE.
+ * It's also ignored unless IORING_SETUP_DEFER_TASKRUN is set.
+ */
+ IOU_F_TWQ_LAZY_WAKE = 1,
+};
+
+enum io_uring_cmd_flags {
+ IO_URING_F_COMPLETE_DEFER = 1,
+ IO_URING_F_UNLOCKED = 2,
+ /* the request is executed from poll, it should not be freed */
+ IO_URING_F_MULTISHOT = 4,
+ /* executed by io-wq */
+ IO_URING_F_IOWQ = 8,
+ /* int's last bit, sign checks are usually faster than a bit test */
+ IO_URING_F_NONBLOCK = INT_MIN,
+
+ /* ctx state flags, for URING_CMD */
+ IO_URING_F_SQE128 = (1 << 8),
+ IO_URING_F_CQE32 = (1 << 9),
+ IO_URING_F_IOPOLL = (1 << 10),
+
+ /* set when uring wants to cancel a previously issued command */
+ IO_URING_F_CANCEL = (1 << 11),
+ IO_URING_F_COMPAT = (1 << 12),
+};
+
struct io_wq_work_node {
struct io_wq_work_node *next;
};
@@ -69,8 +100,8 @@ struct io_uring_task {
};
struct io_uring {
- u32 head ____cacheline_aligned_in_smp;
- u32 tail ____cacheline_aligned_in_smp;
+ u32 head;
+ u32 tail;
};
/*
@@ -176,7 +207,6 @@ struct io_submit_state {
unsigned short submit_nr;
unsigned int cqes_count;
struct blk_plug plug;
- struct io_uring_cqe cqes[16];
};
struct io_ev_fd {
@@ -205,15 +235,17 @@ struct io_ring_ctx {
unsigned int has_evfd: 1;
/* all CQEs should be posted only by the submitter task */
unsigned int task_complete: 1;
+ unsigned int lockless_cq: 1;
unsigned int syscall_iopoll: 1;
unsigned int poll_activated: 1;
unsigned int drain_disabled: 1;
unsigned int compat: 1;
+ struct task_struct *submitter_task;
+ struct io_rings *rings;
+ struct percpu_ref refs;
+
enum task_work_notify_mode notify_method;
- struct io_rings *rings;
- struct task_struct *submitter_task;
- struct percpu_ref refs;
} ____cacheline_aligned_in_smp;
/* submission data */
@@ -251,31 +283,26 @@ struct io_ring_ctx {
struct io_buffer_list *io_bl;
struct xarray io_bl_xa;
- struct list_head io_buffers_cache;
struct io_hash_table cancel_table_locked;
- struct list_head cq_overflow_list;
struct io_alloc_cache apoll_cache;
struct io_alloc_cache netmsg_cache;
- } ____cacheline_aligned_in_smp;
-
- /* IRQ completion list, under ->completion_lock */
- struct io_wq_work_list locked_free_list;
- unsigned int locked_free_nr;
-
- const struct cred *sq_creds; /* cred used for __io_sq_thread() */
- struct io_sq_data *sq_data; /* if using sq thread polling */
- struct wait_queue_head sqo_sq_wait;
- struct list_head sqd_list;
-
- unsigned long check_cq;
-
- unsigned int file_alloc_start;
- unsigned int file_alloc_end;
+ /*
+ * ->iopoll_list is protected by the ctx->uring_lock for
+ * io_uring instances that don't use IORING_SETUP_SQPOLL.
+ * For SQPOLL, only the single threaded io_sq_thread() will
+ * manipulate the list, hence no extra locking is needed there.
+ */
+ struct io_wq_work_list iopoll_list;
+ bool poll_multi_queue;
- struct xarray personalities;
- u32 pers_next;
+ /*
+ * Any cancelable uring_cmd is added to this list in
+ * ->uring_cmd() by io_uring_cmd_mark_cancelable()
+ */
+ struct hlist_head cancelable_uring_cmd;
+ } ____cacheline_aligned_in_smp;
struct {
/*
@@ -288,39 +315,65 @@ struct io_ring_ctx {
unsigned cached_cq_tail;
unsigned cq_entries;
struct io_ev_fd __rcu *io_ev_fd;
- struct wait_queue_head cq_wait;
unsigned cq_extra;
} ____cacheline_aligned_in_smp;
+ /*
+ * task_work and async notification delivery cacheline. Expected to
+ * regularly bounce b/w CPUs.
+ */
struct {
- spinlock_t completion_lock;
-
- bool poll_multi_queue;
- atomic_t cq_wait_nr;
-
- /*
- * ->iopoll_list is protected by the ctx->uring_lock for
- * io_uring instances that don't use IORING_SETUP_SQPOLL.
- * For SQPOLL, only the single threaded io_sq_thread() will
- * manipulate the list, hence no extra locking is needed there.
- */
- struct io_wq_work_list iopoll_list;
- struct io_hash_table cancel_table;
-
struct llist_head work_llist;
-
- struct list_head io_buffers_comp;
+ unsigned long check_cq;
+ atomic_t cq_wait_nr;
+ atomic_t cq_timeouts;
+ struct wait_queue_head cq_wait;
} ____cacheline_aligned_in_smp;
/* timeouts */
struct {
spinlock_t timeout_lock;
- atomic_t cq_timeouts;
struct list_head timeout_list;
struct list_head ltimeout_list;
unsigned cq_last_tm_flush;
} ____cacheline_aligned_in_smp;
+ struct io_uring_cqe completion_cqes[16];
+
+ spinlock_t completion_lock;
+
+ /* IRQ completion list, under ->completion_lock */
+ struct io_wq_work_list locked_free_list;
+ unsigned int locked_free_nr;
+
+ struct list_head io_buffers_comp;
+ struct list_head cq_overflow_list;
+ struct io_hash_table cancel_table;
+
+ struct hlist_head waitid_list;
+
+#ifdef CONFIG_FUTEX
+ struct hlist_head futex_list;
+ struct io_alloc_cache futex_cache;
+#endif
+
+ const struct cred *sq_creds; /* cred used for __io_sq_thread() */
+ struct io_sq_data *sq_data; /* if using sq thread polling */
+
+ struct wait_queue_head sqo_sq_wait;
+ struct list_head sqd_list;
+
+ unsigned int file_alloc_start;
+ unsigned int file_alloc_end;
+
+ struct xarray personalities;
+ u32 pers_next;
+
+ struct list_head io_buffers_cache;
+
+ /* deferred free list, protected by ->uring_lock */
+ struct hlist_head io_buf_list;
+
/* Keep this last, we don't need it for the fast path */
struct wait_queue_head poll_wq;
struct io_restriction restrictions;
@@ -336,11 +389,6 @@ struct io_ring_ctx {
struct wait_queue_head rsrc_quiesce_wq;
unsigned rsrc_quiesce;
- struct list_head io_buffers_pages;
-
- #if defined(CONFIG_UNIX)
- struct socket *ring_sock;
- #endif
/* hashed buffered write serialization */
struct io_wq_hash *hash_map;
@@ -364,6 +412,15 @@ struct io_ring_ctx {
unsigned sq_thread_idle;
/* protected by ->completion_lock */
unsigned evfd_last_cq_tail;
+
+ /*
+ * If IORING_SETUP_NO_MMAP is used, then the below holds
+ * the gup'ed pages for the two rings, and the sqes.
+ */
+ unsigned short n_ring_pages;
+ unsigned short n_sqe_pages;
+ struct page **ring_pages;
+ struct page **sqe_pages;
};
struct io_tw_state {
@@ -399,13 +456,13 @@ enum {
REQ_F_SINGLE_POLL_BIT,
REQ_F_DOUBLE_POLL_BIT,
REQ_F_PARTIAL_IO_BIT,
- REQ_F_CQE32_INIT_BIT,
REQ_F_APOLL_MULTISHOT_BIT,
REQ_F_CLEAR_POLLIN_BIT,
REQ_F_HASH_LOCKED_BIT,
/* keep async read/write and isreg together and in order */
REQ_F_SUPPORT_NOWAIT_BIT,
REQ_F_ISREG_BIT,
+ REQ_F_POLL_NO_LAZY_BIT,
/* not a real bit, just to check we're not overflowing the space */
__REQ_F_LAST_BIT,
@@ -469,12 +526,12 @@ enum {
REQ_F_PARTIAL_IO = BIT(REQ_F_PARTIAL_IO_BIT),
/* fast poll multishot mode */
REQ_F_APOLL_MULTISHOT = BIT(REQ_F_APOLL_MULTISHOT_BIT),
- /* ->extra1 and ->extra2 are initialised */
- REQ_F_CQE32_INIT = BIT(REQ_F_CQE32_INIT_BIT),
/* recvmsg special flag, clear EPOLLIN */
REQ_F_CLEAR_POLLIN = BIT(REQ_F_CLEAR_POLLIN_BIT),
/* hashed into ->cancel_hash_locked, protected by ->uring_lock */
REQ_F_HASH_LOCKED = BIT(REQ_F_HASH_LOCKED_BIT),
+ /* don't use lazy poll wake for this request */
+ REQ_F_POLL_NO_LAZY = BIT(REQ_F_POLL_NO_LAZY_BIT),
};
typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts);
@@ -569,13 +626,7 @@ struct io_kiocb {
struct io_task_work io_task_work;
unsigned nr_tw;
/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
- union {
- struct hlist_node hash_node;
- struct {
- u64 extra1;
- u64 extra2;
- };
- };
+ struct hlist_node hash_node;
/* internal polling, see IORING_FEAT_FAST_POLL */
struct async_poll *apoll;
/* opcode allocated if it needs to store data for async defer */
@@ -585,6 +636,11 @@ struct io_kiocb {
/* custom credentials, valid IFF REQ_F_CREDS is set */
const struct cred *creds;
struct io_wq_work work;
+
+ struct {
+ u64 extra1;
+ u64 extra2;
+ } big_cqe;
};
struct io_overflow_cqe {
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index e2b836c2e119..96dd0acbba44 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -58,7 +58,11 @@ struct vm_fault;
#define IOMAP_F_DIRTY (1U << 1)
#define IOMAP_F_SHARED (1U << 2)
#define IOMAP_F_MERGED (1U << 3)
+#ifdef CONFIG_BUFFER_HEAD
#define IOMAP_F_BUFFER_HEAD (1U << 4)
+#else
+#define IOMAP_F_BUFFER_HEAD 0
+#endif /* CONFIG_BUFFER_HEAD */
#define IOMAP_F_XATTR (1U << 5)
/*
@@ -261,9 +265,10 @@ int iomap_file_buffered_write_punch_delalloc(struct inode *inode,
int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops);
void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops);
bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count);
-struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos);
+struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len);
bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags);
void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len);
+bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio);
int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
const struct iomap_ops *ops);
int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index e8c9a7da1060..5e27cb3a3be9 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -13,6 +13,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
+#include <linux/iova_bitmap.h>
#include <uapi/linux/iommu.h>
#define IOMMU_READ (1 << 0)
@@ -37,6 +38,7 @@ struct bus_type;
struct device;
struct iommu_domain;
struct iommu_domain_ops;
+struct iommu_dirty_ops;
struct notifier_block;
struct iommu_sva;
struct iommu_fault_event;
@@ -64,7 +66,12 @@ struct iommu_domain_geometry {
#define __IOMMU_DOMAIN_DMA_FQ (1U << 3) /* DMA-API uses flush queue */
#define __IOMMU_DOMAIN_SVA (1U << 4) /* Shared process address space */
+#define __IOMMU_DOMAIN_PLATFORM (1U << 5)
+#define __IOMMU_DOMAIN_NESTED (1U << 6) /* User-managed address space nested
+ on a stage-2 translation */
+
+#define IOMMU_DOMAIN_ALLOC_FLAGS ~__IOMMU_DOMAIN_DMA_FQ
/*
* These are the possible domain-types
*
@@ -80,6 +87,8 @@ struct iommu_domain_geometry {
* invalidation.
* IOMMU_DOMAIN_SVA - DMA addresses are shared process addresses
* represented by mm_struct's.
+ * IOMMU_DOMAIN_PLATFORM - Legacy domain for drivers that do their own
+ * dma_api stuff. Do not use in new drivers.
*/
#define IOMMU_DOMAIN_BLOCKED (0U)
#define IOMMU_DOMAIN_IDENTITY (__IOMMU_DOMAIN_PT)
@@ -90,10 +99,14 @@ struct iommu_domain_geometry {
__IOMMU_DOMAIN_DMA_API | \
__IOMMU_DOMAIN_DMA_FQ)
#define IOMMU_DOMAIN_SVA (__IOMMU_DOMAIN_SVA)
+#define IOMMU_DOMAIN_PLATFORM (__IOMMU_DOMAIN_PLATFORM)
+#define IOMMU_DOMAIN_NESTED (__IOMMU_DOMAIN_NESTED)
struct iommu_domain {
unsigned type;
const struct iommu_domain_ops *ops;
+ const struct iommu_dirty_ops *dirty_ops;
+ const struct iommu_ops *owner; /* Whose domain_alloc we came from */
unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */
struct iommu_domain_geometry geometry;
struct iommu_dma_cookie *iova_cookie;
@@ -108,6 +121,11 @@ struct iommu_domain {
struct { /* IOMMU_DOMAIN_SVA */
struct mm_struct *mm;
int users;
+ /*
+ * Next iommu_domain in mm->iommu_mm->sva-domains list
+ * protected by iommu_sva_lock.
+ */
+ struct list_head next;
};
};
};
@@ -127,6 +145,12 @@ enum iommu_cap {
* this device.
*/
IOMMU_CAP_ENFORCE_CACHE_COHERENCY,
+ /*
+ * IOMMU driver does not issue TLB maintenance during .unmap, so can
+ * usefully support the non-strict DMA flush queue.
+ */
+ IOMMU_CAP_DEFERRED_FLUSH,
+ IOMMU_CAP_DIRTY_TRACKING, /* IOMMU supports dirty tracking */
};
/* These are the possible reserved region types */
@@ -190,6 +214,8 @@ enum iommu_dev_features {
IOMMU_DEV_FEAT_IOPF,
};
+#define IOMMU_NO_PASID (0U) /* Reserved for DMA w/o PASID */
+#define IOMMU_FIRST_GLOBAL_PASID (1U) /* starting range for allocation */
#define IOMMU_PASID_INVALID (-1U)
typedef unsigned int ioasid_t;
@@ -220,16 +246,183 @@ struct iommu_iotlb_gather {
};
/**
+ * struct iommu_dirty_bitmap - Dirty IOVA bitmap state
+ * @bitmap: IOVA bitmap
+ * @gather: Range information for a pending IOTLB flush
+ */
+struct iommu_dirty_bitmap {
+ struct iova_bitmap *bitmap;
+ struct iommu_iotlb_gather *gather;
+};
+
+/* Read but do not clear any dirty bits */
+#define IOMMU_DIRTY_NO_CLEAR (1 << 0)
+
+/**
+ * struct iommu_dirty_ops - domain specific dirty tracking operations
+ * @set_dirty_tracking: Enable or Disable dirty tracking on the iommu domain
+ * @read_and_clear_dirty: Walk IOMMU page tables for dirtied PTEs marshalled
+ * into a bitmap, with a bit represented as a page.
+ * Reads the dirty PTE bits and clears them from
+ * the IO pagetables.
+ */
+struct iommu_dirty_ops {
+ int (*set_dirty_tracking)(struct iommu_domain *domain, bool enabled);
+ int (*read_and_clear_dirty)(struct iommu_domain *domain,
+ unsigned long iova, size_t size,
+ unsigned long flags,
+ struct iommu_dirty_bitmap *dirty);
+};
+
+/**
+ * struct iommu_user_data - iommu driver specific user space data info
+ * @type: The data type of the user buffer
+ * @uptr: Pointer to the user buffer for copy_from_user()
+ * @len: The length of the user buffer in bytes
+ *
+ * The user space data is defined by an uAPI struct in include/uapi/linux/iommufd.h.
+ * @type, @uptr and @len should be copied directly from an iommufd core uAPI struct.
+ */
+struct iommu_user_data {
+ unsigned int type;
+ void __user *uptr;
+ size_t len;
+};
+
+/**
+ * struct iommu_user_data_array - iommu driver specific user space data array
+ * @type: The data type of all the entries in the user buffer array
+ * @uptr: Pointer to the user buffer array
+ * @entry_len: The fixed-width length of an entry in the array, in bytes
+ * @entry_num: The number of total entries in the array
+ *
+ * The user buffer includes an array of requests with format defined in
+ * include/uapi/linux/iommufd.h
+ */
+struct iommu_user_data_array {
+ unsigned int type;
+ void __user *uptr;
+ size_t entry_len;
+ u32 entry_num;
+};
+
+/**
+ * __iommu_copy_struct_from_user - Copy iommu driver specific user space data
+ * @dst_data: Pointer to an iommu driver specific user data that is defined in
+ * include/uapi/linux/iommufd.h
+ * @src_data: Pointer to a struct iommu_user_data for user space data info
+ * @data_type: The data type of the @dst_data. Must match with @src_data.type
+ * @data_len: Length of current user data structure, i.e. sizeof(struct _dst)
+ * @min_len: Initial length of user data structure for backward compatibility.
+ * This should be offsetofend using the last member in the user data
+ * struct that was initially added to include/uapi/linux/iommufd.h
+ */
+static inline int __iommu_copy_struct_from_user(
+ void *dst_data, const struct iommu_user_data *src_data,
+ unsigned int data_type, size_t data_len, size_t min_len)
+{
+ if (src_data->type != data_type)
+ return -EINVAL;
+ if (WARN_ON(!dst_data || !src_data))
+ return -EINVAL;
+ if (src_data->len < min_len || data_len < src_data->len)
+ return -EINVAL;
+ return copy_struct_from_user(dst_data, data_len, src_data->uptr,
+ src_data->len);
+}
+
+/**
+ * iommu_copy_struct_from_user - Copy iommu driver specific user space data
+ * @kdst: Pointer to an iommu driver specific user data that is defined in
+ * include/uapi/linux/iommufd.h
+ * @user_data: Pointer to a struct iommu_user_data for user space data info
+ * @data_type: The data type of the @kdst. Must match with @user_data->type
+ * @min_last: The last member of the data structure @kdst points to in the
+ * initial version.
+ * Return 0 for success, otherwise -error.
+ */
+#define iommu_copy_struct_from_user(kdst, user_data, data_type, min_last) \
+ __iommu_copy_struct_from_user(kdst, user_data, data_type, \
+ sizeof(*kdst), \
+ offsetofend(typeof(*kdst), min_last))
+
+/**
+ * __iommu_copy_struct_from_user_array - Copy iommu driver specific user space
+ * data from an iommu_user_data_array
+ * @dst_data: Pointer to an iommu driver specific user data that is defined in
+ * include/uapi/linux/iommufd.h
+ * @src_array: Pointer to a struct iommu_user_data_array for a user space array
+ * @data_type: The data type of the @dst_data. Must match with @src_array.type
+ * @index: Index to the location in the array to copy user data from
+ * @data_len: Length of current user data structure, i.e. sizeof(struct _dst)
+ * @min_len: Initial length of user data structure for backward compatibility.
+ * This should be offsetofend using the last member in the user data
+ * struct that was initially added to include/uapi/linux/iommufd.h
+ */
+static inline int __iommu_copy_struct_from_user_array(
+ void *dst_data, const struct iommu_user_data_array *src_array,
+ unsigned int data_type, unsigned int index, size_t data_len,
+ size_t min_len)
+{
+ struct iommu_user_data src_data;
+
+ if (WARN_ON(!src_array || index >= src_array->entry_num))
+ return -EINVAL;
+ if (!src_array->entry_num)
+ return -EINVAL;
+ src_data.uptr = src_array->uptr + src_array->entry_len * index;
+ src_data.len = src_array->entry_len;
+ src_data.type = src_array->type;
+
+ return __iommu_copy_struct_from_user(dst_data, &src_data, data_type,
+ data_len, min_len);
+}
+
+/**
+ * iommu_copy_struct_from_user_array - Copy iommu driver specific user space
+ * data from an iommu_user_data_array
+ * @kdst: Pointer to an iommu driver specific user data that is defined in
+ * include/uapi/linux/iommufd.h
+ * @user_array: Pointer to a struct iommu_user_data_array for a user space
+ * array
+ * @data_type: The data type of the @kdst. Must match with @user_array->type
+ * @index: Index to the location in the array to copy user data from
+ * @min_last: The last member of the data structure @kdst points to in the
+ * initial version.
+ * Return 0 for success, otherwise -error.
+ */
+#define iommu_copy_struct_from_user_array(kdst, user_array, data_type, index, \
+ min_last) \
+ __iommu_copy_struct_from_user_array( \
+ kdst, user_array, data_type, index, sizeof(*(kdst)), \
+ offsetofend(typeof(*(kdst)), min_last))
+
+/**
* struct iommu_ops - iommu ops and capabilities
* @capable: check capability
- * @domain_alloc: allocate iommu domain
+ * @hw_info: report iommu hardware information. The data buffer returned by this
+ * op is allocated in the iommu driver and freed by the caller after
+ * use. The information type is one of enum iommu_hw_info_type defined
+ * in include/uapi/linux/iommufd.h.
+ * @domain_alloc: allocate and return an iommu domain on success, or NULL
+ * otherwise. The domain is not fully initialized until
+ * the caller, iommu_domain_alloc(), returns.
+ * @domain_alloc_user: Allocate an iommu domain corresponding to the input
+ * parameters as defined in include/uapi/linux/iommufd.h.
+ * Unlike @domain_alloc, it is called only by IOMMUFD and
+ * must fully initialize the new domain before return.
+ * Upon success, if the @user_data is valid and the @parent
+ * points to a kernel-managed domain, the new domain must be
+ * IOMMU_DOMAIN_NESTED type; otherwise, the @parent must be
+ * NULL while the @user_data can be optionally provided, and
+ * the new domain must support __IOMMU_DOMAIN_PAGING.
+ * Upon failure, ERR_PTR must be returned.
+ * @domain_alloc_paging: Allocate an iommu_domain that can be used for
+ * UNMANAGED, DMA, and DMA_FQ domain types.
* @probe_device: Add device to iommu driver handling
* @release_device: Remove device from iommu driver handling
* @probe_finalize: Do final setup work after the device is added to an IOMMU
* group and attached to the groups domain
- * @set_platform_dma_ops: Returning control back to the platform DMA ops. This op
- * is to support old IOMMU drivers, new drivers should use
- * default domains, and the common IOMMU DMA ops.
* @device_group: find iommu group for a particular device
* @get_resv_regions: Request list of reserved regions for a device
* @of_xlate: add OF master IDs to iommu grouping
@@ -248,17 +441,28 @@ struct iommu_iotlb_gather {
* will be blocked by the hardware.
* @pgsize_bitmap: bitmap of all possible supported page sizes
* @owner: Driver module providing these ops
+ * @identity_domain: An always available, always attachable identity
+ * translation.
+ * @blocked_domain: An always available, always attachable blocking
+ * translation.
+ * @default_domain: If not NULL this will always be set as the default domain.
+ * This should be an IDENTITY/BLOCKED/PLATFORM domain.
+ * Do not use in new drivers.
*/
struct iommu_ops {
bool (*capable)(struct device *dev, enum iommu_cap);
+ void *(*hw_info)(struct device *dev, u32 *length, u32 *type);
/* Domain allocation and freeing by the iommu driver */
struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
+ struct iommu_domain *(*domain_alloc_user)(
+ struct device *dev, u32 flags, struct iommu_domain *parent,
+ const struct iommu_user_data *user_data);
+ struct iommu_domain *(*domain_alloc_paging)(struct device *dev);
struct iommu_device *(*probe_device)(struct device *dev);
void (*release_device)(struct device *dev);
void (*probe_finalize)(struct device *dev);
- void (*set_platform_dma_ops)(struct device *dev);
struct iommu_group *(*device_group)(struct device *dev);
/* Request/Free a list of reserved regions for a device */
@@ -281,6 +485,9 @@ struct iommu_ops {
const struct iommu_domain_ops *default_domain_ops;
unsigned long pgsize_bitmap;
struct module *owner;
+ struct iommu_domain *identity_domain;
+ struct iommu_domain *blocked_domain;
+ struct iommu_domain *default_domain;
};
/**
@@ -299,15 +506,20 @@ struct iommu_ops {
* * ENODEV - device specific errors, not able to be attached
* * <others> - treated as ENODEV by the caller. Use is discouraged
* @set_dev_pasid: set an iommu domain to a pasid of device
- * @map: map a physically contiguous memory region to an iommu domain
* @map_pages: map a physically contiguous set of pages of the same size to
* an iommu domain.
- * @unmap: unmap a physically contiguous memory region from an iommu domain
* @unmap_pages: unmap a number of pages of the same size from an iommu domain
* @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
* @iotlb_sync_map: Sync mappings created recently using @map to the hardware
* @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
* queue
+ * @cache_invalidate_user: Flush hardware cache for user space IO page table.
+ * The @domain must be IOMMU_DOMAIN_NESTED. The @array
+ * passes in the cache invalidation requests, in the form
+ * of a driver data structure. The driver must update
+ * array->entry_num to report the number of handled
+ * invalidation requests. The driver data structure
+ * must be defined in include/uapi/linux/iommufd.h
* @iova_to_phys: translate iova to physical address
* @enforce_cache_coherency: Prevent any kind of DMA from bypassing IOMMU_CACHE,
* including no-snoop TLPs on PCIe or other platform
@@ -321,22 +533,20 @@ struct iommu_domain_ops {
int (*set_dev_pasid)(struct iommu_domain *domain, struct device *dev,
ioasid_t pasid);
- int (*map)(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t pgsize, size_t pgcount,
int prot, gfp_t gfp, size_t *mapped);
- size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
- size_t size, struct iommu_iotlb_gather *iotlb_gather);
size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
size_t pgsize, size_t pgcount,
struct iommu_iotlb_gather *iotlb_gather);
void (*flush_iotlb_all)(struct iommu_domain *domain);
- void (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
- size_t size);
+ int (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
+ size_t size);
void (*iotlb_sync)(struct iommu_domain *domain,
struct iommu_iotlb_gather *iotlb_gather);
+ int (*cache_invalidate_user)(struct iommu_domain *domain,
+ struct iommu_user_data_array *array);
phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
dma_addr_t iova);
@@ -355,6 +565,7 @@ struct iommu_domain_ops {
* @list: Used by the iommu-core to keep a list of registered iommus
* @ops: iommu-ops for talking to this iommu
* @dev: struct device for sysfs handling
+ * @singleton_group: Used internally for drivers that have only one group
* @max_pasids: number of supported PASIDs
*/
struct iommu_device {
@@ -362,6 +573,7 @@ struct iommu_device {
const struct iommu_ops *ops;
struct fwnode_handle *fwnode;
struct device *dev;
+ struct iommu_group *singleton_group;
u32 max_pasids;
};
@@ -403,6 +615,9 @@ struct iommu_fault_param {
* @priv: IOMMU Driver private data
* @max_pasids: number of PASIDs this device can consume
* @attach_deferred: the dma domain attachment is deferred
+ * @pci_32bit_workaround: Limit DMA allocations to 32-bit IOVAs
+ * @require_direct: device requires IOMMU_RESV_DIRECT regions
+ * @shadow_on_flush: IOTLB flushes are used to sync shadow tables
*
* TODO: migrate other per device data pointers under iommu_dev_data, e.g.
* struct iommu_group *iommu_group;
@@ -416,6 +631,9 @@ struct dev_iommu {
void *priv;
u32 max_pasids;
u32 attach_deferred:1;
+ u32 pci_32bit_workaround:1;
+ u32 require_direct:1;
+ u32 shadow_on_flush:1;
};
int iommu_device_register(struct iommu_device *iommu,
@@ -444,17 +662,6 @@ static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
};
}
-static inline const struct iommu_ops *dev_iommu_ops(struct device *dev)
-{
- /*
- * Assume that valid ops must be installed if iommu_probe_device()
- * has succeeded. The device ops are essentially for internal use
- * within the IOMMU subsystem itself, so we should be able to trust
- * ourselves not to misuse the helper.
- */
- return dev->iommu->iommu_dev->ops;
-}
-
extern int bus_iommu_probe(const struct bus_type *bus);
extern bool iommu_present(const struct bus_type *bus);
extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap);
@@ -626,12 +833,35 @@ static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
return gather && gather->queued;
}
+static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty,
+ struct iova_bitmap *bitmap,
+ struct iommu_iotlb_gather *gather)
+{
+ if (gather)
+ iommu_iotlb_gather_init(gather);
+
+ dirty->bitmap = bitmap;
+ dirty->gather = gather;
+}
+
+static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty,
+ unsigned long iova,
+ unsigned long length)
+{
+ if (dirty->bitmap)
+ iova_bitmap_set(dirty->bitmap, iova, length);
+
+ if (dirty->gather)
+ iommu_iotlb_gather_add_range(dirty->gather, iova, length);
+}
+
/* PCI device grouping function */
extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */
extern struct iommu_group *generic_device_group(struct device *dev);
/* FSL-MC device grouping function */
struct iommu_group *fsl_mc_device_group(struct device *dev);
+extern struct iommu_group *generic_single_device_group(struct device *dev);
/**
* struct iommu_fwspec - per-device IOMMU instance data
@@ -662,6 +892,14 @@ struct iommu_fwspec {
struct iommu_sva {
struct device *dev;
struct iommu_domain *domain;
+ struct list_head handle_item;
+ refcount_t users;
+};
+
+struct iommu_mm_data {
+ u32 pasid;
+ struct list_head sva_domains;
+ struct list_head sva_handles;
};
int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
@@ -692,11 +930,9 @@ static inline void *dev_iommu_priv_get(struct device *dev)
return NULL;
}
-static inline void dev_iommu_priv_set(struct device *dev, void *priv)
-{
- dev->iommu->priv = priv;
-}
+void dev_iommu_priv_set(struct device *dev, void *priv);
+extern struct mutex iommu_probe_device_lock;
int iommu_probe_device(struct device *dev);
int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
@@ -721,6 +957,8 @@ void iommu_detach_device_pasid(struct iommu_domain *domain,
struct iommu_domain *
iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
unsigned int type);
+ioasid_t iommu_alloc_global_pasid(struct device *dev);
+void iommu_free_global_pasid(ioasid_t pasid);
#else /* CONFIG_IOMMU_API */
struct iommu_ops {};
@@ -729,6 +967,8 @@ struct iommu_fwspec {};
struct iommu_device {};
struct iommu_fault_param {};
struct iommu_iotlb_gather {};
+struct iommu_dirty_bitmap {};
+struct iommu_dirty_ops {};
static inline bool iommu_present(const struct bus_type *bus)
{
@@ -961,6 +1201,18 @@ static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
return false;
}
+static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty,
+ struct iova_bitmap *bitmap,
+ struct iommu_iotlb_gather *gather)
+{
+}
+
+static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty,
+ unsigned long iova,
+ unsigned long length)
+{
+}
+
static inline void iommu_device_unregister(struct iommu_device *iommu)
{
}
@@ -1082,6 +1334,13 @@ iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
{
return NULL;
}
+
+static inline ioasid_t iommu_alloc_global_pasid(struct device *dev)
+{
+ return IOMMU_PASID_INVALID;
+}
+
+static inline void iommu_free_global_pasid(ioasid_t pasid) {}
#endif /* CONFIG_IOMMU_API */
/**
@@ -1094,7 +1353,7 @@ iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
* Creates a mapping at @iova for the buffer described by a scatterlist
* stored in the given sg_table object in the provided IOMMU domain.
*/
-static inline size_t iommu_map_sgtable(struct iommu_domain *domain,
+static inline ssize_t iommu_map_sgtable(struct iommu_domain *domain,
unsigned long iova, struct sg_table *sgt, int prot)
{
return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot,
@@ -1165,15 +1424,33 @@ static inline bool tegra_dev_iommu_get_stream_id(struct device *dev, u32 *stream
return false;
}
-#ifdef CONFIG_IOMMU_SVA
+#ifdef CONFIG_IOMMU_MM_DATA
static inline void mm_pasid_init(struct mm_struct *mm)
{
- mm->pasid = IOMMU_PASID_INVALID;
+ /*
+ * During dup_mm(), a new mm will be memcpy'd from an old one and that makes
+ * the new mm and the old one point to a same iommu_mm instance. When either
+ * one of the two mms gets released, the iommu_mm instance is freed, leaving
+ * the other mm running into a use-after-free/double-free problem. To avoid
+ * the problem, zeroing the iommu_mm pointer of a new mm is needed here.
+ */
+ mm->iommu_mm = NULL;
}
+
static inline bool mm_valid_pasid(struct mm_struct *mm)
{
- return mm->pasid != IOMMU_PASID_INVALID;
+ return READ_ONCE(mm->iommu_mm);
}
+
+static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm)
+{
+ struct iommu_mm_data *iommu_mm = READ_ONCE(mm->iommu_mm);
+
+ if (!iommu_mm)
+ return IOMMU_PASID_INVALID;
+ return iommu_mm->pasid;
+}
+
void mm_pasid_drop(struct mm_struct *mm);
struct iommu_sva *iommu_sva_bind_device(struct device *dev,
struct mm_struct *mm);
@@ -1196,6 +1473,12 @@ static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle)
}
static inline void mm_pasid_init(struct mm_struct *mm) {}
static inline bool mm_valid_pasid(struct mm_struct *mm) { return false; }
+
+static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm)
+{
+ return IOMMU_PASID_INVALID;
+}
+
static inline void mm_pasid_drop(struct mm_struct *mm) {}
#endif /* CONFIG_IOMMU_SVA */
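How a driver is expected to use iommu_copy_struct_from_user(): @min_last names the last member that was present in the first uAPI version, so older, shorter user structs still copy cleanly while newer members default to zero. The data type and struct below are hypothetical:

struct my_hwpt_data {
	__u64 base;	/* member present since the first uAPI version */
	__u64 flags;	/* member added in a later version */
};

static int my_parse(const struct iommu_user_data *user_data)
{
	struct my_hwpt_data data = {};

	/* Accepts old user structs that end at 'base'; 'flags' stays 0. */
	return iommu_copy_struct_from_user(&data, user_data,
					   MY_HWPT_DATA_TYPE, base);
}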
diff --git a/include/linux/iommufd.h b/include/linux/iommufd.h
index 1129a36a74c4..ffc3a949f837 100644
--- a/include/linux/iommufd.h
+++ b/include/linux/iommufd.h
@@ -16,14 +16,19 @@ struct page;
struct iommufd_ctx;
struct iommufd_access;
struct file;
+struct iommu_group;
struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
struct device *dev, u32 *id);
void iommufd_device_unbind(struct iommufd_device *idev);
int iommufd_device_attach(struct iommufd_device *idev, u32 *pt_id);
+int iommufd_device_replace(struct iommufd_device *idev, u32 *pt_id);
void iommufd_device_detach(struct iommufd_device *idev);
+struct iommufd_ctx *iommufd_device_to_ictx(struct iommufd_device *idev);
+u32 iommufd_device_to_id(struct iommufd_device *idev);
+
struct iommufd_access_ops {
u8 needs_pin_pages : 1;
void (*unmap)(void *data, unsigned long iova, unsigned long length);
@@ -44,12 +49,16 @@ iommufd_access_create(struct iommufd_ctx *ictx,
const struct iommufd_access_ops *ops, void *data, u32 *id);
void iommufd_access_destroy(struct iommufd_access *access);
int iommufd_access_attach(struct iommufd_access *access, u32 ioas_id);
+int iommufd_access_replace(struct iommufd_access *access, u32 ioas_id);
+void iommufd_access_detach(struct iommufd_access *access);
void iommufd_ctx_get(struct iommufd_ctx *ictx);
#if IS_ENABLED(CONFIG_IOMMUFD)
struct iommufd_ctx *iommufd_ctx_from_file(struct file *file);
+struct iommufd_ctx *iommufd_ctx_from_fd(int fd);
void iommufd_ctx_put(struct iommufd_ctx *ictx);
+bool iommufd_ctx_has_group(struct iommufd_ctx *ictx, struct iommu_group *group);
int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
unsigned long length, struct page **out_pages,
diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h
index 2c8860e406bd..19a7b00baff4 100644
--- a/include/linux/iopoll.h
+++ b/include/linux/iopoll.h
@@ -53,6 +53,7 @@
} \
if (__sleep_us) \
usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
+ cpu_relax(); \
} \
(cond) ? 0 : -ETIMEDOUT; \
})
@@ -73,6 +74,10 @@
* Returns 0 on success and -ETIMEDOUT upon a timeout. In either
* case, the last read value at @args is stored in @val.
*
+ * This macro does not rely on timekeeping. Hence it is safe to call even when
+ * timekeeping is suspended, at the expense of an underestimation of wall clock
+ * time, which is rather minimal with a non-zero delay_us.
+ *
* When available, you'll probably want to use one of the specialized
* macros defined below rather than this macro directly.
*/
@@ -80,21 +85,30 @@
delay_before_read, args...) \
({ \
u64 __timeout_us = (timeout_us); \
+ s64 __left_ns = __timeout_us * NSEC_PER_USEC; \
unsigned long __delay_us = (delay_us); \
- ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
- if (delay_before_read && __delay_us) \
+ u64 __delay_ns = __delay_us * NSEC_PER_USEC; \
+ if (delay_before_read && __delay_us) { \
udelay(__delay_us); \
+ if (__timeout_us) \
+ __left_ns -= __delay_ns; \
+ } \
for (;;) { \
(val) = op(args); \
if (cond) \
break; \
- if (__timeout_us && \
- ktime_compare(ktime_get(), __timeout) > 0) { \
+ if (__timeout_us && __left_ns < 0) { \
(val) = op(args); \
break; \
} \
- if (__delay_us) \
+ if (__delay_us) { \
udelay(__delay_us); \
+ if (__timeout_us) \
+ __left_ns -= __delay_ns; \
+ } \
+ cpu_relax(); \
+ if (__timeout_us) \
+ __left_ns--; \
} \
(cond) ? 0 : -ETIMEDOUT; \
})
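A usage sketch of the atomic polling loop reworked above, invoked through read_poll_timeout_atomic() (the register base, offset and READY bit are illustrative): poll a status register every 10 us for at most 1 ms, without relying on timekeeping:

	u32 sts;
	int err;

	/* readl() at base + 0x04 until bit 0 asserts; no ktime involved */
	err = read_poll_timeout_atomic(readl, sts, sts & BIT(0),
				       10, 1000, false, base + 0x04);
	if (err)
		return err;	/* -ETIMEDOUT: READY never asserted */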
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 25d768d48970..db7fe25f3370 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -229,7 +229,7 @@ static inline unsigned long resource_ext_type(const struct resource *res)
return res->flags & IORESOURCE_EXT_TYPE_BITS;
}
/* True iff r1 completely contains r2 */
-static inline bool resource_contains(struct resource *r1, struct resource *r2)
+static inline bool resource_contains(const struct resource *r1, const struct resource *r2)
{
if (resource_type(r1) != resource_type(r2))
return false;
@@ -239,13 +239,13 @@ static inline bool resource_contains(struct resource *r1, struct resource *r2)
}
/* True if any part of r1 overlaps r2 */
-static inline bool resource_overlaps(struct resource *r1, struct resource *r2)
+static inline bool resource_overlaps(const struct resource *r1, const struct resource *r2)
{
return r1->start <= r2->end && r1->end >= r2->start;
}
-static inline bool
-resource_intersection(struct resource *r1, struct resource *r2, struct resource *r)
+static inline bool resource_intersection(const struct resource *r1, const struct resource *r2,
+ struct resource *r)
{
if (!resource_overlaps(r1, r2))
return false;
@@ -254,8 +254,8 @@ resource_intersection(struct resource *r1, struct resource *r2, struct resource
return true;
}
-static inline bool
-resource_union(struct resource *r1, struct resource *r2, struct resource *r)
+static inline bool resource_union(const struct resource *r1, const struct resource *r2,
+ struct resource *r)
{
if (!resource_overlaps(r1, r2))
return false;
@@ -331,6 +331,9 @@ extern int
walk_system_ram_res(u64 start, u64 end, void *arg,
int (*func)(struct resource *, void *));
extern int
+walk_system_ram_res_rev(u64 start, u64 end, void *arg,
+ int (*func)(struct resource *, void *));
+extern int
walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start, u64 end,
void *arg, int (*func)(struct resource *, void *));
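A small sketch of what the constified resource helpers above enable (addresses illustrative): read-only descriptors can now be passed without casts:

static bool example_union(void)
{
	static const struct resource a = DEFINE_RES_MEM(0x1000, 0x1000);
	static const struct resource b = DEFINE_RES_MEM(0x1800, 0x1000);
	struct resource u;

	/* true: the ranges overlap, and u spans [0x1000, 0x27ff] */
	return resource_union(&a, &b, &u);
}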
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
index 7578d4f6a969..db1249cd9692 100644
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -47,7 +47,30 @@ static inline int task_nice_ioclass(struct task_struct *task)
}
#ifdef CONFIG_BLOCK
-int __get_task_ioprio(struct task_struct *p);
+/*
+ * If the task has set an I/O priority, use that. Otherwise, return
+ * the default I/O priority.
+ *
+ * Expected to be called for the current task or with task_lock() held to keep
+ * io_context stable.
+ */
+static inline int __get_task_ioprio(struct task_struct *p)
+{
+ struct io_context *ioc = p->io_context;
+ int prio;
+
+ if (!ioc)
+ return IOPRIO_DEFAULT;
+
+ if (p != current)
+ lockdep_assert_held(&p->alloc_lock);
+
+ prio = ioc->ioprio;
+ if (IOPRIO_PRIO_CLASS(prio) == IOPRIO_CLASS_NONE)
+ prio = IOPRIO_PRIO_VALUE(task_nice_ioclass(p),
+ task_nice_ioprio(p));
+ return prio;
+}
#else
static inline int __get_task_ioprio(struct task_struct *p)
{
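A hypothetical caller sketch for the now-inline __get_task_ioprio(): per the comment above, reading another task's priority requires task_lock():

static int example_task_ioprio(struct task_struct *p)
{
	int prio;

	task_lock(p);			/* keep p->io_context stable */
	prio = __get_task_ioprio(p);
	task_unlock(p);
	return prio;
}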
diff --git a/include/linux/ioremap.h b/include/linux/ioremap.h
new file mode 100644
index 000000000000..f0e99fc7dd8b
--- /dev/null
+++ b/include/linux/ioremap.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_IOREMAP_H
+#define _LINUX_IOREMAP_H
+
+#include <linux/kasan.h>
+#include <asm/pgtable.h>
+
+#if defined(CONFIG_HAS_IOMEM) || defined(CONFIG_GENERIC_IOREMAP)
+/*
+ * Ioremap often, but not always, uses the generic vmalloc area. E.g. on
+ * the Power architecture it can have a different ioremap space.
+ */
+#ifndef IOREMAP_START
+#define IOREMAP_START VMALLOC_START
+#define IOREMAP_END VMALLOC_END
+#endif
+static inline bool is_ioremap_addr(const void *x)
+{
+ unsigned long addr = (unsigned long)kasan_reset_tag(x);
+
+ return addr >= IOREMAP_START && addr < IOREMAP_END;
+}
+#else
+static inline bool is_ioremap_addr(const void *x)
+{
+ return false;
+}
+#endif
+
+#endif /* _LINUX_IOREMAP_H */
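An illustrative use of the new predicate (the debug message is made up):

	if (is_ioremap_addr(ptr))
		pr_debug("%px lies in the ioremap range\n", ptr);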
diff --git a/include/linux/iosys-map.h b/include/linux/iosys-map.h
index cb71aa616bd3..e3649a6563dd 100644
--- a/include/linux/iosys-map.h
+++ b/include/linux/iosys-map.h
@@ -168,9 +168,9 @@ struct iosys_map {
* about the use of uninitialized variable.
*/
#define IOSYS_MAP_INIT_OFFSET(map_, offset_) ({ \
- struct iosys_map copy = *map_; \
- iosys_map_incr(&copy, offset_); \
- copy; \
+ struct iosys_map copy_ = *map_; \
+ iosys_map_incr(&copy_, offset_); \
+ copy_; \
})
/**
@@ -391,14 +391,14 @@ static inline void iosys_map_memset(struct iosys_map *dst, size_t offset,
* Returns:
* The value read from the mapping.
*/
-#define iosys_map_rd(map__, offset__, type__) ({ \
- type__ val; \
- if ((map__)->is_iomem) { \
- __iosys_map_rd_io(val, (map__)->vaddr_iomem + (offset__), type__);\
- } else { \
- __iosys_map_rd_sys(val, (map__)->vaddr + (offset__), type__); \
- } \
- val; \
+#define iosys_map_rd(map__, offset__, type__) ({ \
+ type__ val_; \
+ if ((map__)->is_iomem) { \
+ __iosys_map_rd_io(val_, (map__)->vaddr_iomem + (offset__), type__); \
+ } else { \
+ __iosys_map_rd_sys(val_, (map__)->vaddr + (offset__), type__); \
+ } \
+ val_; \
})
/**
@@ -413,20 +413,20 @@ static inline void iosys_map_memset(struct iosys_map *dst, size_t offset,
* or if pointer may be unaligned (and problematic for the architecture
* supported), use iosys_map_memcpy_to()
*/
-#define iosys_map_wr(map__, offset__, type__, val__) ({ \
- type__ val = (val__); \
- if ((map__)->is_iomem) { \
- __iosys_map_wr_io(val, (map__)->vaddr_iomem + (offset__), type__);\
- } else { \
- __iosys_map_wr_sys(val, (map__)->vaddr + (offset__), type__); \
- } \
+#define iosys_map_wr(map__, offset__, type__, val__) ({ \
+ type__ val_ = (val__); \
+ if ((map__)->is_iomem) { \
+ __iosys_map_wr_io(val_, (map__)->vaddr_iomem + (offset__), type__); \
+ } else { \
+ __iosys_map_wr_sys(val_, (map__)->vaddr + (offset__), type__); \
+ } \
})
/**
* iosys_map_rd_field - Read a member from a struct in the iosys_map
*
* @map__: The iosys_map structure
- * @struct_offset__: Offset from the beggining of the map, where the struct
+ * @struct_offset__: Offset from the beginning of the map, where the struct
* is located
* @struct_type__: The struct describing the layout of the mapping
* @field__: Member of the struct to read
@@ -485,16 +485,16 @@ static inline void iosys_map_memset(struct iosys_map *dst, size_t offset,
* The value read from the mapping.
*/
#define iosys_map_rd_field(map__, struct_offset__, struct_type__, field__) ({ \
- struct_type__ *s; \
+ struct_type__ *s_; \
iosys_map_rd(map__, struct_offset__ + offsetof(struct_type__, field__), \
- typeof(s->field__)); \
+ typeof(s_->field__)); \
})
/**
* iosys_map_wr_field - Write to a member of a struct in the iosys_map
*
* @map__: The iosys_map structure
- * @struct_offset__: Offset from the beggining of the map, where the struct
+ * @struct_offset__: Offset from the beginning of the map, where the struct
* is located
* @struct_type__: The struct describing the layout of the mapping
* @field__: Member of the struct to read
@@ -508,9 +508,9 @@ static inline void iosys_map_memset(struct iosys_map *dst, size_t offset,
* usage and memory layout.
*/
#define iosys_map_wr_field(map__, struct_offset__, struct_type__, field__, val__) ({ \
- struct_type__ *s; \
+ struct_type__ *s_; \
iosys_map_wr(map__, struct_offset__ + offsetof(struct_type__, field__), \
- typeof(s->field__), val__); \
+ typeof(s_->field__), val__); \
})
#endif /* __IOSYS_MAP_H__ */
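The temporaries above gained trailing underscores to stop shadowing caller identifiers; a sketch of the failure mode this closes (caller code hypothetical):

	u32 val = 4;
	u32 x = iosys_map_rd(&map, val, u32);
	/* before the rename, 'val' in the offset expression resolved to
	 * the macro's own uninitialized local rather than the caller's */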
diff --git a/include/linux/iov_iter.h b/include/linux/iov_iter.h
new file mode 100644
index 000000000000..270454a6703d
--- /dev/null
+++ b/include/linux/iov_iter.h
@@ -0,0 +1,274 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* I/O iterator iteration building functions.
+ *
+ * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#ifndef _LINUX_IOV_ITER_H
+#define _LINUX_IOV_ITER_H
+
+#include <linux/uio.h>
+#include <linux/bvec.h>
+
+typedef size_t (*iov_step_f)(void *iter_base, size_t progress, size_t len,
+ void *priv, void *priv2);
+typedef size_t (*iov_ustep_f)(void __user *iter_base, size_t progress, size_t len,
+ void *priv, void *priv2);
+
+/*
+ * Handle ITER_UBUF.
+ */
+static __always_inline
+size_t iterate_ubuf(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_ustep_f step)
+{
+ void __user *base = iter->ubuf;
+ size_t progress = 0, remain;
+
+ remain = step(base + iter->iov_offset, 0, len, priv, priv2);
+ progress = len - remain;
+ iter->iov_offset += progress;
+ iter->count -= progress;
+ return progress;
+}
+
+/*
+ * Handle ITER_IOVEC.
+ */
+static __always_inline
+size_t iterate_iovec(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_ustep_f step)
+{
+ const struct iovec *p = iter->__iov;
+ size_t progress = 0, skip = iter->iov_offset;
+
+ do {
+ size_t remain, consumed;
+ size_t part = min(len, p->iov_len - skip);
+
+ if (likely(part)) {
+ remain = step(p->iov_base + skip, progress, part, priv, priv2);
+ consumed = part - remain;
+ progress += consumed;
+ skip += consumed;
+ len -= consumed;
+ if (skip < p->iov_len)
+ break;
+ }
+ p++;
+ skip = 0;
+ } while (len);
+
+ iter->nr_segs -= p - iter->__iov;
+ iter->__iov = p;
+ iter->iov_offset = skip;
+ iter->count -= progress;
+ return progress;
+}
+
+/*
+ * Handle ITER_KVEC.
+ */
+static __always_inline
+size_t iterate_kvec(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_step_f step)
+{
+ const struct kvec *p = iter->kvec;
+ size_t progress = 0, skip = iter->iov_offset;
+
+ do {
+ size_t remain, consumed;
+ size_t part = min(len, p->iov_len - skip);
+
+ if (likely(part)) {
+ remain = step(p->iov_base + skip, progress, part, priv, priv2);
+ consumed = part - remain;
+ progress += consumed;
+ skip += consumed;
+ len -= consumed;
+ if (skip < p->iov_len)
+ break;
+ }
+ p++;
+ skip = 0;
+ } while (len);
+
+ iter->nr_segs -= p - iter->kvec;
+ iter->kvec = p;
+ iter->iov_offset = skip;
+ iter->count -= progress;
+ return progress;
+}
+
+/*
+ * Handle ITER_BVEC.
+ */
+static __always_inline
+size_t iterate_bvec(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_step_f step)
+{
+ const struct bio_vec *p = iter->bvec;
+ size_t progress = 0, skip = iter->iov_offset;
+
+ do {
+ size_t remain, consumed;
+ size_t offset = p->bv_offset + skip, part;
+ void *kaddr = kmap_local_page(p->bv_page + offset / PAGE_SIZE);
+
+ part = min3(len,
+ (size_t)(p->bv_len - skip),
+ (size_t)(PAGE_SIZE - offset % PAGE_SIZE));
+ remain = step(kaddr + offset % PAGE_SIZE, progress, part, priv, priv2);
+ kunmap_local(kaddr);
+ consumed = part - remain;
+ len -= consumed;
+ progress += consumed;
+ skip += consumed;
+ if (skip >= p->bv_len) {
+ skip = 0;
+ p++;
+ }
+ if (remain)
+ break;
+ } while (len);
+
+ iter->nr_segs -= p - iter->bvec;
+ iter->bvec = p;
+ iter->iov_offset = skip;
+ iter->count -= progress;
+ return progress;
+}
+
+/*
+ * Handle ITER_XARRAY.
+ */
+static __always_inline
+size_t iterate_xarray(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_step_f step)
+{
+ struct folio *folio;
+ size_t progress = 0;
+ loff_t start = iter->xarray_start + iter->iov_offset;
+ pgoff_t index = start / PAGE_SIZE;
+ XA_STATE(xas, iter->xarray, index);
+
+ rcu_read_lock();
+ xas_for_each(&xas, folio, ULONG_MAX) {
+ size_t remain, consumed, offset, part, flen;
+
+ if (xas_retry(&xas, folio))
+ continue;
+ if (WARN_ON(xa_is_value(folio)))
+ break;
+ if (WARN_ON(folio_test_hugetlb(folio)))
+ break;
+
+ offset = offset_in_folio(folio, start + progress);
+ flen = min(folio_size(folio) - offset, len);
+
+ while (flen) {
+ void *base = kmap_local_folio(folio, offset);
+
+ part = min_t(size_t, flen,
+ PAGE_SIZE - offset_in_page(offset));
+ remain = step(base, progress, part, priv, priv2);
+ kunmap_local(base);
+
+ consumed = part - remain;
+ progress += consumed;
+ len -= consumed;
+
+ if (remain || len == 0)
+ goto out;
+ flen -= consumed;
+ offset += consumed;
+ }
+ }
+
+out:
+ rcu_read_unlock();
+ iter->iov_offset += progress;
+ iter->count -= progress;
+ return progress;
+}
+
+/*
+ * Handle ITER_DISCARD.
+ */
+static __always_inline
+size_t iterate_discard(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_step_f step)
+{
+ size_t progress = len;
+
+ iter->count -= progress;
+ return progress;
+}
+
+/**
+ * iterate_and_advance2 - Iterate over an iterator
+ * @iter: The iterator to iterate over.
+ * @len: The amount to iterate over.
+ * @priv: Data for the step functions.
+ * @priv2: More data for the step functions.
+ * @ustep: Function for UBUF/IOVEC iterators; given __user addresses.
+ * @step: Function for other iterators; given kernel addresses.
+ *
+ * Iterate over the next part of an iterator, up to the specified length. The
+ * buffer is presented in segments, which for kernel iteration are broken up by
+ * physical pages and mapped, with the mapped address being presented.
+ *
+ * Two step functions, @step and @ustep, must be provided, one for handling
+ * mapped kernel addresses and the other for user addresses, which have the
+ * potential to fault since no pinning is performed.
+ *
+ * The step functions are passed the address and length of the segment, @priv,
+ * @priv2 and the amount of data so far iterated over (which can, for example,
+ * be added to @priv to point to the right part of a second buffer). The step
+ * functions should return the amount of the segment they didn't process (i.e. 0
+ * indicates complete processing).
+ *
+ * This function returns the amount of data processed (i.e. 0 means nothing was
+ * processed and a return value of @len means it was processed to completion).
+ */
+static __always_inline
+size_t iterate_and_advance2(struct iov_iter *iter, size_t len, void *priv,
+ void *priv2, iov_ustep_f ustep, iov_step_f step)
+{
+ if (unlikely(iter->count < len))
+ len = iter->count;
+ if (unlikely(!len))
+ return 0;
+
+ if (likely(iter_is_ubuf(iter)))
+ return iterate_ubuf(iter, len, priv, priv2, ustep);
+ if (likely(iter_is_iovec(iter)))
+ return iterate_iovec(iter, len, priv, priv2, ustep);
+ if (iov_iter_is_bvec(iter))
+ return iterate_bvec(iter, len, priv, priv2, step);
+ if (iov_iter_is_kvec(iter))
+ return iterate_kvec(iter, len, priv, priv2, step);
+ if (iov_iter_is_xarray(iter))
+ return iterate_xarray(iter, len, priv, priv2, step);
+ return iterate_discard(iter, len, priv, priv2, step);
+}
+
+/**
+ * iterate_and_advance - Iterate over an iterator
+ * @iter: The iterator to iterate over.
+ * @len: The amount to iterate over.
+ * @priv: Data for the step functions.
+ * @ustep: Function for UBUF/IOVEC iterators; given __user addresses.
+ * @step: Function for other iterators; given kernel addresses.
+ *
+ * As iterate_and_advance2(), but priv2 is always NULL.
+ */
+static __always_inline
+size_t iterate_and_advance(struct iov_iter *iter, size_t len, void *priv,
+ iov_ustep_f ustep, iov_step_f step)
+{
+ return iterate_and_advance2(iter, len, priv, NULL, ustep, step);
+}
+
+#endif /* _LINUX_IOV_ITER_H */
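A minimal sketch of the step-function contract described above (the example_* names are made up; memset() and clear_user() are the only real calls): zero the next @len bytes of any iterator type via iterate_and_advance():

static size_t example_zero_step(void *base, size_t progress, size_t len,
				void *priv, void *priv2)
{
	memset(base, 0, len);
	return 0;	/* 0 = segment fully processed */
}

static size_t example_zero_ustep(void __user *base, size_t progress,
				 size_t len, void *priv, void *priv2)
{
	return clear_user(base, len);	/* bytes left unprocessed */
}

static size_t example_zero_iter(struct iov_iter *iter, size_t len)
{
	return iterate_and_advance(iter, len, NULL,
				   example_zero_ustep, example_zero_step);
}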
diff --git a/include/linux/iova_bitmap.h b/include/linux/iova_bitmap.h
index c006cf0a25f3..1c338f5e5b7a 100644
--- a/include/linux/iova_bitmap.h
+++ b/include/linux/iova_bitmap.h
@@ -7,6 +7,7 @@
#define _IOVA_BITMAP_H_
#include <linux/types.h>
+#include <linux/errno.h>
struct iova_bitmap;
@@ -14,6 +15,7 @@ typedef int (*iova_bitmap_fn_t)(struct iova_bitmap *bitmap,
unsigned long iova, size_t length,
void *opaque);
+#if IS_ENABLED(CONFIG_IOMMUFD_DRIVER)
struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, size_t length,
unsigned long page_size,
u64 __user *data);
@@ -22,5 +24,29 @@ int iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque,
iova_bitmap_fn_t fn);
void iova_bitmap_set(struct iova_bitmap *bitmap,
unsigned long iova, size_t length);
+#else
+static inline struct iova_bitmap *iova_bitmap_alloc(unsigned long iova,
+ size_t length,
+ unsigned long page_size,
+ u64 __user *data)
+{
+ return NULL;
+}
+
+static inline void iova_bitmap_free(struct iova_bitmap *bitmap)
+{
+}
+
+static inline int iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque,
+ iova_bitmap_fn_t fn)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void iova_bitmap_set(struct iova_bitmap *bitmap,
+ unsigned long iova, size_t length)
+{
+}
+#endif
#endif
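An illustrative dirty-tracking snippet (driver code hypothetical): with CONFIG_IOMMUFD_DRIVER disabled, the new stubs above make this compile down to a no-op:

static void example_mark_dirty(struct iova_bitmap *bitmap,
			       unsigned long iova, size_t length)
{
	if (bitmap)
		iova_bitmap_set(bitmap, iova, length);
}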
diff --git a/include/linux/ipc.h b/include/linux/ipc.h
index e1c9eea6015b..9b1434247aab 100644
--- a/include/linux/ipc.h
+++ b/include/linux/ipc.h
@@ -2,7 +2,7 @@
#ifndef _LINUX_IPC_H
#define _LINUX_IPC_H
-#include <linux/spinlock.h>
+#include <linux/spinlock_types.h>
#include <linux/uidgid.h>
#include <linux/rhashtable-types.h>
#include <uapi/linux/ipc.h>
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 839247a4f48e..5e605e384aac 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -33,6 +33,7 @@ struct ipv6_devconf {
__s32 accept_ra_defrtr;
__u32 ra_defrtr_metric;
__s32 accept_ra_min_hop_limit;
+ __s32 accept_ra_min_lft;
__s32 accept_ra_pinfo;
__s32 ignore_routes_with_linkdown;
#ifdef CONFIG_IPV6_ROUTER_PREF
@@ -81,6 +82,7 @@ struct ipv6_devconf {
__u32 ioam6_id_wide;
__u8 ioam6_enabled;
__u8 ndisc_evict_nocarrier;
+ __u8 ra_honor_pio_life;
struct ctl_table_header *sysctl_header;
};
@@ -146,6 +148,7 @@ struct inet6_skb_parm {
#define IP6SKB_JUMBOGRAM 128
#define IP6SKB_SEG6 256
#define IP6SKB_FAKEJUMBO 512
+#define IP6SKB_MULTIPATH 1024
};
#if defined(CONFIG_NET_L3_MASTER_DEV)
@@ -199,14 +202,7 @@ struct inet6_cork {
u8 tclass;
};
-/**
- * struct ipv6_pinfo - ipv6 private area
- *
- * In the struct sock hierarchy (tcp6_sock, upd6_sock, etc)
- * this _must_ be the last member, so that inet6_sk_generic
- * is able to calculate its offset from the base struct sock
- * by using the struct proto->slab_obj_size member. -acme
- */
+/* struct ipv6_pinfo - ipv6 private area */
struct ipv6_pinfo {
struct in6_addr saddr;
struct in6_pktinfo sticky_pktinfo;
@@ -218,28 +214,9 @@ struct ipv6_pinfo {
__be32 flow_label;
__u32 frag_size;
- /*
- * Packed in 16bits.
- * Omit one shift by putting the signed field at MSB.
- */
-#if defined(__BIG_ENDIAN_BITFIELD)
- __s16 hop_limit:9;
- __u16 __unused_1:7;
-#else
- __u16 __unused_1:7;
- __s16 hop_limit:9;
-#endif
+ s16 hop_limit;
+ u8 mcast_hops;
-#if defined(__BIG_ENDIAN_BITFIELD)
- /* Packed in 16bits. */
- __s16 mcast_hops:9;
- __u16 __unused_2:6,
- mc_loop:1;
-#else
- __u16 mc_loop:1,
- __unused_2:6;
- __s16 mcast_hops:9;
-#endif
int ucast_oif;
int mcast_oif;
@@ -267,21 +244,11 @@ struct ipv6_pinfo {
} rxopt;
/* sockopt flags */
- __u16 recverr:1,
- sndflow:1,
- repflow:1,
- pmtudisc:3,
- padding:1, /* 1 bit hole */
- srcprefs:3, /* 001: prefer temporary address
+ __u8 srcprefs; /* 001: prefer temporary address
* 010: prefer public address
* 100: prefer care-of address
*/
- dontfrag:1,
- autoflowlabel:1,
- autoflowlabel_set:1,
- mc_all:1,
- recverr_rfc4884:1,
- rtalert_isolate:1;
+ __u8 pmtudisc;
__u8 min_hopcount;
__u8 tclass;
__be32 rcv_flowinfo;
@@ -298,6 +265,18 @@ struct ipv6_pinfo {
struct inet6_cork cork;
};
+/* We currently use available bits from inet_sk(sk)->inet_flags,
+ * this could change in the future.
+ */
+#define inet6_test_bit(nr, sk) \
+ test_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags)
+#define inet6_set_bit(nr, sk) \
+ set_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags)
+#define inet6_clear_bit(nr, sk) \
+ clear_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags)
+#define inet6_assign_bit(nr, sk, val) \
+ assign_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags, val)
+
/* WARNING: don't change the layout of the members in {raw,udp,tcp}6_sock! */
struct raw6_sock {
/* inet_sock has to be the first member of raw6_sock */
@@ -306,19 +285,19 @@ struct raw6_sock {
__u32 offset; /* checksum offset */
struct icmp6_filter filter;
__u32 ip6mr_table;
- /* ipv6_pinfo has to be the last member of raw6_sock, see inet6_sk_generic */
+
struct ipv6_pinfo inet6;
};
struct udp6_sock {
struct udp_sock udp;
- /* ipv6_pinfo has to be the last member of udp6_sock, see inet6_sk_generic */
+
struct ipv6_pinfo inet6;
};
struct tcp6_sock {
struct tcp_sock tcp;
- /* ipv6_pinfo has to be the last member of tcp6_sock, see inet6_sk_generic */
+
struct ipv6_pinfo inet6;
};
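A usage sketch for the inet6_*_bit() accessors above; the flag name follows the INET_FLAGS_* convention the comment mentions and is an assumption here:

static bool example_rtalert_isolated(struct sock *sk)
{
	/* flag name assumed to exist in the inet_flags enum */
	return inet6_test_bit(RTALERT_ISOLATE, sk);
}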
diff --git a/include/linux/irq.h b/include/linux/irq.h
index b1b28affb32a..90081afa10ce 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -215,40 +215,40 @@ struct irq_data {
* IRQD_SINGLE_TARGET - IRQ allows only a single affinity target
* IRQD_DEFAULT_TRIGGER_SET - Expected trigger already been set
* IRQD_CAN_RESERVE - Can use reservation mode
- * IRQD_MSI_NOMASK_QUIRK - Non-maskable MSI quirk for affinity change
- * required
* IRQD_HANDLE_ENFORCE_IRQCTX - Enforce that handle_irq_*() is only invoked
* from actual interrupt context.
* IRQD_AFFINITY_ON_ACTIVATE - Affinity is set on activation. Don't call
* irq_chip::irq_set_affinity() when deactivated.
* IRQD_IRQ_ENABLED_ON_SUSPEND - Interrupt is enabled on suspend by irq pm if
* irqchip have flag IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND set.
 * IRQD_RESEND_WHEN_IN_PROGRESS - Interrupt may fire when already in progress, in which
+ * case it must be resent at the next available opportunity.
*/
enum {
IRQD_TRIGGER_MASK = 0xf,
- IRQD_SETAFFINITY_PENDING = (1 << 8),
- IRQD_ACTIVATED = (1 << 9),
- IRQD_NO_BALANCING = (1 << 10),
- IRQD_PER_CPU = (1 << 11),
- IRQD_AFFINITY_SET = (1 << 12),
- IRQD_LEVEL = (1 << 13),
- IRQD_WAKEUP_STATE = (1 << 14),
- IRQD_MOVE_PCNTXT = (1 << 15),
- IRQD_IRQ_DISABLED = (1 << 16),
- IRQD_IRQ_MASKED = (1 << 17),
- IRQD_IRQ_INPROGRESS = (1 << 18),
- IRQD_WAKEUP_ARMED = (1 << 19),
- IRQD_FORWARDED_TO_VCPU = (1 << 20),
- IRQD_AFFINITY_MANAGED = (1 << 21),
- IRQD_IRQ_STARTED = (1 << 22),
- IRQD_MANAGED_SHUTDOWN = (1 << 23),
- IRQD_SINGLE_TARGET = (1 << 24),
- IRQD_DEFAULT_TRIGGER_SET = (1 << 25),
- IRQD_CAN_RESERVE = (1 << 26),
- IRQD_MSI_NOMASK_QUIRK = (1 << 27),
- IRQD_HANDLE_ENFORCE_IRQCTX = (1 << 28),
- IRQD_AFFINITY_ON_ACTIVATE = (1 << 29),
- IRQD_IRQ_ENABLED_ON_SUSPEND = (1 << 30),
+ IRQD_SETAFFINITY_PENDING = BIT(8),
+ IRQD_ACTIVATED = BIT(9),
+ IRQD_NO_BALANCING = BIT(10),
+ IRQD_PER_CPU = BIT(11),
+ IRQD_AFFINITY_SET = BIT(12),
+ IRQD_LEVEL = BIT(13),
+ IRQD_WAKEUP_STATE = BIT(14),
+ IRQD_MOVE_PCNTXT = BIT(15),
+ IRQD_IRQ_DISABLED = BIT(16),
+ IRQD_IRQ_MASKED = BIT(17),
+ IRQD_IRQ_INPROGRESS = BIT(18),
+ IRQD_WAKEUP_ARMED = BIT(19),
+ IRQD_FORWARDED_TO_VCPU = BIT(20),
+ IRQD_AFFINITY_MANAGED = BIT(21),
+ IRQD_IRQ_STARTED = BIT(22),
+ IRQD_MANAGED_SHUTDOWN = BIT(23),
+ IRQD_SINGLE_TARGET = BIT(24),
+ IRQD_DEFAULT_TRIGGER_SET = BIT(25),
+ IRQD_CAN_RESERVE = BIT(26),
+ IRQD_HANDLE_ENFORCE_IRQCTX = BIT(27),
+ IRQD_AFFINITY_ON_ACTIVATE = BIT(28),
+ IRQD_IRQ_ENABLED_ON_SUSPEND = BIT(29),
+ IRQD_RESEND_WHEN_IN_PROGRESS = BIT(30),
};
#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
@@ -423,29 +423,24 @@ static inline bool irqd_can_reserve(struct irq_data *d)
return __irqd_to_state(d) & IRQD_CAN_RESERVE;
}
-static inline void irqd_set_msi_nomask_quirk(struct irq_data *d)
-{
- __irqd_to_state(d) |= IRQD_MSI_NOMASK_QUIRK;
-}
-
-static inline void irqd_clr_msi_nomask_quirk(struct irq_data *d)
+static inline void irqd_set_affinity_on_activate(struct irq_data *d)
{
- __irqd_to_state(d) &= ~IRQD_MSI_NOMASK_QUIRK;
+ __irqd_to_state(d) |= IRQD_AFFINITY_ON_ACTIVATE;
}
-static inline bool irqd_msi_nomask_quirk(struct irq_data *d)
+static inline bool irqd_affinity_on_activate(struct irq_data *d)
{
- return __irqd_to_state(d) & IRQD_MSI_NOMASK_QUIRK;
+ return __irqd_to_state(d) & IRQD_AFFINITY_ON_ACTIVATE;
}
-static inline void irqd_set_affinity_on_activate(struct irq_data *d)
+static inline void irqd_set_resend_when_in_progress(struct irq_data *d)
{
- __irqd_to_state(d) |= IRQD_AFFINITY_ON_ACTIVATE;
+ __irqd_to_state(d) |= IRQD_RESEND_WHEN_IN_PROGRESS;
}
-static inline bool irqd_affinity_on_activate(struct irq_data *d)
+static inline bool irqd_needs_resend_when_in_progress(struct irq_data *d)
{
- return __irqd_to_state(d) & IRQD_AFFINITY_ON_ACTIVATE;
+ return __irqd_to_state(d) & IRQD_RESEND_WHEN_IN_PROGRESS;
}
#undef __irqd_to_state
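A hypothetical irqchip sketch using the new helper pair above: opting an interrupt into resend-when-in-progress handling at map time:

static int example_domain_map(struct irq_domain *d, unsigned int virq,
			      irq_hw_number_t hw)
{
	irqd_set_resend_when_in_progress(irq_get_irq_data(virq));
	return 0;
}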
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index 8cd11a223260..136f2980cba3 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -66,6 +66,9 @@ void irq_work_sync(struct irq_work *work);
void irq_work_run(void);
bool irq_work_needs_cpu(void);
void irq_work_single(void *arg);
+
+void arch_irq_work_raise(void);
+
#else
static inline bool irq_work_needs_cpu(void) { return false; }
static inline void irq_work_run(void) { }
diff --git a/include/linux/irqchip/mmp.h b/include/linux/irqchip/mmp.h
deleted file mode 100644
index aa1813749a4f..000000000000
--- a/include/linux/irqchip/mmp.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __IRQCHIP_MMP_H
-#define __IRQCHIP_MMP_H
-
-extern struct irq_chip icu_irq_chip;
-
-extern void icu_init_irq(void);
-extern void mmp2_init_icu(void);
-
-#endif /* __IRQCHIP_MMP_H */
diff --git a/include/linux/irqchip/mxs.h b/include/linux/irqchip/mxs.h
deleted file mode 100644
index 4f447e3f0f3a..000000000000
--- a/include/linux/irqchip/mxs.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2013 Freescale Semiconductor, Inc.
- */
-
-#ifndef __LINUX_IRQCHIP_MXS_H
-#define __LINUX_IRQCHIP_MXS_H
-
-extern void icoll_handle_irq(struct pt_regs *);
-
-#endif
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 844a8e30e6de..d9451d456a73 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -102,6 +102,9 @@ struct irq_desc {
int parent_irq;
struct module *owner;
const char *name;
+#ifdef CONFIG_HARDIRQS_SW_RESEND
+ struct hlist_node resend_node;
+#endif
} ____cacheline_internodealigned_in_smp;
#ifdef CONFIG_SPARSE_IRQ
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 51c254b7fec2..ee0a82c60508 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -174,7 +174,7 @@ struct irq_domain {
irq_hw_number_t hwirq_max;
unsigned int revmap_size;
struct radix_tree_root revmap_tree;
- struct irq_data __rcu *revmap[];
+ struct irq_data __rcu *revmap[] __counted_by(revmap_size);
};
/* Irq domain flags */
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 5ec0fa71399e..147feebd508c 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -12,7 +12,9 @@
#ifndef _LINUX_TRACE_IRQFLAGS_H
#define _LINUX_TRACE_IRQFLAGS_H
+#include <linux/irqflags_types.h>
#include <linux/typecheck.h>
+#include <linux/cleanup.h>
#include <asm/irqflags.h>
#include <asm/percpu.h>
@@ -33,19 +35,6 @@
#ifdef CONFIG_TRACE_IRQFLAGS
-/* Per-task IRQ trace events information. */
-struct irqtrace_events {
- unsigned int irq_events;
- unsigned long hardirq_enable_ip;
- unsigned long hardirq_disable_ip;
- unsigned int hardirq_enable_event;
- unsigned int hardirq_disable_event;
- unsigned long softirq_disable_ip;
- unsigned long softirq_enable_ip;
- unsigned int softirq_disable_event;
- unsigned int softirq_enable_event;
-};
-
DECLARE_PER_CPU(int, hardirqs_enabled);
DECLARE_PER_CPU(int, hardirq_context);
@@ -267,4 +256,10 @@ extern void warn_bogus_irq_restore(void);
#define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags)
+DEFINE_LOCK_GUARD_0(irq, local_irq_disable(), local_irq_enable())
+DEFINE_LOCK_GUARD_0(irqsave,
+ local_irq_save(_T->flags),
+ local_irq_restore(_T->flags),
+ unsigned long flags)
+
#endif
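A usage sketch for the guards defined above (cleanup.h scope-guard style; the counter is illustrative): interrupt state is restored automatically when the scope exits, even on early return:

static void example_bump(unsigned long *ctr)
{
	guard(irqsave)();	/* local_irq_save() now, restore at '}' */
	(*ctr)++;
}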
diff --git a/include/linux/irqflags_types.h b/include/linux/irqflags_types.h
new file mode 100644
index 000000000000..c13f0d915097
--- /dev/null
+++ b/include/linux/irqflags_types.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_IRQFLAGS_TYPES_H
+#define _LINUX_IRQFLAGS_TYPES_H
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+
+/* Per-task IRQ trace events information. */
+struct irqtrace_events {
+ unsigned int irq_events;
+ unsigned long hardirq_enable_ip;
+ unsigned long hardirq_disable_ip;
+ unsigned int hardirq_enable_event;
+ unsigned int hardirq_disable_event;
+ unsigned long softirq_disable_ip;
+ unsigned long softirq_enable_ip;
+ unsigned int softirq_disable_event;
+ unsigned int softirq_enable_event;
+};
+
+#endif
+
+#endif /* _LINUX_IRQFLAGS_TYPES_H */
diff --git a/include/linux/iscsi_ibft.h b/include/linux/iscsi_ibft.h
index 790e7fcfc1a6..e2742748104d 100644
--- a/include/linux/iscsi_ibft.h
+++ b/include/linux/iscsi_ibft.h
@@ -21,12 +21,20 @@
*/
extern phys_addr_t ibft_phys_addr;
+#ifdef CONFIG_ISCSI_IBFT_FIND
+
/*
* Routine used to find and reserve the iSCSI Boot Format Table. The
* physical address is set in the ibft_phys_addr variable.
*/
-#ifdef CONFIG_ISCSI_IBFT_FIND
void reserve_ibft_region(void);
+
+/*
+ * Physical bounds to search for the iSCSI Boot Format Table.
+ */
+#define IBFT_START 0x80000 /* 512kB */
+#define IBFT_END 0x100000 /* 1MB */
+
#else
static inline void reserve_ibft_region(void) {}
#endif
diff --git a/include/linux/ism.h b/include/linux/ism.h
index ea2bcdae7401..5428edd90982 100644
--- a/include/linux/ism.h
+++ b/include/linux/ism.h
@@ -44,9 +44,7 @@ struct ism_dev {
u64 local_gid;
int ieq_idx;
- atomic_t free_clients_cnt;
- atomic_t add_dev_cnt;
- wait_queue_head_t waitq;
+ struct ism_client *subs[MAX_CLIENTS];
};
struct ism_event {
@@ -68,9 +66,6 @@ struct ism_client {
*/
void (*handle_irq)(struct ism_dev *dev, unsigned int bit, u16 dmbemask);
/* Private area - don't touch! */
- struct work_struct remove_work;
- struct work_struct add_work;
- struct ism_dev *tgt_ism;
u8 id;
};
@@ -91,7 +86,6 @@ int ism_register_dmb(struct ism_dev *dev, struct ism_dmb *dmb,
int ism_unregister_dmb(struct ism_dev *dev, struct ism_dmb *dmb);
int ism_move(struct ism_dev *dev, u64 dmb_tok, unsigned int idx, bool sf,
unsigned int offset, void *data, unsigned int size);
-u8 *ism_get_seid(void);
const struct smcd_ops *ism_get_smcd_ops(void);
diff --git a/include/linux/iversion.h b/include/linux/iversion.h
index f174ff1b59ee..8f972eaca2ed 100644
--- a/include/linux/iversion.h
+++ b/include/linux/iversion.h
@@ -256,7 +256,7 @@ inode_peek_iversion(const struct inode *inode)
* For filesystems without any sort of change attribute, the best we can
* do is fake one up from the ctime:
*/
-static inline u64 time_to_chattr(struct timespec64 *t)
+static inline u64 time_to_chattr(const struct timespec64 *t)
{
u64 chattr = t->tv_sec;
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index f619bae1dcc5..971f3e826e15 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -265,8 +265,10 @@ typedef struct journal_superblock_s
__u8 s_padding2[3];
/* 0x0054 */
__be32 s_num_fc_blks; /* Number of fast commit blocks */
-/* 0x0058 */
- __u32 s_padding[41];
+ __be32 s_head; /* blocknr of head of log, only uptodate
+ * while the filesystem is clean */
+/* 0x005C */
+ __u32 s_padding[40];
__be32 s_checksum; /* crc32c(superblock) */
/* 0x0100 */
@@ -274,17 +276,6 @@ typedef struct journal_superblock_s
/* 0x0400 */
} journal_superblock_t;
-/* Use the jbd2_{has,set,clear}_feature_* helpers; these will be removed */
-#define JBD2_HAS_COMPAT_FEATURE(j,mask) \
- ((j)->j_format_version >= 2 && \
- ((j)->j_superblock->s_feature_compat & cpu_to_be32((mask))))
-#define JBD2_HAS_RO_COMPAT_FEATURE(j,mask) \
- ((j)->j_format_version >= 2 && \
- ((j)->j_superblock->s_feature_ro_compat & cpu_to_be32((mask))))
-#define JBD2_HAS_INCOMPAT_FEATURE(j,mask) \
- ((j)->j_format_version >= 2 && \
- ((j)->j_superblock->s_feature_incompat & cpu_to_be32((mask))))
-
#define JBD2_FEATURE_COMPAT_CHECKSUM 0x00000001
#define JBD2_FEATURE_INCOMPAT_REVOKE 0x00000001
@@ -623,12 +614,6 @@ struct transaction_s
struct journal_head *t_checkpoint_list;
/*
- * Doubly-linked circular list of all buffers submitted for IO while
- * checkpointing. [j_list_lock]
- */
- struct journal_head *t_checkpoint_io_list;
-
- /*
* Doubly-linked circular list of metadata buffers being
* shadowed by log IO. The IO buffers on the iobuf list and
* the shadow buffers on this list match each other one for
@@ -646,11 +631,6 @@ struct transaction_s
struct list_head t_inode_list;
/*
- * Protects info related to handles
- */
- spinlock_t t_handle_lock;
-
- /*
* Longest time some handle had to wait for running transaction
*/
unsigned long t_max_wait;
@@ -776,11 +756,6 @@ struct journal_s
unsigned long j_flags;
/**
- * @j_atomic_flags: Atomic journaling state flags.
- */
- unsigned long j_atomic_flags;
-
- /**
* @j_errno:
*
* Is there an outstanding uncleared error on the journal (from a prior
@@ -804,11 +779,6 @@ struct journal_s
journal_superblock_t *j_superblock;
/**
- * @j_format_version: Version of the superblock format.
- */
- int j_format_version;
-
- /**
* @j_state_lock: Protect the various scalars in the journal.
*/
rwlock_t j_state_lock;
@@ -911,7 +881,7 @@ struct journal_s
* Journal head shrinker, reclaim buffer's journal head which
* has been written back.
*/
- struct shrinker j_shrinker;
+ struct shrinker *j_shrinker;
/**
* @j_checkpoint_jh_count:
@@ -1024,6 +994,13 @@ struct journal_s
struct block_device *j_fs_dev;
/**
+ * @j_fs_dev_wb_err:
+ *
+ * Records the errseq of the client fs's backing block device.
+ */
+ errseq_t j_fs_dev_wb_err;
+
+ /**
* @j_total_len: Total maximum capacity of the journal region on disk.
*/
unsigned int j_total_len;
@@ -1324,11 +1301,22 @@ struct journal_s
rwsem_release(&j->j_trans_commit_map, _THIS_IP_); \
} while (0)
+/*
+ * We can support any known requested features iff the
+ * superblock is not in version 1. Otherwise we fail to support any
+ * extended sb features.
+ */
+static inline bool jbd2_format_support_feature(journal_t *j)
+{
+ return j->j_superblock->s_header.h_blocktype !=
+ cpu_to_be32(JBD2_SUPERBLOCK_V1);
+}
+
/* journal feature predicate functions */
#define JBD2_FEATURE_COMPAT_FUNCS(name, flagname) \
static inline bool jbd2_has_feature_##name(journal_t *j) \
{ \
- return ((j)->j_format_version >= 2 && \
+ return (jbd2_format_support_feature(j) && \
((j)->j_superblock->s_feature_compat & \
cpu_to_be32(JBD2_FEATURE_COMPAT_##flagname)) != 0); \
} \
@@ -1346,7 +1334,7 @@ static inline void jbd2_clear_feature_##name(journal_t *j) \
#define JBD2_FEATURE_RO_COMPAT_FUNCS(name, flagname) \
static inline bool jbd2_has_feature_##name(journal_t *j) \
{ \
- return ((j)->j_format_version >= 2 && \
+ return (jbd2_format_support_feature(j) && \
((j)->j_superblock->s_feature_ro_compat & \
cpu_to_be32(JBD2_FEATURE_RO_COMPAT_##flagname)) != 0); \
} \
@@ -1364,7 +1352,7 @@ static inline void jbd2_clear_feature_##name(journal_t *j) \
#define JBD2_FEATURE_INCOMPAT_FUNCS(name, flagname) \
static inline bool jbd2_has_feature_##name(journal_t *j) \
{ \
- return ((j)->j_format_version >= 2 && \
+ return (jbd2_format_support_feature(j) && \
((j)->j_superblock->s_feature_incompat & \
cpu_to_be32(JBD2_FEATURE_INCOMPAT_##flagname)) != 0); \
} \
@@ -1388,6 +1376,9 @@ JBD2_FEATURE_INCOMPAT_FUNCS(csum2, CSUM_V2)
JBD2_FEATURE_INCOMPAT_FUNCS(csum3, CSUM_V3)
JBD2_FEATURE_INCOMPAT_FUNCS(fast_commit, FAST_COMMIT)
+/* Journal high priority write IO operation flags */
+#define JBD2_JOURNAL_REQ_FLAGS (REQ_META | REQ_SYNC | REQ_IDLE)
+
/*
* Journal flag definitions
*/
@@ -1400,6 +1391,9 @@ JBD2_FEATURE_INCOMPAT_FUNCS(fast_commit, FAST_COMMIT)
#define JBD2_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file
* data write error in ordered
* mode */
+#define JBD2_CYCLE_RECORD 0x080 /* Journal cycled record log on
+ * clean and empty filesystem
+ * logging area */
#define JBD2_FAST_COMMIT_ONGOING 0x100 /* Fast commit is ongoing */
#define JBD2_FULL_COMMIT_ONGOING 0x200 /* Full commit is ongoing */
#define JBD2_JOURNAL_FLUSH_DISCARD 0x0001
@@ -1408,12 +1402,6 @@ JBD2_FEATURE_INCOMPAT_FUNCS(fast_commit, FAST_COMMIT)
JBD2_JOURNAL_FLUSH_ZEROOUT)
/*
- * Journal atomic flag definitions
- */
-#define JBD2_CHECKPOINT_IO_ERROR 0x001 /* Detect io error while writing
- * buffer back to disk */
-
-/*
* Function declarations for the journaling transaction and buffer
* management
*/
@@ -1449,6 +1437,7 @@ extern void jbd2_journal_commit_transaction(journal_t *);
void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy);
unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal, unsigned long *nr_to_scan);
int __jbd2_journal_remove_checkpoint(struct journal_head *);
+int jbd2_journal_try_remove_checkpoint(struct journal_head *jh);
void jbd2_journal_destroy_checkpoint(journal_t *journal);
void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);
@@ -1705,6 +1694,25 @@ static inline void jbd2_journal_abort_handle(handle_t *handle)
handle->h_aborted = 1;
}
+static inline void jbd2_init_fs_dev_write_error(journal_t *journal)
+{
+ struct address_space *mapping = journal->j_fs_dev->bd_inode->i_mapping;
+
+ /*
+ * Save the original wb_err value of client fs's bdev mapping which
+ * could be used to detect the client fs's metadata async write error.
+ */
+ errseq_check_and_advance(&mapping->wb_err, &journal->j_fs_dev_wb_err);
+}
+
+static inline int jbd2_check_fs_dev_write_error(journal_t *journal)
+{
+ struct address_space *mapping = journal->j_fs_dev->bd_inode->i_mapping;
+
+ return errseq_check(&mapping->wb_err,
+ READ_ONCE(journal->j_fs_dev_wb_err));
+}
+
#endif /* __KERNEL__ */
/* Comparison functions for transaction IDs: perform comparisons using
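A hypothetical filesystem-side sketch of the new write-error plumbing above: record the errseq baseline when the journal is set up, then check it before trusting checkpointed data:

	/* at mount, after journal initialization */
	jbd2_init_fs_dev_write_error(journal);

	/* later, before relying on checkpointed metadata */
	if (jbd2_check_fs_dev_write_error(journal))
		return -EIO;	/* bdev saw an async write error */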
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 5e13f801c902..e0ae2a43e0eb 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -72,9 +72,15 @@ extern int register_refined_jiffies(long clock_tick_rate);
#endif
/*
- * The 64-bit value is not atomic - you MUST NOT read it
+ * The 64-bit value is not atomic on 32-bit systems - you MUST NOT read it
* without sampling the sequence number in jiffies_lock.
* get_jiffies_64() will do this for you as appropriate.
+ *
+ * jiffies and jiffies_64 are at the same address for little-endian systems
+ * and for 64-bit big-endian systems.
+ * On 32-bit big-endian systems, jiffies is the lower 32 bits of jiffies_64
+ * (i.e., at address @jiffies_64 + 4).
+ * See arch/ARCH/kernel/vmlinux.lds.S
*/
extern u64 __cacheline_aligned_in_smp jiffies_64;
extern unsigned long volatile __cacheline_aligned_in_smp __jiffy_arch_data jiffies;
@@ -82,6 +88,14 @@ extern unsigned long volatile __cacheline_aligned_in_smp __jiffy_arch_data jiffi
#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void);
#else
+/**
+ * get_jiffies_64 - read the 64-bit non-atomic jiffies_64 value
+ *
+ * When BITS_PER_LONG < 64, this uses sequence number sampling using
+ * jiffies_lock to protect the 64-bit read.
+ *
+ * Return: current 64-bit jiffies value
+ */
static inline u64 get_jiffies_64(void)
{
return (u64)jiffies;
@@ -89,39 +103,76 @@ static inline u64 get_jiffies_64(void)
#endif
/*
- * These inlines deal with timer wrapping correctly. You are
- * strongly encouraged to use them
+ * These inlines deal with timer wrapping correctly. You are
+ * strongly encouraged to use them:
* 1. Because people otherwise forget
* 2. Because if the timer wrap changes in future you won't have to
* alter your driver code.
- *
- * time_after(a,b) returns true if the time a is after time b.
+ */
+
+/**
+ * time_after - returns true if the time a is after time b.
+ * @a: first comparable as unsigned long
+ * @b: second comparable as unsigned long
*
* Do this with "<0" and ">=0" to only test the sign of the result. A
* good compiler would generate better code (and a really good compiler
* wouldn't care). Gcc is currently neither.
+ *
+ * Return: %true if time a is after time b, otherwise %false.
*/
#define time_after(a,b) \
(typecheck(unsigned long, a) && \
typecheck(unsigned long, b) && \
((long)((b) - (a)) < 0))
+/**
+ * time_before - returns true if the time a is before time b.
+ * @a: first comparable as unsigned long
+ * @b: second comparable as unsigned long
+ *
+ * Return: %true if time a is before time b, otherwise %false.
+ */
#define time_before(a,b) time_after(b,a)
+/**
+ * time_after_eq - returns true if the time a is after or the same as time b.
+ * @a: first comparable as unsigned long
+ * @b: second comparable as unsigned long
+ *
+ * Return: %true if time a is after or the same as time b, otherwise %false.
+ */
#define time_after_eq(a,b) \
(typecheck(unsigned long, a) && \
typecheck(unsigned long, b) && \
((long)((a) - (b)) >= 0))
+/**
+ * time_before_eq - returns true if the time a is before or the same as time b.
+ * @a: first comparable as unsigned long
+ * @b: second comparable as unsigned long
+ *
+ * Return: %true if time a is before or the same as time b, otherwise %false.
+ */
#define time_before_eq(a,b) time_after_eq(b,a)
-/*
- * Calculate whether a is in the range of [b, c].
+/**
+ * time_in_range - Calculate whether a is in the range of [b, c].
+ * @a: time to test
+ * @b: beginning of the range
+ * @c: end of the range
+ *
+ * Return: %true if time a is in the range [b, c], otherwise %false.
*/
#define time_in_range(a,b,c) \
(time_after_eq(a,b) && \
time_before_eq(a,c))
-/*
- * Calculate whether a is in the range of [b, c).
+/**
+ * time_in_range_open - Calculate whether a is in the range of [b, c).
+ * @a: time to test
+ * @b: beginning of the range
+ * @c: end of the range
+ *
+ * Return: %true if time a is in the range [b, c), otherwise %false.
*/
#define time_in_range_open(a,b,c) \
(time_after_eq(a,b) && \
@@ -129,45 +180,138 @@ static inline u64 get_jiffies_64(void)
/* Same as above, but does so with platform independent 64bit types.
* These must be used when utilizing jiffies_64 (i.e. return value of
- * get_jiffies_64() */
+ * get_jiffies_64()). */
+
+/**
+ * time_after64 - returns true if the time a is after time b.
+ * @a: first comparable as __u64
+ * @b: second comparable as __u64
+ *
+ * This must be used when utilizing jiffies_64 (i.e. return value of
+ * get_jiffies_64()).
+ *
+ * Return: %true if time a is after time b, otherwise %false.
+ */
#define time_after64(a,b) \
(typecheck(__u64, a) && \
typecheck(__u64, b) && \
((__s64)((b) - (a)) < 0))
+/**
+ * time_before64 - returns true if the time a is before time b.
+ * @a: first comparable as __u64
+ * @b: second comparable as __u64
+ *
+ * This must be used when utilizing jiffies_64 (i.e. return value of
+ * get_jiffies_64()).
+ *
+ * Return: %true if time a is before time b, otherwise %false.
+ */
#define time_before64(a,b) time_after64(b,a)
+/**
+ * time_after_eq64 - returns true if the time a is after or the same as time b.
+ * @a: first comparable as __u64
+ * @b: second comparable as __u64
+ *
+ * This must be used when utilizing jiffies_64 (i.e. return value of
+ * get_jiffies_64()).
+ *
+ * Return: %true if time a is after or the same as time b, otherwise %false.
+ */
#define time_after_eq64(a,b) \
(typecheck(__u64, a) && \
typecheck(__u64, b) && \
((__s64)((a) - (b)) >= 0))
+/**
+ * time_before_eq64 - returns true if the time a is before or the same as time b.
+ * @a: first comparable as __u64
+ * @b: second comparable as __u64
+ *
+ * This must be used when utilizing jiffies_64 (i.e. return value of
+ * get_jiffies_64()).
+ *
+ * Return: %true if time a is before or the same as time b, otherwise %false.
+ */
#define time_before_eq64(a,b) time_after_eq64(b,a)
+/**
+ * time_in_range64 - Calculate whether a is in the range of [b, c].
+ * @a: time to test
+ * @b: beginning of the range
+ * @c: end of the range
+ *
+ * Return: %true if time a is in the range [b, c], otherwise %false.
+ */
#define time_in_range64(a, b, c) \
(time_after_eq64(a, b) && \
time_before_eq64(a, c))
/*
- * These four macros compare jiffies and 'a' for convenience.
+ * These eight macros compare jiffies[_64] and 'a' for convenience.
*/
-/* time_is_before_jiffies(a) return true if a is before jiffies */
+/**
+ * time_is_before_jiffies - return true if a is before jiffies
+ * @a: time (unsigned long) to compare to jiffies
+ *
+ * Return: %true if time a is before jiffies, otherwise %false.
+ */
#define time_is_before_jiffies(a) time_after(jiffies, a)
+/**
+ * time_is_before_jiffies64 - return true if a is before jiffies_64
+ * @a: time (__u64) to compare to jiffies_64
+ *
+ * Return: %true if time a is before jiffies_64, otherwise %false.
+ */
#define time_is_before_jiffies64(a) time_after64(get_jiffies_64(), a)
-/* time_is_after_jiffies(a) return true if a is after jiffies */
+/**
+ * time_is_after_jiffies - return true if a is after jiffies
+ * @a: time (unsigned long) to compare to jiffies
+ *
+ * Return: %true if time a is after jiffies, otherwise %false.
+ */
#define time_is_after_jiffies(a) time_before(jiffies, a)
+/**
+ * time_is_after_jiffies64 - return true if a is after jiffies_64
+ * @a: time (__u64) to compare to jiffies_64
+ *
+ * Return: %true is time a is after jiffies_64, otherwise %false.
+ */
#define time_is_after_jiffies64(a) time_before64(get_jiffies_64(), a)
-/* time_is_before_eq_jiffies(a) return true if a is before or equal to jiffies*/
+/**
+ * time_is_before_eq_jiffies - return true if a is before or equal to jiffies
+ * @a: time (unsigned long) to compare to jiffies
+ *
+ * Return: %true if time a is before or the same as jiffies, otherwise %false.
+ */
#define time_is_before_eq_jiffies(a) time_after_eq(jiffies, a)
+/**
+ * time_is_before_eq_jiffies64 - return true if a is before or equal to jiffies_64
+ * @a: time (__u64) to compare to jiffies_64
+ *
+ * Return: %true if time a is before or the same as jiffies_64, otherwise %false.
+ */
#define time_is_before_eq_jiffies64(a) time_after_eq64(get_jiffies_64(), a)
-/* time_is_after_eq_jiffies(a) return true if a is after or equal to jiffies*/
+/**
+ * time_is_after_eq_jiffies - return true if a is after or equal to jiffies
+ * @a: time (unsigned long) to compare to jiffies
+ *
+ * Return: %true if time a is after or the same as jiffies, otherwise %false.
+ */
#define time_is_after_eq_jiffies(a) time_before_eq(jiffies, a)
+/**
+ * time_is_after_eq_jiffies64 - return true if a is after or equal to jiffies_64
+ * @a: time (__u64) to compare to jiffies_64
+ *
+ * Return: %true if time a is after or the same as jiffies_64, otherwise %false.
+ */
#define time_is_after_eq_jiffies64(a) time_before_eq64(get_jiffies_64(), a)
/*
- * Have the 32 bit jiffies value wrap 5 minutes after boot
+ * Have the 32-bit jiffies value wrap 5 minutes after boot
* so jiffies wrap bugs show up earlier.
*/
#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
@@ -278,7 +422,7 @@ extern unsigned long preset_lpj;
#if BITS_PER_LONG < 64
# define MAX_SEC_IN_JIFFIES \
(long)((u64)((u64)MAX_JIFFY_OFFSET * TICK_NSEC) / NSEC_PER_SEC)
-#else /* take care of overflow on 64 bits machines */
+#else /* take care of overflow on 64-bit machines */
# define MAX_SEC_IN_JIFFIES \
(SH_DIV((MAX_JIFFY_OFFSET >> SEC_JIFFIE_SC) * TICK_NSEC, NSEC_PER_SEC, 1) - 1)
@@ -290,6 +434,12 @@ extern unsigned long preset_lpj;
extern unsigned int jiffies_to_msecs(const unsigned long j);
extern unsigned int jiffies_to_usecs(const unsigned long j);
+/**
+ * jiffies_to_nsecs - Convert jiffies to nanoseconds
+ * @j: jiffies value
+ *
+ * Return: nanoseconds value
+ */
static inline u64 jiffies_to_nsecs(const unsigned long j)
{
return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
@@ -353,12 +503,14 @@ static inline unsigned long _msecs_to_jiffies(const unsigned int m)
*
* msecs_to_jiffies() checks for the passed in value being a constant
* via __builtin_constant_p() allowing gcc to eliminate most of the
- * code, __msecs_to_jiffies() is called if the value passed does not
+ * code. __msecs_to_jiffies() is called if the value passed does not
* allow constant folding and the actual conversion must be done at
* runtime.
- * the HZ range specific helpers _msecs_to_jiffies() are called both
+ * The HZ range specific helpers _msecs_to_jiffies() are called both
* directly here and from __msecs_to_jiffies() in the case where
* constant folding is not possible.
+ *
+ * Return: jiffies value
*/
static __always_inline unsigned long msecs_to_jiffies(const unsigned int m)
{
@@ -400,12 +552,14 @@ static inline unsigned long _usecs_to_jiffies(const unsigned int u)
*
* usecs_to_jiffies() checks for the passed in value being a constant
* via __builtin_constant_p() allowing gcc to eliminate most of the
- * code, __usecs_to_jiffies() is called if the value passed does not
+ * code. __usecs_to_jiffies() is called if the value passed does not
* allow constant folding and the actual conversion must be done at
* runtime.
- * the HZ range specific helpers _usecs_to_jiffies() are called both
+ * The HZ range specific helpers _usecs_to_jiffies() are called both
* directly here and from __msecs_to_jiffies() in the case where
* constant folding is not possible.
+ *
+ * Return: jiffies value
*/
static __always_inline unsigned long usecs_to_jiffies(const unsigned int u)
{
@@ -422,6 +576,7 @@ extern unsigned long timespec64_to_jiffies(const struct timespec64 *value);
extern void jiffies_to_timespec64(const unsigned long jiffies,
struct timespec64 *value);
extern clock_t jiffies_to_clock_t(unsigned long x);
+
static inline clock_t jiffies_delta_to_clock_t(long delta)
{
return jiffies_to_clock_t(max(0L, delta));
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 4e968ebadce6..f0a949b7c973 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -257,7 +257,7 @@ extern enum jump_label_type jump_label_init_type(struct jump_entry *entry);
static __always_inline int static_key_count(struct static_key *key)
{
- return arch_atomic_read(&key->enabled);
+ return raw_atomic_read(&key->enabled);
}
static __always_inline void jump_label_init(void)
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index fe3c9993b5bf..c3f075e8f60c 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -65,6 +65,9 @@ static inline void *dereference_symbol_descriptor(void *ptr)
return ptr;
}
+/* How and when do we show kallsyms values? */
+extern bool kallsyms_show_value(const struct cred *cred);
+
#ifdef CONFIG_KALLSYMS
unsigned long kallsyms_sym_address(int idx);
int kallsyms_on_each_symbol(int (*fn)(void *, const char *, unsigned long),
@@ -93,10 +96,6 @@ extern int sprint_backtrace(char *buffer, unsigned long address);
extern int sprint_backtrace_build_id(char *buffer, unsigned long address);
int lookup_symbol_name(unsigned long addr, char *symname);
-int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name);
-
-/* How and when do we show kallsyms values? */
-extern bool kallsyms_show_value(const struct cred *cred);
#else /* !CONFIG_KALLSYMS */
@@ -155,16 +154,6 @@ static inline int lookup_symbol_name(unsigned long addr, char *symname)
return -ERANGE;
}
-static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name)
-{
- return -ERANGE;
-}
-
-static inline bool kallsyms_show_value(const struct cred *cred)
-{
- return false;
-}
-
static inline int kallsyms_on_each_symbol(int (*fn)(void *, const char *, unsigned long),
void *data)
{
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index f7ef70661ce2..dbb06d789e74 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -4,6 +4,7 @@
#include <linux/bug.h>
#include <linux/kasan-enabled.h>
+#include <linux/kasan-tags.h>
#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/types.h>
@@ -54,11 +55,13 @@ extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];
int kasan_populate_early_shadow(const void *shadow_start,
const void *shadow_end);
+#ifndef kasan_mem_to_shadow
static inline void *kasan_mem_to_shadow(const void *addr)
{
return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
+ KASAN_SHADOW_OFFSET;
}
+#endif
int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);
@@ -127,20 +130,39 @@ static __always_inline void kasan_poison_slab(struct slab *slab)
__kasan_poison_slab(slab);
}
-void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
-static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
+void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object);
+/**
+ * kasan_unpoison_new_object - Temporarily unpoison a new slab object.
+ * @cache: Cache the object belongs to.
+ * @object: Pointer to the object.
+ *
+ * This function is intended for the slab allocator's internal use. It
+ * temporarily unpoisons an object from a newly allocated slab without doing
+ * anything else. The object must later be repoisoned by
+ * kasan_poison_new_object().
+ */
+static __always_inline void kasan_unpoison_new_object(struct kmem_cache *cache,
void *object)
{
if (kasan_enabled())
- __kasan_unpoison_object_data(cache, object);
+ __kasan_unpoison_new_object(cache, object);
}
-void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
-static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
+void __kasan_poison_new_object(struct kmem_cache *cache, void *object);
+/**
+ * kasan_poison_new_object - Repoison a new slab object.
+ * @cache: Cache the object belongs to.
+ * @object: Pointer to the object.
+ *
+ * This function is intended for the slab allocator's internal use. It
+ * repoisons an object that was previously unpoisoned by
+ * kasan_unpoison_new_object() without doing anything else.
+ */
+static __always_inline void kasan_poison_new_object(struct kmem_cache *cache,
void *object)
{
if (kasan_enabled())
- __kasan_poison_object_data(cache, object);
+ __kasan_poison_new_object(cache, object);
}
void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
@@ -170,13 +192,6 @@ static __always_inline void kasan_kfree_large(void *ptr)
__kasan_kfree_large(ptr, _RET_IP_);
}
-void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
-static __always_inline void kasan_slab_free_mempool(void *ptr)
-{
- if (kasan_enabled())
- __kasan_slab_free_mempool(ptr, _RET_IP_);
-}
-
void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
void *object, gfp_t flags, bool init);
static __always_inline void * __must_check kasan_slab_alloc(
@@ -217,6 +232,113 @@ static __always_inline void * __must_check kasan_krealloc(const void *object,
return (void *)object;
}
+bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
+ unsigned long ip);
+/**
+ * kasan_mempool_poison_pages - Check and poison a mempool page allocation.
+ * @page: Pointer to the page allocation.
+ * @order: Order of the allocation.
+ *
+ * This function is intended for kernel subsystems that cache page allocations
+ * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
+ *
+ * This function is similar to kasan_mempool_poison_object() but operates on
+ * page allocations.
+ *
+ * Before the poisoned allocation can be reused, it must be unpoisoned via
+ * kasan_mempool_unpoison_pages().
+ *
+ * Return: true if the allocation can be safely reused; false otherwise.
+ */
+static __always_inline bool kasan_mempool_poison_pages(struct page *page,
+ unsigned int order)
+{
+ if (kasan_enabled())
+ return __kasan_mempool_poison_pages(page, order, _RET_IP_);
+ return true;
+}
+
+void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
+ unsigned long ip);
+/**
+ * kasan_mempool_unpoison_pages - Unpoison a mempool page allocation.
+ * @page: Pointer to the page allocation.
+ * @order: Order of the allocation.
+ *
+ * This function is intended for kernel subsystems that cache page allocations
+ * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
+ *
+ * This function unpoisons a page allocation that was previously poisoned by
+ * kasan_mempool_poison_pages() without zeroing the allocation's memory. For
+ * the tag-based modes, this function assigns a new tag to the allocation.
+ */
+static __always_inline void kasan_mempool_unpoison_pages(struct page *page,
+ unsigned int order)
+{
+ if (kasan_enabled())
+ __kasan_mempool_unpoison_pages(page, order, _RET_IP_);
+}
+
+bool __kasan_mempool_poison_object(void *ptr, unsigned long ip);
+/**
+ * kasan_mempool_poison_object - Check and poison a mempool slab allocation.
+ * @ptr: Pointer to the slab allocation.
+ *
+ * This function is intended for kernel subsystems that cache slab allocations
+ * to reuse them instead of freeing them back to the slab allocator (e.g.
+ * mempool).
+ *
+ * This function poisons a slab allocation and saves a free stack trace for it
+ * without initializing the allocation's memory and without putting it into the
+ * quarantine (for the Generic mode).
+ *
+ * This function also performs checks to detect double-free and invalid-free
+ * bugs and reports them. The caller can use the return value of this function
+ * to find out if the allocation is buggy.
+ *
+ * Before the poisoned allocation can be reused, it must be unpoisoned via
+ * kasan_mempool_unpoison_object().
+ *
+ * This function operates on all slab allocations including large kmalloc
+ * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
+ * size > KMALLOC_MAX_SIZE).
+ *
+ * Return: true if the allocation can be safely reused; false otherwise.
+ */
+static __always_inline bool kasan_mempool_poison_object(void *ptr)
+{
+ if (kasan_enabled())
+ return __kasan_mempool_poison_object(ptr, _RET_IP_);
+ return true;
+}
+
+void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip);
+/**
+ * kasan_mempool_unpoison_object - Unpoison a mempool slab allocation.
+ * @ptr: Pointer to the slab allocation.
+ * @size: Size to be unpoisoned.
+ *
+ * This function is intended for kernel subsystems that cache slab allocations
+ * to reuse them instead of freeing them back to the slab allocator (e.g.
+ * mempool).
+ *
+ * This function unpoisons a slab allocation that was previously poisoned via
+ * kasan_mempool_poison_object() and saves an alloc stack trace for it without
+ * initializing the allocation's memory. For the tag-based modes, this function
+ * does not assign a new tag to the allocation and instead restores the
+ * original tags based on the pointer value.
+ *
+ * This function operates on all slab allocations including large kmalloc
+ * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
+ * size > KMALLOC_MAX_SIZE).
+ */
+static __always_inline void kasan_mempool_unpoison_object(void *ptr,
+ size_t size)
+{
+ if (kasan_enabled())
+ __kasan_mempool_unpoison_object(ptr, size, _RET_IP_);
+}
+
/*
* Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
* the hardware tag-based mode that doesn't rely on compiler instrumentation.
@@ -240,9 +362,9 @@ static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
return false;
}
static inline void kasan_poison_slab(struct slab *slab) {}
-static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
+static inline void kasan_unpoison_new_object(struct kmem_cache *cache,
void *object) {}
-static inline void kasan_poison_object_data(struct kmem_cache *cache,
+static inline void kasan_poison_new_object(struct kmem_cache *cache,
void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
const void *object)
@@ -254,7 +376,6 @@ static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init
return false;
}
static inline void kasan_kfree_large(void *ptr) {}
-static inline void kasan_slab_free_mempool(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
gfp_t flags, bool init)
{
@@ -274,6 +395,17 @@ static inline void *kasan_krealloc(const void *object, size_t new_size,
{
return (void *)object;
}
+static inline bool kasan_mempool_poison_pages(struct page *page, unsigned int order)
+{
+ return true;
+}
+static inline void kasan_mempool_unpoison_pages(struct page *page, unsigned int order) {}
+static inline bool kasan_mempool_poison_object(void *ptr)
+{
+ return true;
+}
+static inline void kasan_mempool_unpoison_object(void *ptr, size_t size) {}
+
static inline bool kasan_check_byte(const void *address)
{
return true;
@@ -283,8 +415,10 @@ static inline bool kasan_check_byte(const void *address)
#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
+asmlinkage void kasan_unpoison_task_stack_below(const void *watermark);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
+static inline void kasan_unpoison_task_stack_below(const void *watermark) {}
#endif
#ifdef CONFIG_KASAN_GENERIC
@@ -343,7 +477,7 @@ static inline void *kasan_reset_tag(const void *addr)
* @is_write: whether the bad access is a write or a read
* @ip: instruction pointer for the accessibility check or the bad access itself
*/
-bool kasan_report(unsigned long addr, size_t size,
+bool kasan_report(const void *addr, size_t size,
bool is_write, unsigned long ip);
#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
@@ -464,10 +598,10 @@ static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}
#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
-#ifdef CONFIG_KASAN_INLINE
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_non_canonical_hook(unsigned long addr);
-#else /* CONFIG_KASAN_INLINE */
+#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
-#endif /* CONFIG_KASAN_INLINE */
+#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
#endif /* LINUX_KASAN_H */
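
The kasan_mempool_*() additions above replace the removed kasan_slab_free_mempool() with an explicit check-and-poison / unpoison pair. A minimal sketch of how a mempool-style cache might pair them when recycling slab objects; struct my_cache and both helpers are hypothetical, not part of the patch:

struct my_cache {
	void *slots[16];
	unsigned int nr;
};

static void my_cache_put(struct my_cache *c, void *obj)
{
	/* Poison on the way into the cache; a false return means a
	 * double-free or invalid-free was detected and reported, so
	 * the object must not be reused. */
	if (!kasan_mempool_poison_object(obj))
		return;
	c->slots[c->nr++] = obj;
}

static void *my_cache_get(struct my_cache *c, size_t size)
{
	void *obj = c->slots[--c->nr];

	/* Unpoison before handing the object back out; memory is not
	 * zeroed and, for the tag-based modes, the original tags are
	 * restored from the pointer value. */
	kasan_mempool_unpoison_object(obj, size);
	return obj;
}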
diff --git a/include/linux/kcov.h b/include/linux/kcov.h
index ee04256f28af..b851ba415e03 100644
--- a/include/linux/kcov.h
+++ b/include/linux/kcov.h
@@ -72,6 +72,23 @@ static inline void kcov_remote_stop_softirq(void)
kcov_remote_stop();
}
+#ifdef CONFIG_64BIT
+typedef unsigned long kcov_u64;
+#else
+typedef unsigned long long kcov_u64;
+#endif
+
+void __sanitizer_cov_trace_pc(void);
+void __sanitizer_cov_trace_cmp1(u8 arg1, u8 arg2);
+void __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2);
+void __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2);
+void __sanitizer_cov_trace_cmp8(kcov_u64 arg1, kcov_u64 arg2);
+void __sanitizer_cov_trace_const_cmp1(u8 arg1, u8 arg2);
+void __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2);
+void __sanitizer_cov_trace_const_cmp4(u32 arg1, u32 arg2);
+void __sanitizer_cov_trace_const_cmp8(kcov_u64 arg1, kcov_u64 arg2);
+void __sanitizer_cov_trace_switch(kcov_u64 val, void *cases);
+
#else
static inline void kcov_task_init(struct task_struct *t) {}
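
These declarations expose the callbacks the compiler emits into instrumented code. As a hedged illustration of what comparison tracing amounts to (my_check is hypothetical; in a real build the call is inserted by the compiler under CONFIG_KCOV_ENABLE_COMPARISONS, never written by hand):

static bool my_check(u32 x)
{
	/* The instrumentation conceptually rewrites the comparison
	 * below into a trace call followed by the compare itself. */
	__sanitizer_cov_trace_const_cmp4(0xdeadbeef, x);
	return x == 0xdeadbeef;
}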
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index 07dfb6a20a1c..f6c2ddb16b95 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -196,6 +196,8 @@ int kdb_process_cpu(const struct task_struct *p)
return cpu;
}
+extern void kdb_send_sig(struct task_struct *p, int sig);
+
#ifdef CONFIG_KALLSYMS
extern const char *kdb_walk_kallsyms(loff_t *pos);
#else /* ! CONFIG_KALLSYMS */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 0d91e0af0125..d9ad21058eed 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -13,6 +13,7 @@
#include <linux/stdarg.h>
#include <linux/align.h>
+#include <linux/array_size.h>
#include <linux/limits.h>
#include <linux/linkage.h>
#include <linux/stddef.h>
@@ -29,6 +30,7 @@
#include <linux/panic.h>
#include <linux/printk.h>
#include <linux/build_bug.h>
+#include <linux/sprintf.h>
#include <linux/static_call_types.h>
#include <linux/instruction_pointer.h>
#include <asm/byteorder.h>
@@ -49,12 +51,6 @@
#define READ 0
#define WRITE 1
-/**
- * ARRAY_SIZE - get the number of elements in array @arr
- * @arr: array to be sized
- */
-#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
-
#define PTR_IF(cond, ptr) ((cond) ? (ptr) : NULL)
#define u64_to_user_ptr(x) ( \
@@ -203,35 +199,6 @@ static inline void might_fault(void) { }
void do_exit(long error_code) __noreturn;
-extern int num_to_str(char *buf, int size,
- unsigned long long num, unsigned int width);
-
-/* lib/printf utilities */
-
-extern __printf(2, 3) int sprintf(char *buf, const char * fmt, ...);
-extern __printf(2, 0) int vsprintf(char *buf, const char *, va_list);
-extern __printf(3, 4)
-int snprintf(char *buf, size_t size, const char *fmt, ...);
-extern __printf(3, 0)
-int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
-extern __printf(3, 4)
-int scnprintf(char *buf, size_t size, const char *fmt, ...);
-extern __printf(3, 0)
-int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
-extern __printf(2, 3) __malloc
-char *kasprintf(gfp_t gfp, const char *fmt, ...);
-extern __printf(2, 0) __malloc
-char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
-extern __printf(2, 0)
-const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list args);
-
-extern __scanf(2, 3)
-int sscanf(const char *, const char *, ...);
-extern __scanf(2, 0)
-int vsscanf(const char *, const char *, va_list);
-
-extern int no_hash_pointers_enable(char *str);
-
extern int get_option(char **str, int *pint);
extern char *get_options(const char *str, int nints, int *ints);
extern unsigned long long memparse(const char *ptr, char **retptr);
@@ -457,13 +424,6 @@ ftrace_vprintk(const char *fmt, va_list ap)
static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
#endif /* CONFIG_TRACING */
-/* This counts to 12. Any more, it will return 13th argument. */
-#define __COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _n, X...) _n
-#define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
-
-#define __CONCAT(a, b) a ## b
-#define CONCATENATE(a, b) __CONCAT(a, b)
-
/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 73f5c120def8..99aaa050ccb7 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -316,6 +316,7 @@ struct kernfs_ops {
struct poll_table_struct *pt);
int (*mmap)(struct kernfs_open_file *of, struct vm_area_struct *vma);
+ loff_t (*llseek)(struct kernfs_open_file *of, loff_t offset, int whence);
};
/*
@@ -550,6 +551,10 @@ static inline int kernfs_setattr(struct kernfs_node *kn,
const struct iattr *iattr)
{ return -ENOSYS; }
+static inline __poll_t kernfs_generic_poll(struct kernfs_open_file *of,
+ struct poll_table_struct *pt)
+{ return -ENOSYS; }
+
static inline void kernfs_notify(struct kernfs_node *kn) { }
static inline int kernfs_xattr_get(struct kernfs_node *kn, const char *name,
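
The new ->llseek() hook lets a kernfs file override the default seek behaviour. A sketch of the simplest possible implementation, assuming standard file semantics are wanted (my_kernfs_llseek and my_kernfs_ops are hypothetical):

static loff_t my_kernfs_llseek(struct kernfs_open_file *of,
			       loff_t offset, int whence)
{
	/* Defer to the stock VFS helper for this hypothetical file. */
	return generic_file_llseek(of->file, offset, whence);
}

static const struct kernfs_ops my_kernfs_ops = {
	.llseek	= my_kernfs_llseek,
};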
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 22b5cd24f581..400cb6c02176 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -22,10 +22,6 @@
#include <uapi/linux/kexec.h>
#include <linux/verification.h>
-/* Location of a reserved region to hold the crash kernel.
- */
-extern struct resource crashk_res;
-extern struct resource crashk_low_res;
extern note_buf_t __percpu *crash_notes;
#ifdef CONFIG_KEXEC_CORE
@@ -33,6 +29,7 @@ extern note_buf_t __percpu *crash_notes;
#include <linux/compat.h>
#include <linux/ioport.h>
#include <linux/module.h>
+#include <linux/highmem.h>
#include <asm/kexec.h>
/* Verify architecture specific macros are defined */
@@ -230,21 +227,6 @@ static inline int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf)
}
#endif
-/* Alignment required for elf header segment */
-#define ELF_CORE_HEADER_ALIGN 4096
-
-struct crash_mem {
- unsigned int max_nr_ranges;
- unsigned int nr_ranges;
- struct range ranges[];
-};
-
-extern int crash_exclude_mem_range(struct crash_mem *mem,
- unsigned long long mstart,
- unsigned long long mend);
-extern int crash_prepare_elf64_headers(struct crash_mem *mem, int need_kernel_map,
- void **addr, unsigned long *sz);
-
#ifndef arch_kexec_apply_relocations_add
/*
* arch_kexec_apply_relocations_add - apply relocations of type RELA
@@ -334,6 +316,10 @@ struct kimage {
unsigned int preserve_context : 1;
/* If set, we are using file mode kexec syscall */
unsigned int file_mode:1;
+#ifdef CONFIG_CRASH_HOTPLUG
+ /* If set, allow changes to elfcorehdr of kexec_load'd image */
+ unsigned int update_elfcorehdr:1;
+#endif
#ifdef ARCH_HAS_KIMAGE_ARCH
struct kimage_arch arch;
@@ -360,6 +346,12 @@ struct kimage {
struct purgatory_info purgatory_info;
#endif
+#ifdef CONFIG_CRASH_HOTPLUG
+ int hp_action;
+ int elfcorehdr_index;
+ bool elfcorehdr_updated;
+#endif
+
#ifdef CONFIG_IMA_KEXEC
/* Virtual address of IMA measurement buffer for kexec syscall */
void *ima_buffer;
@@ -404,14 +396,14 @@ bool kexec_load_permitted(int kexec_image_type);
/* List of defined/legal kexec flags */
#ifndef CONFIG_KEXEC_JUMP
-#define KEXEC_FLAGS KEXEC_ON_CRASH
+#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_UPDATE_ELFCOREHDR)
#else
-#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_PRESERVE_CONTEXT)
+#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_PRESERVE_CONTEXT | KEXEC_UPDATE_ELFCOREHDR)
#endif
/* List of defined/legal kexec file flags */
#define KEXEC_FILE_FLAGS (KEXEC_FILE_UNLOAD | KEXEC_FILE_ON_CRASH | \
- KEXEC_FILE_NO_INITRAMFS)
+ KEXEC_FILE_NO_INITRAMFS | KEXEC_FILE_DEBUG)
/* flag to track if kexec reboot is in progress */
extern bool kexec_in_progress;
@@ -490,6 +482,31 @@ static inline int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, g
static inline void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages) { }
#endif
+#ifndef arch_crash_handle_hotplug_event
+static inline void arch_crash_handle_hotplug_event(struct kimage *image) { }
+#endif
+
+int crash_check_update_elfcorehdr(void);
+
+#ifndef crash_hotplug_cpu_support
+static inline int crash_hotplug_cpu_support(void) { return 0; }
+#endif
+
+#ifndef crash_hotplug_memory_support
+static inline int crash_hotplug_memory_support(void) { return 0; }
+#endif
+
+#ifndef crash_get_elfcorehdr_size
+static inline unsigned int crash_get_elfcorehdr_size(void) { return 0; }
+#endif
+
+extern bool kexec_file_dbg_print;
+
+#define kexec_dprintk(fmt, ...) \
+ printk("%s" fmt, \
+ kexec_file_dbg_print ? KERN_INFO : KERN_DEBUG, \
+ ##__VA_ARGS__)
+
#else /* !CONFIG_KEXEC_CORE */
struct pt_regs;
struct task_struct;
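
kexec_dprintk() picks its log level at run time: KERN_INFO when kexec_file_dbg_print is set (i.e. the image was loaded with KEXEC_FILE_DEBUG), KERN_DEBUG otherwise. A usage sketch; the surrounding helper is hypothetical:

static int my_load_segment(unsigned long addr, size_t len)
{
	/* Shows up at KERN_INFO only when debug printing was requested. */
	kexec_dprintk("loading segment at %#lx, %zu bytes\n", addr, len);
	return 0;
}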
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
index 7d985a1dfe4a..5caf3ce82373 100644
--- a/include/linux/key-type.h
+++ b/include/linux/key-type.h
@@ -73,6 +73,7 @@ struct key_type {
unsigned int flags;
#define KEY_TYPE_NET_DOMAIN 0x00000001 /* Keys of this type have a net namespace domain */
+#define KEY_TYPE_INSTANT_REAP 0x00000002 /* Keys of this type don't have a delay after expiring */
/* vet a description */
int (*vet_description)(const char *description);
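
A key type opts into the new behaviour by setting the flag in its definition. A hedged sketch (my_key_type is hypothetical):

static struct key_type my_key_type = {
	.name	= "my_keys",
	/* Expired keys of this type are reaped with no grace period. */
	.flags	= KEY_TYPE_INSTANT_REAP,
};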
diff --git a/include/linux/key.h b/include/linux/key.h
index 8dc7f7c3088b..943a432da3ae 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -490,9 +490,6 @@ do { \
rcu_assign_pointer((KEY)->payload.rcu_data0, (PAYLOAD)); \
} while (0)
-#ifdef CONFIG_SYSCTL
-extern struct ctl_table key_sysctls[];
-#endif
/*
* the userspace interface
*/
@@ -518,6 +515,7 @@ extern void key_init(void);
#define key_init() do { } while(0)
#define key_free_user_ns(ns) do { } while(0)
#define key_remove_domain(d) do { } while(0)
+#define key_lookup(k) NULL
#endif /* CONFIG_KEYS */
#endif /* __KERNEL__ */
diff --git a/include/linux/kfence.h b/include/linux/kfence.h
index 726857a4b680..88100cc9caba 100644
--- a/include/linux/kfence.h
+++ b/include/linux/kfence.h
@@ -59,15 +59,16 @@ static __always_inline bool is_kfence_address(const void *addr)
}
/**
- * kfence_alloc_pool() - allocate the KFENCE pool via memblock
+ * kfence_alloc_pool_and_metadata() - allocate the KFENCE pool and KFENCE
+ * metadata via memblock
*/
-void __init kfence_alloc_pool(void);
+void __init kfence_alloc_pool_and_metadata(void);
/**
* kfence_init() - perform KFENCE initialization at boot time
*
- * Requires that kfence_alloc_pool() was called before. This sets up the
- * allocation gate timer, and requires that workqueues are available.
+ * Requires that kfence_alloc_pool_and_metadata() was called before. This sets
+ * up the allocation gate timer, and requires that workqueues are available.
*/
void __init kfence_init(void);
@@ -222,8 +223,10 @@ bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *sla
#else /* CONFIG_KFENCE */
+#define kfence_sample_interval (0)
+
static inline bool is_kfence_address(const void *addr) { return false; }
-static inline void kfence_alloc_pool(void) { }
+static inline void kfence_alloc_pool_and_metadata(void) { }
static inline void kfence_init(void) { }
static inline void kfence_shutdown_cache(struct kmem_cache *s) { }
static inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) { return NULL; }
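
Defining kfence_sample_interval as a literal 0 in the !CONFIG_KFENCE stub lets callers test it unconditionally and have the branch compile away. A sketch (my_kfence_possible is hypothetical):

static bool my_kfence_possible(void)
{
	/* Constant-folds to false when KFENCE is compiled out. */
	return kfence_sample_interval != 0;
}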
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
index 258cdde8d356..76e891ee9e37 100644
--- a/include/linux/kgdb.h
+++ b/include/linux/kgdb.h
@@ -365,5 +365,6 @@ extern void kgdb_free_init_mem(void);
#define dbg_late_init()
static inline void kgdb_panic(const char *msg) {}
static inline void kgdb_free_init_mem(void) { }
+static inline int kgdb_nmicallback(int cpu, void *regs) { return 1; }
#endif /* ! CONFIG_KGDB */
#endif /* _KGDB_H_ */
diff --git a/include/linux/kmsan_types.h b/include/linux/kmsan_types.h
index 8bfa6c98176d..929287981afe 100644
--- a/include/linux/kmsan_types.h
+++ b/include/linux/kmsan_types.h
@@ -9,6 +9,8 @@
#ifndef _LINUX_KMSAN_TYPES_H
#define _LINUX_KMSAN_TYPES_H
+#include <linux/types.h>
+
/* These constants are defined in the MSan LLVM instrumentation pass. */
#define KMSAN_RETVAL_SIZE 800
#define KMSAN_PARAM_SIZE 800
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index c392c811d9ad..c30affcc43b4 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -69,14 +69,16 @@ struct kobject {
const struct kobj_type *ktype;
struct kernfs_node *sd; /* sysfs directory entry */
struct kref kref;
-#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
- struct delayed_work release;
-#endif
+
unsigned int state_initialized:1;
unsigned int state_in_sysfs:1;
unsigned int state_add_uevent_sent:1;
unsigned int state_remove_uevent_sent:1;
unsigned int uevent_suppress:1;
+
+#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
+ struct delayed_work release;
+#endif
};
__printf(2, 3) int kobject_set_name(struct kobject *kobj, const char *name, ...);
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 85a64cb95d75..0ff44d6633e3 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -26,8 +26,7 @@
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/ftrace.h>
-#include <linux/refcount.h>
-#include <linux/freelist.h>
+#include <linux/objpool.h>
#include <linux/rethook.h>
#include <asm/kprobes.h>
@@ -140,8 +139,8 @@ static inline bool kprobe_ftrace(struct kprobe *p)
*
*/
struct kretprobe_holder {
- struct kretprobe *rp;
- refcount_t ref;
+ struct kretprobe __rcu *rp;
+ struct objpool_head pool;
};
struct kretprobe {
@@ -154,7 +153,6 @@ struct kretprobe {
#ifdef CONFIG_KRETPROBE_ON_RETHOOK
struct rethook *rh;
#else
- struct freelist_head freelist;
struct kretprobe_holder *rph;
#endif
};
@@ -165,10 +163,7 @@ struct kretprobe_instance {
#ifdef CONFIG_KRETPROBE_ON_RETHOOK
struct rethook_node node;
#else
- union {
- struct freelist_node freelist;
- struct rcu_head rcu;
- };
+ struct rcu_head rcu;
struct llist_node llist;
struct kretprobe_holder *rph;
kprobe_opcode_t *ret_addr;
@@ -202,10 +197,8 @@ extern int arch_trampoline_kprobe(struct kprobe *p);
#ifdef CONFIG_KRETPROBE_ON_RETHOOK
static nokprobe_inline struct kretprobe *get_kretprobe(struct kretprobe_instance *ri)
{
- RCU_LOCKDEP_WARN(!rcu_read_lock_any_held(),
- "Kretprobe is accessed from instance under preemptive context");
-
- return (struct kretprobe *)READ_ONCE(ri->node.rethook->data);
+ /* rethook::data is a never-changing field, so it can be accessed freely. */
+ return (struct kretprobe *)ri->node.rethook->data;
}
static nokprobe_inline unsigned long get_kretprobe_retaddr(struct kretprobe_instance *ri)
{
@@ -250,10 +243,7 @@ unsigned long kretprobe_trampoline_handler(struct pt_regs *regs,
static nokprobe_inline struct kretprobe *get_kretprobe(struct kretprobe_instance *ri)
{
- RCU_LOCKDEP_WARN(!rcu_read_lock_any_held(),
- "Kretprobe is accessed from instance under preemptive context");
-
- return READ_ONCE(ri->rph->rp);
+ return rcu_dereference_check(ri->rph->rp, rcu_read_lock_any_held());
}
static nokprobe_inline unsigned long get_kretprobe_retaddr(struct kretprobe_instance *ri)
@@ -450,6 +440,10 @@ int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
int arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value,
char *type, char *sym);
+
+int kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data);
+
#else /* !CONFIG_KPROBES: */
static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 899a314bc487..401348e9f92b 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -26,6 +26,22 @@ int ksm_disable(struct mm_struct *mm);
int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);
+/*
+ * To identify zeropages that were mapped by KSM, we reuse the dirty bit
+ * in the PTE. If the PTE is dirty, the zeropage was mapped by KSM when
+ * deduplicating memory.
+ */
+#define is_ksm_zero_pte(pte) (is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte))
+
+extern unsigned long ksm_zero_pages;
+
+static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
+{
+ if (is_ksm_zero_pte(pte)) {
+ ksm_zero_pages--;
+ mm->ksm_zero_pages--;
+ }
+}
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
@@ -60,8 +76,8 @@ static inline void ksm_exit(struct mm_struct *mm)
* We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
* but what if the vma was unmerged while the page was swapped out?
*/
-struct page *ksm_might_need_to_copy(struct page *page,
- struct vm_area_struct *vma, unsigned long address);
+struct folio *ksm_might_need_to_copy(struct folio *folio,
+ struct vm_area_struct *vma, unsigned long addr);
void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
@@ -95,6 +111,10 @@ static inline void ksm_exit(struct mm_struct *mm)
{
}
+static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
+{
+}
+
#ifdef CONFIG_MEMORY_FAILURE
static inline void collect_procs_ksm(struct page *page,
struct list_head *to_kill, int force_early)
@@ -109,10 +129,10 @@ static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
return 0;
}
-static inline struct page *ksm_might_need_to_copy(struct page *page,
- struct vm_area_struct *vma, unsigned long address)
+static inline struct folio *ksm_might_need_to_copy(struct folio *folio,
+ struct vm_area_struct *vma, unsigned long addr)
{
- return page;
+ return folio;
}
static inline void rmap_walk_ksm(struct folio *folio,
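
ksm_might_unmap_zero_page() keeps the KSM zeropage counters in sync when a PTE goes away: is_ksm_zero_pte() recognises a KSM-placed zeropage by the dirty bit. A sketch of the call pattern in a teardown path (my_zap_pte is hypothetical; the real callers live in mm/):

static void my_zap_pte(struct mm_struct *mm, pte_t pteval)
{
	/* Decrements ksm_zero_pages and mm->ksm_zero_pages if the
	 * PTE mapped a KSM-deduplicated zeropage. */
	ksm_might_unmap_zero_page(mm, pteval);
}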
diff --git a/include/linux/kstrtox.h b/include/linux/kstrtox.h
index 529974e22ea7..7fcf29a4e0de 100644
--- a/include/linux/kstrtox.h
+++ b/include/linux/kstrtox.h
@@ -147,9 +147,4 @@ extern long simple_strtol(const char *,char **,unsigned int);
extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
extern long long simple_strtoll(const char *,char **,unsigned int);
-static inline int strtobool(const char *s, bool *res)
-{
- return kstrtobool(s, res);
-}
-
#endif /* _LINUX_KSTRTOX_H */
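
With the strtobool() wrapper gone, callers use kstrtobool() directly; the semantics are identical. A usage sketch (my_parse_bool is hypothetical):

static int my_parse_bool(const char *arg)
{
	bool val;
	int ret;

	/* Direct replacement for the removed strtobool(). */
	ret = kstrtobool(arg, &val);
	if (ret)
		return ret;
	return val;
}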
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 30e5bec81d2b..b11f53c1ba2e 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -86,9 +86,10 @@ void free_kthread_struct(struct task_struct *k);
void kthread_bind(struct task_struct *k, unsigned int cpu);
void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask);
int kthread_stop(struct task_struct *k);
+int kthread_stop_put(struct task_struct *k);
bool kthread_should_stop(void);
bool kthread_should_park(void);
-bool __kthread_should_park(struct task_struct *k);
+bool kthread_should_stop_or_park(void);
bool kthread_freezable_should_stop(bool *was_frozen);
void *kthread_func(struct task_struct *k);
void *kthread_data(struct task_struct *k);
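
kthread_stop_put() bundles stopping a kthread with dropping the caller's task reference, a pairing previously written out as two calls. A hedged sketch assuming exactly that semantics (my_stop_worker is hypothetical):

static int my_stop_worker(struct task_struct *worker)
{
	/* Assumed equivalent to kthread_stop(worker) followed by
	 * put_task_struct(worker). */
	return kthread_stop_put(worker);
}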
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 73f20deb497d..3a4e723eae0f 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -21,12 +21,10 @@
#ifndef _LINUX_KTIME_H
#define _LINUX_KTIME_H
-#include <linux/time.h>
-#include <linux/jiffies.h>
#include <asm/bug.h>
-
-/* Nanosecond scalar representation for kernel time values */
-typedef s64 ktime_t;
+#include <linux/jiffies.h>
+#include <linux/time.h>
+#include <linux/types.h>
/**
* ktime_set - Set a ktime_t variable from a seconds/nanoseconds value
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 0e571e973bc2..179df96b20f8 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -80,8 +80,8 @@
/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS 2
-#ifndef KVM_ADDRESS_SPACE_NUM
-#define KVM_ADDRESS_SPACE_NUM 1
+#ifndef KVM_MAX_NR_ADDRESS_SPACES
+#define KVM_MAX_NR_ADDRESS_SPACES 1
#endif
/*
@@ -190,8 +190,6 @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
struct kvm_vcpu *except);
-bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req,
- unsigned long *vcpu_bitmap);
#define KVM_USERSPACE_IRQ_SOURCE_ID 0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
@@ -255,12 +253,17 @@ bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif
-#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
+#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
+union kvm_mmu_notifier_arg {
+ pte_t pte;
+ unsigned long attributes;
+};
+
struct kvm_gfn_range {
struct kvm_memory_slot *slot;
gfn_t start;
gfn_t end;
- pte_t pte;
+ union kvm_mmu_notifier_arg arg;
bool may_block;
};
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
@@ -586,8 +589,20 @@ struct kvm_memory_slot {
u32 flags;
short id;
u16 as_id;
+
+#ifdef CONFIG_KVM_PRIVATE_MEM
+ struct {
+ struct file __rcu *file;
+ pgoff_t pgoff;
+ } gmem;
+#endif
};
+static inline bool kvm_slot_can_be_private(const struct kvm_memory_slot *slot)
+{
+ return slot && (slot->flags & KVM_MEM_GUEST_MEMFD);
+}
+
static inline bool kvm_slot_dirty_track_enabled(const struct kvm_memory_slot *slot)
{
return slot->flags & KVM_MEM_LOG_DIRTY_PAGES;
@@ -662,7 +677,7 @@ struct kvm_irq_routing_table {
* Array indexed by gsi. Each entry contains list of irq chips
* the gsi is connected to.
*/
- struct hlist_head map[];
+ struct hlist_head map[] __counted_by(nr_rt_entries);
};
#endif
@@ -675,13 +690,29 @@ bool kvm_arch_irqchip_in_kernel(struct kvm *kvm);
#define KVM_MEM_SLOTS_NUM SHRT_MAX
#define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_INTERNAL_MEM_SLOTS)
-#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
+#if KVM_MAX_NR_ADDRESS_SPACES == 1
+static inline int kvm_arch_nr_memslot_as_ids(struct kvm *kvm)
+{
+ return KVM_MAX_NR_ADDRESS_SPACES;
+}
+
static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
{
return 0;
}
#endif
+/*
+ * Arch code must define kvm_arch_has_private_mem if support for private memory
+ * is enabled.
+ */
+#if !defined(kvm_arch_has_private_mem) && !IS_ENABLED(CONFIG_KVM_PRIVATE_MEM)
+static inline bool kvm_arch_has_private_mem(struct kvm *kvm)
+{
+ return false;
+}
+#endif
+
struct kvm_memslots {
u64 generation;
atomic_long_t last_used_slot;
@@ -719,9 +750,9 @@ struct kvm {
struct mm_struct *mm; /* userspace tied to this vm */
unsigned long nr_memslot_pages;
/* The two memslot sets - active and inactive (per address space) */
- struct kvm_memslots __memslots[KVM_ADDRESS_SPACE_NUM][2];
+ struct kvm_memslots __memslots[KVM_MAX_NR_ADDRESS_SPACES][2];
/* The current active memslot set for each address space */
- struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
+ struct kvm_memslots __rcu *memslots[KVM_MAX_NR_ADDRESS_SPACES];
struct xarray vcpu_array;
/*
* Protected by slots_lock, but can be read outside if an
@@ -751,7 +782,7 @@ struct kvm {
struct list_head vm_list;
struct mutex lock;
struct kvm_io_bus __rcu *buses[KVM_NR_BUSES];
-#ifdef CONFIG_HAVE_KVM_EVENTFD
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
struct {
spinlock_t lock;
struct list_head items;
@@ -759,8 +790,8 @@ struct kvm {
struct list_head resampler_list;
struct mutex resampler_lock;
} irqfds;
- struct list_head ioeventfds;
#endif
+ struct list_head ioeventfds;
struct kvm_vm_stat stat;
struct kvm_arch arch;
refcount_t users_count;
@@ -776,17 +807,16 @@ struct kvm {
* Update side is protected by irq_lock.
*/
struct kvm_irq_routing_table __rcu *irq_routing;
-#endif
-#ifdef CONFIG_HAVE_KVM_IRQFD
+
struct hlist_head irq_ack_notifier_list;
#endif
-#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
struct mmu_notifier mmu_notifier;
unsigned long mmu_invalidate_seq;
long mmu_invalidate_in_progress;
- unsigned long mmu_invalidate_range_start;
- unsigned long mmu_invalidate_range_end;
+ gfn_t mmu_invalidate_range_start;
+ gfn_t mmu_invalidate_range_end;
#endif
struct list_head devices;
u64 manual_dirty_log_protect;
@@ -805,6 +835,10 @@ struct kvm {
#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
struct notifier_block pm_notifier;
#endif
+#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+ /* Protected by slots_lock (for writes) and RCU (for reads) */
+ struct xarray mem_attr_array;
+#endif
char stats_id[KVM_STATS_NAME_SIZE];
};
@@ -849,7 +883,7 @@ static inline void kvm_vm_bugged(struct kvm *kvm)
#define KVM_BUG(cond, kvm, fmt...) \
({ \
- int __ret = (cond); \
+ bool __ret = !!(cond); \
\
if (WARN_ONCE(__ret && !(kvm)->vm_bugged, fmt)) \
kvm_vm_bugged(kvm); \
@@ -858,13 +892,32 @@ static inline void kvm_vm_bugged(struct kvm *kvm)
#define KVM_BUG_ON(cond, kvm) \
({ \
- int __ret = (cond); \
+ bool __ret = !!(cond); \
\
if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \
kvm_vm_bugged(kvm); \
unlikely(__ret); \
})
+/*
+ * Note, "data corruption" refers to corruption of host kernel data structures,
+ * not guest data. Guest data corruption, suspected or confirmed, that is tied
+ * and contained to a single VM should *never* BUG() and potentially panic the
+ * host, i.e. use this variant of KVM_BUG() if and only if a KVM data structure
+ * is corrupted and that corruption can have a cascading effect to other parts
+ * of the host and/or to other VMs.
+ */
+#define KVM_BUG_ON_DATA_CORRUPTION(cond, kvm) \
+({ \
+ bool __ret = !!(cond); \
+ \
+ if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) \
+ BUG_ON(__ret); \
+ else if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \
+ kvm_vm_bugged(kvm); \
+ unlikely(__ret); \
+})
+
static inline void kvm_vcpu_srcu_read_lock(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PROVE_RCU
@@ -944,7 +997,7 @@ static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm)
}
#endif
-#ifdef CONFIG_HAVE_KVM_IRQFD
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
@@ -968,7 +1021,7 @@ void kvm_put_kvm_no_destroy(struct kvm *kvm);
static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
- as_id = array_index_nospec(as_id, KVM_ADDRESS_SPACE_NUM);
+ as_id = array_index_nospec(as_id, KVM_MAX_NR_ADDRESS_SPACES);
return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
lockdep_is_held(&kvm->slots_lock) ||
!refcount_read(&kvm->users_count));
@@ -991,6 +1044,8 @@ static inline bool kvm_memslots_empty(struct kvm_memslots *slots)
return RB_EMPTY_ROOT(&slots->gfn_tree);
}
+bool kvm_are_all_memslots_empty(struct kvm *kvm);
+
#define kvm_for_each_memslot(memslot, bkt, slots) \
hash_for_each(slots->id_hash, bkt, memslot, id_node[slots->node_idx]) \
if (WARN_ON_ONCE(!memslot->npages)) { \
@@ -1123,9 +1178,9 @@ enum kvm_mr_change {
};
int kvm_set_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem);
+ const struct kvm_userspace_memory_region2 *mem);
int __kvm_set_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem);
+ const struct kvm_userspace_memory_region2 *mem);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
@@ -1357,6 +1412,9 @@ int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool yield_to_kernel_mode);
void kvm_flush_remote_tlbs(struct kvm *kvm);
+void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages);
+void kvm_flush_remote_tlbs_memslot(struct kvm *kvm,
+ const struct kvm_memory_slot *memslot);
#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
@@ -1366,10 +1424,10 @@ void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
#endif
-void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
- unsigned long end);
-void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start,
- unsigned long end);
+void kvm_mmu_invalidate_begin(struct kvm *kvm);
+void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end);
+void kvm_mmu_invalidate_end(struct kvm *kvm);
+bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
long kvm_arch_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
@@ -1385,10 +1443,7 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
unsigned long mask);
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot);
-#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
-void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
- const struct kvm_memory_slot *memslot);
-#else /* !CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
+#ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
int *is_dirty, struct kvm_memory_slot **memslot);
@@ -1477,11 +1532,23 @@ static inline void kvm_arch_free_vm(struct kvm *kvm)
}
#endif
-#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
-static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
+#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS
+static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
{
return -ENOTSUPP;
}
+#else
+int kvm_arch_flush_remote_tlbs(struct kvm *kvm);
+#endif
+
+#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
+static inline int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm,
+ gfn_t gfn, u64 nr_pages)
+{
+ return -EOPNOTSUPP;
+}
+#else
+int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages);
#endif
#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
@@ -1912,7 +1979,7 @@ extern const struct _kvm_stats_desc kvm_vm_stats_desc[];
extern const struct kvm_stats_header kvm_vcpu_stats_header;
extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[];
-#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq)
{
if (unlikely(kvm->mmu_invalidate_in_progress))
@@ -1935,9 +2002,9 @@ static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq)
return 0;
}
-static inline int mmu_invalidate_retry_hva(struct kvm *kvm,
+static inline int mmu_invalidate_retry_gfn(struct kvm *kvm,
unsigned long mmu_seq,
- unsigned long hva)
+ gfn_t gfn)
{
lockdep_assert_held(&kvm->mmu_lock);
/*
@@ -1946,14 +2013,50 @@ static inline int mmu_invalidate_retry_hva(struct kvm *kvm,
* that might be being invalidated. Note that it may include some false
 * positives, due to shortcuts when handling concurrent invalidations.
*/
- if (unlikely(kvm->mmu_invalidate_in_progress) &&
- hva >= kvm->mmu_invalidate_range_start &&
- hva < kvm->mmu_invalidate_range_end)
- return 1;
+ if (unlikely(kvm->mmu_invalidate_in_progress)) {
+ /*
+ * Dropping mmu_lock after bumping mmu_invalidate_in_progress
+ * but before updating the range is a KVM bug.
+ */
+ if (WARN_ON_ONCE(kvm->mmu_invalidate_range_start == INVALID_GPA ||
+ kvm->mmu_invalidate_range_end == INVALID_GPA))
+ return 1;
+
+ if (gfn >= kvm->mmu_invalidate_range_start &&
+ gfn < kvm->mmu_invalidate_range_end)
+ return 1;
+ }
+
if (kvm->mmu_invalidate_seq != mmu_seq)
return 1;
return 0;
}
+
+/*
+ * This lockless version of the range-based retry check *must* be paired with a
+ * call to the locked version after acquiring mmu_lock, i.e. this is safe to
+ * use only as a pre-check to avoid contending mmu_lock. This version *will*
+ * get false negatives and false positives.
+ */
+static inline bool mmu_invalidate_retry_gfn_unsafe(struct kvm *kvm,
+ unsigned long mmu_seq,
+ gfn_t gfn)
+{
+ /*
+ * Use READ_ONCE() to ensure the in-progress flag and sequence counter
+ * are always read from memory, e.g. so that checking for retry in a
+ * loop won't result in an infinite retry loop. Don't force loads for
+ * start+end, as the key to avoiding infinite retry loops is observing
+ * the 1=>0 transition of in-progress, i.e. getting false negatives
+ * due to stale start+end values is acceptable.
+ */
+ if (unlikely(READ_ONCE(kvm->mmu_invalidate_in_progress)) &&
+ gfn >= kvm->mmu_invalidate_range_start &&
+ gfn < kvm->mmu_invalidate_range_end)
+ return true;
+
+ return READ_ONCE(kvm->mmu_invalidate_seq) != mmu_seq;
+}
#endif
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
@@ -1978,12 +2081,10 @@ static inline void kvm_free_irq_routing(struct kvm *kvm) {}
int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
-#ifdef CONFIG_HAVE_KVM_EVENTFD
-
void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
-#ifdef CONFIG_HAVE_KVM_IRQFD
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
bool kvm_notify_irqfd_resampler(struct kvm *kvm,
@@ -2004,31 +2105,7 @@ static inline bool kvm_notify_irqfd_resampler(struct kvm *kvm,
{
return false;
}
-#endif
-
-#else
-
-static inline void kvm_eventfd_init(struct kvm *kvm) {}
-
-static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
-{
- return -EINVAL;
-}
-
-static inline void kvm_irqfd_release(struct kvm *kvm) {}
-
-#ifdef CONFIG_HAVE_KVM_IRQCHIP
-static inline void kvm_irq_routing_update(struct kvm *kvm)
-{
-}
-#endif
-
-static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
-{
- return -ENOSYS;
-}
-
-#endif /* CONFIG_HAVE_KVM_EVENTFD */
+#endif /* CONFIG_HAVE_KVM_IRQCHIP */
void kvm_arch_irq_routing_update(struct kvm *kvm);
@@ -2146,8 +2223,6 @@ struct kvm_device_ops {
int (*mmap)(struct kvm_device *dev, struct vm_area_struct *vma);
};
-void kvm_device_get(struct kvm_device *dev);
-void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);
int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type);
void kvm_unregister_device_ops(u32 type);
@@ -2237,9 +2312,6 @@ static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
}
#endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */
-void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
- unsigned long start, unsigned long end);
-
void kvm_arch_guest_memory_reclaimed(struct kvm *kvm);
#ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
@@ -2288,4 +2360,57 @@ static inline void kvm_account_pgtable_pages(void *virt, int nr)
/* Max number of entries allowed for each kvm dirty ring */
#define KVM_DIRTY_RING_MAX_ENTRIES 65536
+static inline void kvm_prepare_memory_fault_exit(struct kvm_vcpu *vcpu,
+ gpa_t gpa, gpa_t size,
+ bool is_write, bool is_exec,
+ bool is_private)
+{
+ vcpu->run->exit_reason = KVM_EXIT_MEMORY_FAULT;
+ vcpu->run->memory_fault.gpa = gpa;
+ vcpu->run->memory_fault.size = size;
+
+ /* RWX flags are not (yet) defined or communicated to userspace. */
+ vcpu->run->memory_fault.flags = 0;
+ if (is_private)
+ vcpu->run->memory_fault.flags |= KVM_MEMORY_EXIT_FLAG_PRIVATE;
+}
+
+#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+static inline unsigned long kvm_get_memory_attributes(struct kvm *kvm, gfn_t gfn)
+{
+ return xa_to_value(xa_load(&kvm->mem_attr_array, gfn));
+}
+
+bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
+ unsigned long attrs);
+bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
+ struct kvm_gfn_range *range);
+bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
+ struct kvm_gfn_range *range);
+
+static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
+{
+ return IS_ENABLED(CONFIG_KVM_PRIVATE_MEM) &&
+ kvm_get_memory_attributes(kvm, gfn) & KVM_MEMORY_ATTRIBUTE_PRIVATE;
+}
+#else
+static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
+{
+ return false;
+}
+#endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */
+
+#ifdef CONFIG_KVM_PRIVATE_MEM
+int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
+ gfn_t gfn, kvm_pfn_t *pfn, int *max_order);
+#else
+static inline int kvm_gmem_get_pfn(struct kvm *kvm,
+ struct kvm_memory_slot *slot, gfn_t gfn,
+ kvm_pfn_t *pfn, int *max_order)
+{
+ KVM_BUG_ON(1, kvm);
+ return -EIO;
+}
+#endif /* CONFIG_KVM_PRIVATE_MEM */
+
#endif
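
The lockless pre-check above must be paired with the locked mmu_invalidate_retry_gfn() once mmu_lock is held, as its comment requires. A sketch of that two-phase pattern in a hypothetical fault path; the names, the -EAGAIN convention, and the rwlock usage (true on e.g. x86) are illustrative assumptions:

static int my_page_fault(struct kvm *kvm, gfn_t gfn)
{
	unsigned long mmu_seq = kvm->mmu_invalidate_seq;
	int ret = -EAGAIN;	/* hypothetical "retry the fault" status */

	smp_rmb();	/* order the sequence read against the pfn lookup */

	/* ... resolve the pfn for @gfn without mmu_lock held ... */

	/* Lockless pre-check: skip taking mmu_lock when a retry is
	 * already certain; false negatives/positives are tolerable. */
	if (mmu_invalidate_retry_gfn_unsafe(kvm, mmu_seq, gfn))
		return ret;

	write_lock(&kvm->mmu_lock);
	/* Authoritative re-check under the lock. */
	if (mmu_invalidate_retry_gfn(kvm, mmu_seq, gfn))
		goto out;
	/* ... install the mapping ... */
	ret = 0;
out:
	write_unlock(&kvm->mmu_lock);
	return ret;
}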
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index 6f4737d5046a..9d1f7835d8c1 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -6,6 +6,7 @@
struct kvm;
struct kvm_async_pf;
struct kvm_device_ops;
+struct kvm_gfn_range;
struct kvm_interrupt;
struct kvm_irq_routing_table;
struct kvm_memory_slot;
diff --git a/include/linux/leds.h b/include/linux/leds.h
index c39bbf17a25b..4754b02d3a2c 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -100,6 +100,7 @@ struct led_classdev {
const char *name;
unsigned int brightness;
unsigned int max_brightness;
+ unsigned int color;
int flags;
/* Lower 16 bits reflect status */
@@ -124,6 +125,10 @@ struct led_classdev {
#define LED_BLINK_INVERT 3
#define LED_BLINK_BRIGHTNESS_CHANGE 4
#define LED_BLINK_DISABLE 5
+ /* Brightness off also disables hw-blinking, so it is a separate action */
+#define LED_SET_BRIGHTNESS_OFF 6
+#define LED_SET_BRIGHTNESS 7
+#define LED_SET_BLINK 8
/* Set LED brightness level
* Must not sleep. Use brightness_set_blocking for drivers
@@ -147,6 +152,10 @@ struct led_classdev {
* match the values specified exactly.
* Deactivate blinking again when the brightness is set to LED_OFF
* via the brightness_set() callback.
+ * For led_blink_set_nosleep() the LED core assumes that blink_set
+ * implementations of drivers which do not use brightness_set_blocking
+ * will not sleep. Therefore, if brightness_set_blocking is not set,
+ * this function must not sleep!
*/
int (*blink_set)(struct led_classdev *led_cdev,
unsigned long *delay_on,
@@ -170,6 +179,8 @@ struct led_classdev {
struct work_struct set_brightness_work;
int delayed_set_value;
+ unsigned long delayed_delay_on;
+ unsigned long delayed_delay_off;
#ifdef CONFIG_LEDS_TRIGGERS
/* Protects the trigger data below */
@@ -183,6 +194,49 @@ struct led_classdev {
/* LEDs that have private triggers have this set */
struct led_hw_trigger_type *trigger_type;
+
+ /* Unique trigger name supported by LED set in hw control mode */
+ const char *hw_control_trigger;
+ /*
+ * Check if the LED driver supports the requested mode provided by the
+ * defined supported trigger to set up the LED in hw control mode.
+ *
+ * Return 0 on success. Return -EOPNOTSUPP when the passed flags are not
+ * supported and software fallback needs to be used.
+ * Return any other negative error number when the check fails for other
+ * reasons, such as the device not being ready or a timeout.
+ */
+ int (*hw_control_is_supported)(struct led_classdev *led_cdev,
+ unsigned long flags);
+ /*
+ * Activate hardware control, LED driver will use the provided flags
+ * from the supported trigger and set up the LED to be driven by hardware
+ * following the requested mode from the trigger flags.
+ * Deactivate hardware blink control by setting brightness to LED_OFF via
+ * the brightness_set() callback.
+ *
+ * Return 0 on success, a negative error number if applying the flags fails.
+ */
+ int (*hw_control_set)(struct led_classdev *led_cdev,
+ unsigned long flags);
+ /*
+ * Get the current hardware control mode of the LED from the driver and
+ * store it in flags.
+ * A trigger can use this to get the initial state of an LED already under
+ * hardware blink control.
+ *
+ * Return 0 on success, a negative error number if parsing the initial
+ * mode fails. An error from this function is NOT FATAL, as the device
+ * may be in an initial state not supported by the attached LED trigger.
+ */
+ int (*hw_control_get)(struct led_classdev *led_cdev,
+ unsigned long *flags);
+ /*
+ * Get the device this LED blinks in response to.
+ * e.g. for a PHY LED, it is the network device. If the LED is
+ * not yet associated to a device, return NULL.
+ */
+ struct device *(*hw_control_get_device)(struct led_classdev *led_cdev);
#endif
#ifdef CONFIG_LEDS_BRIGHTNESS_HW_CHANGED
@@ -260,6 +314,8 @@ extern struct led_classdev *of_led_get(struct device_node *np, int index);
extern void led_put(struct led_classdev *led_cdev);
struct led_classdev *__must_check devm_of_led_get(struct device *dev,
int index);
+struct led_classdev *__must_check devm_of_led_get_optional(struct device *dev,
+ int index);
/**
* led_blink_set - set blinking with software fallback
@@ -272,12 +328,27 @@ struct led_classdev *__must_check devm_of_led_get(struct device *dev,
* software blinking if there is no hardware blinking or if
* the LED refuses the passed values.
*
+ * This function may sleep!
+ *
* Note that if software blinking is active, simply calling
* led_cdev->brightness_set() will not stop the blinking,
* use led_set_brightness() instead.
*/
void led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on,
unsigned long *delay_off);
+
+/**
+ * led_blink_set_nosleep - set blinking, guaranteed to not sleep
+ * @led_cdev: the LED to start blinking
+ * @delay_on: the time it should be on (in ms)
+ * @delay_off: the time it should be off (in ms)
+ *
+ * This function makes the LED blink and is guaranteed to not sleep. Otherwise
+ * this is the same as led_blink_set(), see led_blink_set() for details.
+ */
+void led_blink_set_nosleep(struct led_classdev *led_cdev, unsigned long delay_on,
+ unsigned long delay_off);
+
/**
* led_blink_set_oneshot - do a oneshot software blink
* @led_cdev: the LED to start blinking
@@ -291,6 +362,8 @@ void led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on,
*
* If invert is set, led blinks for delay_off first, then for
* delay_on and leave the led on after the on-off cycle.
+ *
+ * This function is guaranteed not to sleep.
*/
void led_blink_set_oneshot(struct led_classdev *led_cdev,
unsigned long *delay_on, unsigned long *delay_off,
@@ -433,11 +506,11 @@ void led_trigger_register_simple(const char *name,
struct led_trigger **trigger);
void led_trigger_unregister_simple(struct led_trigger *trigger);
void led_trigger_event(struct led_trigger *trigger, enum led_brightness event);
-void led_trigger_blink(struct led_trigger *trigger, unsigned long *delay_on,
- unsigned long *delay_off);
+void led_trigger_blink(struct led_trigger *trigger, unsigned long delay_on,
+ unsigned long delay_off);
void led_trigger_blink_oneshot(struct led_trigger *trigger,
- unsigned long *delay_on,
- unsigned long *delay_off,
+ unsigned long delay_on,
+ unsigned long delay_off,
int invert);
void led_trigger_set_default(struct led_classdev *led_cdev);
int led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trigger);
@@ -454,23 +527,6 @@ static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
return led_cdev->trigger_data;
}
-/**
- * led_trigger_rename_static - rename a trigger
- * @name: the new trigger name
- * @trig: the LED trigger to rename
- *
- * Change a LED trigger name by copying the string passed in
- * name into current trigger name, which MUST be large
- * enough for the new string.
- *
- * Note that name must NOT point to the same string used
- * during LED registration, as that could lead to races.
- *
- * This is meant to be used on triggers with statically
- * allocated name.
- */
-void led_trigger_rename_static(const char *name, struct led_trigger *trig);
-
#define module_led_trigger(__led_trigger) \
module_driver(__led_trigger, led_trigger_register, \
led_trigger_unregister)
@@ -487,11 +543,11 @@ static inline void led_trigger_unregister_simple(struct led_trigger *trigger) {}
static inline void led_trigger_event(struct led_trigger *trigger,
enum led_brightness event) {}
static inline void led_trigger_blink(struct led_trigger *trigger,
- unsigned long *delay_on,
- unsigned long *delay_off) {}
+ unsigned long delay_on,
+ unsigned long delay_off) {}
static inline void led_trigger_blink_oneshot(struct led_trigger *trigger,
- unsigned long *delay_on,
- unsigned long *delay_off,
+ unsigned long delay_on,
+ unsigned long delay_off,
int invert) {}
static inline void led_trigger_set_default(struct led_classdev *led_cdev) {}
static inline int led_trigger_set(struct led_classdev *led_cdev,
@@ -509,6 +565,24 @@ static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
#endif /* CONFIG_LEDS_TRIGGERS */
+/* Trigger specific enum */
+enum led_trigger_netdev_modes {
+ TRIGGER_NETDEV_LINK = 0,
+ TRIGGER_NETDEV_LINK_10,
+ TRIGGER_NETDEV_LINK_100,
+ TRIGGER_NETDEV_LINK_1000,
+ TRIGGER_NETDEV_LINK_2500,
+ TRIGGER_NETDEV_LINK_5000,
+ TRIGGER_NETDEV_LINK_10000,
+ TRIGGER_NETDEV_HALF_DUPLEX,
+ TRIGGER_NETDEV_FULL_DUPLEX,
+ TRIGGER_NETDEV_TX,
+ TRIGGER_NETDEV_RX,
+
+ /* Keep last */
+ __TRIGGER_NETDEV_MAX,
+};
+
/* Trigger specific functions */
#ifdef CONFIG_LEDS_TRIGGER_DISK
void ledtrig_disk_activity(bool write);
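
The hw_control_*() hooks above let a trigger offload its blink pattern to the LED hardware, falling back to software when the mode is not supported. A sketch of the negotiation a trigger might perform (my_trigger_apply and its flag handling are hypothetical):

static int my_trigger_apply(struct led_classdev *led_cdev,
			    unsigned long flags)
{
	int ret;

	if (!led_cdev->hw_control_is_supported)
		return -EOPNOTSUPP;

	ret = led_cdev->hw_control_is_supported(led_cdev, flags);
	if (ret == -EOPNOTSUPP)
		return ret;	/* caller falls back to software blinking */
	if (ret)
		return ret;	/* device not ready, timeout, ... */

	/* Hand the requested pattern to the hardware. */
	return led_cdev->hw_control_set(led_cdev, flags);
}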
diff --git a/include/linux/libata.h b/include/linux/libata.h
index dd5797fb6305..26d68115afb8 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -94,17 +94,19 @@ enum {
ATA_DFLAG_DMADIR = (1 << 10), /* device requires DMADIR */
ATA_DFLAG_NCQ_SEND_RECV = (1 << 11), /* device supports NCQ SEND and RECV */
ATA_DFLAG_NCQ_PRIO = (1 << 12), /* device supports NCQ priority */
- ATA_DFLAG_CFG_MASK = (1 << 13) - 1,
-
- ATA_DFLAG_PIO = (1 << 13), /* device limited to PIO mode */
- ATA_DFLAG_NCQ_OFF = (1 << 14), /* device limited to non-NCQ mode */
- ATA_DFLAG_SLEEPING = (1 << 15), /* device is sleeping */
- ATA_DFLAG_DUBIOUS_XFER = (1 << 16), /* data transfer not verified */
- ATA_DFLAG_NO_UNLOAD = (1 << 17), /* device doesn't support unload */
- ATA_DFLAG_UNLOCK_HPA = (1 << 18), /* unlock HPA */
- ATA_DFLAG_INIT_MASK = (1 << 19) - 1,
-
- ATA_DFLAG_NCQ_PRIO_ENABLED = (1 << 19), /* Priority cmds sent to dev */
+ ATA_DFLAG_CDL = (1 << 13), /* supports cmd duration limits */
+ ATA_DFLAG_CFG_MASK = (1 << 14) - 1,
+
+ ATA_DFLAG_PIO = (1 << 14), /* device limited to PIO mode */
+ ATA_DFLAG_NCQ_OFF = (1 << 15), /* device limited to non-NCQ mode */
+ ATA_DFLAG_SLEEPING = (1 << 16), /* device is sleeping */
+ ATA_DFLAG_DUBIOUS_XFER = (1 << 17), /* data transfer not verified */
+ ATA_DFLAG_NO_UNLOAD = (1 << 18), /* device doesn't support unload */
+ ATA_DFLAG_UNLOCK_HPA = (1 << 19), /* unlock HPA */
+ ATA_DFLAG_INIT_MASK = (1 << 20) - 1,
+
+ ATA_DFLAG_NCQ_PRIO_ENABLED = (1 << 20), /* Priority cmds sent to dev */
+ ATA_DFLAG_CDL_ENABLED = (1 << 21), /* cmd duration limits are enabled */
ATA_DFLAG_DETACH = (1 << 24),
ATA_DFLAG_DETACHED = (1 << 25),
ATA_DFLAG_DA = (1 << 26), /* device supports Device Attention */
@@ -115,7 +117,8 @@ enum {
ATA_DFLAG_FEATURES_MASK = (ATA_DFLAG_TRUSTED | ATA_DFLAG_DA | \
ATA_DFLAG_DEVSLP | ATA_DFLAG_NCQ_SEND_RECV | \
- ATA_DFLAG_NCQ_PRIO | ATA_DFLAG_FUA),
+ ATA_DFLAG_NCQ_PRIO | ATA_DFLAG_FUA | \
+ ATA_DFLAG_CDL),
ATA_DEV_UNKNOWN = 0, /* unknown device */
ATA_DEV_ATA = 1, /* ATA device */
@@ -189,6 +192,7 @@ enum {
ATA_PFLAG_UNLOADING = (1 << 9), /* driver is being unloaded */
ATA_PFLAG_UNLOADED = (1 << 10), /* driver is unloaded */
+ ATA_PFLAG_RESUMING = (1 << 16), /* port is being resumed */
ATA_PFLAG_SUSPENDED = (1 << 17), /* port is suspended (power) */
ATA_PFLAG_PM_PENDING = (1 << 18), /* PM operation pending */
ATA_PFLAG_INIT_GTM_VALID = (1 << 19), /* initial gtm data valid */
@@ -206,10 +210,12 @@ enum {
ATA_QCFLAG_CLEAR_EXCL = (1 << 5), /* clear excl_link on completion */
ATA_QCFLAG_QUIET = (1 << 6), /* don't report device error */
ATA_QCFLAG_RETRY = (1 << 7), /* retry after failure */
+ ATA_QCFLAG_HAS_CDL = (1 << 8), /* qc has a CDL descriptor set */
ATA_QCFLAG_EH = (1 << 16), /* cmd aborted and owned by EH */
ATA_QCFLAG_SENSE_VALID = (1 << 17), /* sense data valid */
ATA_QCFLAG_EH_SCHEDULED = (1 << 18), /* EH scheduled (obsolete) */
+ ATA_QCFLAG_EH_SUCCESS_CMD = (1 << 19), /* EH should fetch sense for this successful cmd */
/* host set flags */
ATA_HOST_SIMPLEX = (1 << 0), /* Host is simplex, one DMA channel per host only */
@@ -217,6 +223,10 @@ enum {
ATA_HOST_PARALLEL_SCAN = (1 << 2), /* Ports on this host can be scanned in parallel */
ATA_HOST_IGNORE_ATA = (1 << 3), /* Ignore ATA devices on this host. */
+ ATA_HOST_NO_PART = (1 << 4), /* Host does not support partial */
+ ATA_HOST_NO_SSC = (1 << 5), /* Host does not support slumber */
+ ATA_HOST_NO_DEVSLP = (1 << 6), /* Host does not support devslp */
+
/* bits 24:31 of host->flags are reserved for LLD specific flags */
/* various lengths of time */
@@ -250,7 +260,7 @@ enum {
* advised to wait only for the following duration before
* doing SRST.
*/
- ATA_TMOUT_PMP_SRST_WAIT = 5000,
+ ATA_TMOUT_PMP_SRST_WAIT = 10000,
/* When the LPM policy is set to ATA_LPM_MAX_POWER, there might
* be a spurious PHY event, so ignore the first PHY event that
@@ -308,8 +318,11 @@ enum {
ATA_EH_RESET = ATA_EH_SOFTRESET | ATA_EH_HARDRESET,
ATA_EH_ENABLE_LINK = (1 << 3),
ATA_EH_PARK = (1 << 5), /* unload heads and stop I/O */
+ ATA_EH_GET_SUCCESS_SENSE = (1 << 6), /* Get sense data for successful cmd */
+ ATA_EH_SET_ACTIVE = (1 << 7), /* Set a device to active power mode */
- ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE | ATA_EH_PARK,
+ ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE | ATA_EH_PARK |
+ ATA_EH_GET_SUCCESS_SENSE | ATA_EH_SET_ACTIVE,
ATA_EH_ALL_ACTIONS = ATA_EH_REVALIDATE | ATA_EH_RESET |
ATA_EH_ENABLE_LINK,
@@ -337,7 +350,6 @@ enum {
ATA_LINK_RESUME_TRIES = 5,
/* how hard are we gonna try to probe/recover devices */
- ATA_PROBE_MAX_TRIES = 3,
ATA_EH_DEV_TRIES = 3,
ATA_EH_PMP_TRIES = 5,
ATA_EH_PMP_LINK_TRIES = 3,
@@ -347,7 +359,7 @@ enum {
/* This should match the actual table size of
* ata_eh_cmd_timeout_table in libata-eh.c.
*/
- ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 7,
+ ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 8,
/* Horkage types. May be set by libata or controller on drives
(some horkage may be drive/controller pair dependent */
@@ -459,7 +471,7 @@ enum ata_completion_errors {
/*
* Link power management policy: If you alter this, you also need to
- * alter libata-scsi.c (for the ascii descriptions)
+ * alter libata-sata.c (for the ascii descriptions)
*/
enum ata_lpm_policy {
ATA_LPM_UNKNOWN,
@@ -644,7 +656,7 @@ struct ata_cpr {
struct ata_cpr_log {
u8 nr_cpr;
- struct ata_cpr cpr[];
+ struct ata_cpr cpr[] __counted_by(nr_cpr);
};
struct ata_device {
@@ -709,6 +721,9 @@ struct ata_device {
/* Concurrent positioning ranges */
struct ata_cpr_log *cpr_log;
+ /* Command Duration Limits log support */
+ u8 cdl[ATA_LOG_CDL_SIZE];
+
/* error history */
int spdn_cnt;
/* ering is CLEAR_END, read comment above CLEAR_END */
@@ -860,6 +875,7 @@ struct ata_port {
struct ata_acpi_gtm __acpi_init_gtm; /* use ata_acpi_init_gtm() */
#endif
/* owned by EH */
+ u8 *ncq_sense_buf;
u8 sector_buf[ATA_SECT_SIZE] ____cacheline_aligned;
};
@@ -966,12 +982,6 @@ struct ata_port_operations {
ssize_t size);
/*
- * Obsolete
- */
- void (*phy_reset)(struct ata_port *ap);
- void (*eng_timeout)(struct ata_port *ap);
-
- /*
* ->inherits must be the last field and all the preceding
* fields must be pointers.
*/
@@ -1105,7 +1115,7 @@ static inline void ata_sas_port_resume(struct ata_port *ap)
extern int ata_ratelimit(void);
extern void ata_msleep(struct ata_port *ap, unsigned int msecs);
extern u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask,
- u32 val, unsigned long interval, unsigned long timeout);
+ u32 val, unsigned int interval, unsigned int timeout);
extern int atapi_cmd_type(u8 opcode);
extern unsigned int ata_pack_xfermask(unsigned int pio_mask,
unsigned int mwdma_mask,
@@ -1140,12 +1150,13 @@ extern int ata_std_bios_param(struct scsi_device *sdev,
struct block_device *bdev,
sector_t capacity, int geom[]);
extern void ata_scsi_unlock_native_capacity(struct scsi_device *sdev);
+extern int ata_scsi_slave_alloc(struct scsi_device *sdev);
extern int ata_scsi_slave_config(struct scsi_device *sdev);
extern void ata_scsi_slave_destroy(struct scsi_device *sdev);
extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
int queue_depth);
-extern int ata_change_queue_depth(struct ata_port *ap, struct ata_device *dev,
- struct scsi_device *sdev, int queue_depth);
+extern int ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
+ int queue_depth);
extern struct ata_device *ata_dev_pair(struct ata_device *adev);
extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap);
@@ -1155,11 +1166,11 @@ extern void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *
* SATA specific code - drivers/ata/libata-sata.c
*/
#ifdef CONFIG_SATA_HOST
-extern const unsigned long sata_deb_timing_normal[];
-extern const unsigned long sata_deb_timing_hotplug[];
-extern const unsigned long sata_deb_timing_long[];
+extern const unsigned int sata_deb_timing_normal[];
+extern const unsigned int sata_deb_timing_hotplug[];
+extern const unsigned int sata_deb_timing_long[];
-static inline const unsigned long *
+static inline const unsigned int *
sata_ehc_deb_timing(struct ata_eh_context *ehc)
{
if (ehc->i.flags & ATA_EHI_HOTPLUGGED)
@@ -1174,13 +1185,14 @@ extern int sata_scr_write(struct ata_link *link, int reg, u32 val);
extern int sata_scr_write_flush(struct ata_link *link, int reg, u32 val);
extern int sata_set_spd(struct ata_link *link);
extern int sata_link_hardreset(struct ata_link *link,
- const unsigned long *timing, unsigned long deadline,
+ const unsigned int *timing, unsigned long deadline,
bool *online, int (*check_ready)(struct ata_link *));
-extern int sata_link_resume(struct ata_link *link, const unsigned long *params,
+extern int sata_link_resume(struct ata_link *link, const unsigned int *params,
unsigned long deadline);
+extern int ata_eh_read_sense_success_ncq_log(struct ata_link *link);
extern void ata_eh_analyze_ncq_error(struct ata_link *link);
#else
-static inline const unsigned long *
+static inline const unsigned int *
sata_ehc_deb_timing(struct ata_eh_context *ehc)
{
return NULL;
@@ -1200,7 +1212,7 @@ static inline int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
}
static inline int sata_set_spd(struct ata_link *link) { return -EOPNOTSUPP; }
static inline int sata_link_hardreset(struct ata_link *link,
- const unsigned long *timing,
+ const unsigned int *timing,
unsigned long deadline,
bool *online,
int (*check_ready)(struct ata_link *))
@@ -1210,28 +1222,27 @@ static inline int sata_link_hardreset(struct ata_link *link,
return -EOPNOTSUPP;
}
static inline int sata_link_resume(struct ata_link *link,
- const unsigned long *params,
+ const unsigned int *params,
unsigned long deadline)
{
return -EOPNOTSUPP;
}
+static inline int ata_eh_read_sense_success_ncq_log(struct ata_link *link)
+{
+ return -EOPNOTSUPP;
+}
static inline void ata_eh_analyze_ncq_error(struct ata_link *link) { }
#endif
extern int sata_link_debounce(struct ata_link *link,
- const unsigned long *params, unsigned long deadline);
+ const unsigned int *params, unsigned long deadline);
extern int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
bool spm_wakeup);
extern int ata_slave_link_init(struct ata_port *ap);
-extern void ata_sas_port_destroy(struct ata_port *);
extern struct ata_port *ata_sas_port_alloc(struct ata_host *,
struct ata_port_info *, struct Scsi_Host *);
-extern void ata_sas_async_probe(struct ata_port *ap);
-extern int ata_sas_sync_probe(struct ata_port *ap);
-extern int ata_sas_port_init(struct ata_port *);
-extern int ata_sas_port_start(struct ata_port *ap);
+extern void ata_port_probe(struct ata_port *ap);
extern int ata_sas_tport_add(struct device *parent, struct ata_port *ap);
extern void ata_sas_tport_delete(struct ata_port *ap);
-extern void ata_sas_port_stop(struct ata_port *ap);
extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *);
extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap);
extern void ata_tf_to_fis(const struct ata_taskfile *tf,
@@ -1276,7 +1287,7 @@ extern int ata_pci_device_resume(struct pci_dev *pdev);
struct platform_device;
-extern int ata_platform_remove_one(struct platform_device *pdev);
+extern void ata_platform_remove_one(struct platform_device *pdev);
/*
* ACPI - drivers/ata/libata-acpi.c
@@ -1388,6 +1399,7 @@ extern const struct attribute_group *ata_common_sdev_groups[];
.this_id = ATA_SHT_THIS_ID, \
.emulated = ATA_SHT_EMULATED, \
.proc_name = drv_name, \
+ .slave_alloc = ata_scsi_slave_alloc, \
.slave_destroy = ata_scsi_slave_destroy, \
.bios_param = ata_std_bios_param, \
.unlock_native_capacity = ata_scsi_unlock_native_capacity,\
@@ -1549,6 +1561,11 @@ void ata_port_desc(struct ata_port *ap, const char *fmt, ...);
extern void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
const char *name);
#endif
+static inline void ata_port_desc_misc(struct ata_port *ap, int irq)
+{
+ ata_port_desc(ap, "irq %d", irq);
+ ata_port_desc(ap, "lpm-pol %d", ap->target_lpm_policy);
+}
static inline bool ata_tag_internal(unsigned int tag)
{
@@ -1769,7 +1786,7 @@ static inline struct ata_queued_cmd *ata_qc_from_tag(struct ata_port *ap,
{
struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
- if (unlikely(!qc) || !ap->ops->error_handler)
+ if (unlikely(!qc))
return qc;
if ((qc->flags & (ATA_QCFLAG_ACTIVE |
@@ -1860,7 +1877,7 @@ static inline int ata_check_ready(u8 status)
}
static inline unsigned long ata_deadline(unsigned long from_jiffies,
- unsigned long timeout_msecs)
+ unsigned int timeout_msecs)
{
return from_jiffies + msecs_to_jiffies(timeout_msecs);
}
@@ -1869,23 +1886,21 @@ static inline unsigned long ata_deadline(unsigned long from_jiffies,
change in future hardware and specs, secondly 0xFF means 'no DMA' but is
> UDMA_0. Here be dragons. */
-static inline int ata_using_mwdma(struct ata_device *adev)
+static inline bool ata_using_mwdma(struct ata_device *adev)
{
- if (adev->dma_mode >= XFER_MW_DMA_0 && adev->dma_mode <= XFER_MW_DMA_4)
- return 1;
- return 0;
+ return adev->dma_mode >= XFER_MW_DMA_0 &&
+ adev->dma_mode <= XFER_MW_DMA_4;
}
-static inline int ata_using_udma(struct ata_device *adev)
+static inline bool ata_using_udma(struct ata_device *adev)
{
- if (adev->dma_mode >= XFER_UDMA_0 && adev->dma_mode <= XFER_UDMA_7)
- return 1;
- return 0;
+ return adev->dma_mode >= XFER_UDMA_0 &&
+ adev->dma_mode <= XFER_UDMA_7;
}
-static inline int ata_dma_enabled(struct ata_device *adev)
+static inline bool ata_dma_enabled(struct ata_device *adev)
{
- return (adev->dma_mode == 0xFF ? 0 : 1);
+ return adev->dma_mode != 0xFF;
}
/**************************************************************************
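
The int-to-bool conversions above are behaviour-preserving; the helpers simply test a range or a sentinel. A minimal userspace sketch of the same checks, using illustrative XFER_* values assumed to follow the usual libata encoding rather than copied from <linux/ata.h>:

#include <stdbool.h>
#include <stdio.h>

/* illustrative transfer-mode codes (an assumption, not authoritative) */
enum {
        XFER_MW_DMA_0 = 0x20, XFER_MW_DMA_4 = 0x24,
        XFER_UDMA_0   = 0x40, XFER_UDMA_7   = 0x47,
};

struct ata_device { unsigned char dma_mode; };

static bool ata_using_mwdma(const struct ata_device *adev)
{
        return adev->dma_mode >= XFER_MW_DMA_0 &&
               adev->dma_mode <= XFER_MW_DMA_4;
}

static bool ata_dma_enabled(const struct ata_device *adev)
{
        /* 0xFF means "no DMA" yet compares greater than XFER_UDMA_0,
         * hence a sentinel test rather than a range check */
        return adev->dma_mode != 0xFF;
}

int main(void)
{
        struct ata_device dev = { .dma_mode = XFER_MW_DMA_4 };

        printf("mwdma=%d dma=%d\n",
               ata_using_mwdma(&dev), ata_dma_enabled(&dev));
        return 0;
}
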
diff --git a/include/linux/libps2.h b/include/linux/libps2.h
index 53f7e4d0f4b7..9ca9ce4e6e64 100644
--- a/include/linux/libps2.h
+++ b/include/linux/libps2.h
@@ -8,44 +8,59 @@
*/
#include <linux/bitops.h>
+#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/wait.h>
-#define PS2_CMD_SETSCALE11 0x00e6
-#define PS2_CMD_SETRES 0x10e8
-#define PS2_CMD_GETID 0x02f2
-#define PS2_CMD_RESET_BAT 0x02ff
+struct ps2dev;
-#define PS2_RET_BAT 0xaa
-#define PS2_RET_ID 0x00
-#define PS2_RET_ACK 0xfa
-#define PS2_RET_NAK 0xfe
-#define PS2_RET_ERR 0xfc
+/**
+ * enum ps2_disposition - indicates how received byte should be handled
+ * @PS2_PROCESS: pass to the main protocol handler, process normally
+ * @PS2_IGNORE: skip the byte
+ * @PS2_ERROR: do not process the byte, abort command in progress
+ */
+enum ps2_disposition {
+ PS2_PROCESS,
+ PS2_IGNORE,
+ PS2_ERROR,
+};
-#define PS2_FLAG_ACK BIT(0) /* Waiting for ACK/NAK */
-#define PS2_FLAG_CMD BIT(1) /* Waiting for a command to finish */
-#define PS2_FLAG_CMD1 BIT(2) /* Waiting for the first byte of command response */
-#define PS2_FLAG_WAITID BIT(3) /* Command executing is GET ID */
-#define PS2_FLAG_NAK BIT(4) /* Last transmission was NAKed */
-#define PS2_FLAG_ACK_CMD BIT(5) /* Waiting to ACK the command (first) byte */
+typedef enum ps2_disposition (*ps2_pre_receive_handler_t)(struct ps2dev *, u8,
+ unsigned int);
+typedef void (*ps2_receive_handler_t)(struct ps2dev *, u8);
+/**
+ * struct ps2dev - represents a device using PS/2 protocol
+ * @serio: a serio port used by the PS/2 device
+ * @cmd_mutex: a mutex ensuring that only one command is executing at a time
+ * @wait: a waitqueue used to signal completion from the serio interrupt handler
+ * @flags: various internal flags indicating stages of PS/2 command execution
+ * @cmdbuf: buffer holding command response
+ * @cmdcnt: outstanding number of bytes of the command response
+ * @nak: a byte transmitted by the device when it refuses a command
+ * @pre_receive_handler: checks communication errors and returns disposition
+ * (&enum ps2_disposition) of the received data byte
+ * @receive_handler: main handler of particular PS/2 protocol, such as keyboard
+ * or mouse protocol
+ */
struct ps2dev {
struct serio *serio;
-
- /* Ensures that only one command is executing at a time */
struct mutex cmd_mutex;
-
- /* Used to signal completion from interrupt handler */
wait_queue_head_t wait;
-
unsigned long flags;
u8 cmdbuf[8];
u8 cmdcnt;
u8 nak;
+
+ ps2_pre_receive_handler_t pre_receive_handler;
+ ps2_receive_handler_t receive_handler;
};
-void ps2_init(struct ps2dev *ps2dev, struct serio *serio);
+void ps2_init(struct ps2dev *ps2dev, struct serio *serio,
+ ps2_pre_receive_handler_t pre_receive_handler,
+ ps2_receive_handler_t receive_handler);
int ps2_sendbyte(struct ps2dev *ps2dev, u8 byte, unsigned int timeout);
void ps2_drain(struct ps2dev *ps2dev, size_t maxbytes, unsigned int timeout);
void ps2_begin_command(struct ps2dev *ps2dev);
@@ -53,9 +68,8 @@ void ps2_end_command(struct ps2dev *ps2dev);
int __ps2_command(struct ps2dev *ps2dev, u8 *param, unsigned int command);
int ps2_command(struct ps2dev *ps2dev, u8 *param, unsigned int command);
int ps2_sliced_command(struct ps2dev *ps2dev, u8 command);
-bool ps2_handle_ack(struct ps2dev *ps2dev, u8 data);
-bool ps2_handle_response(struct ps2dev *ps2dev, u8 data);
-void ps2_cmd_aborted(struct ps2dev *ps2dev);
bool ps2_is_keyboard_id(u8 id);
+irqreturn_t ps2_interrupt(struct serio *serio, u8 data, unsigned int flags);
+
#endif /* _LIBPS2_H */
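
With ps2_init() now taking the two callbacks, a PS/2 child driver no longer calls ps2_handle_ack()/ps2_handle_response() from its own serio interrupt routine; it points the serio driver at the shared ps2_interrupt() and supplies protocol handlers instead. A hedged sketch of a hypothetical driver follows; the foo_* names and structure layout are invented for illustration:

struct foo {
        struct ps2dev ps2dev;
        /* ... input device, protocol state ... */
};

static enum ps2_disposition foo_pre_receive(struct ps2dev *ps2dev,
                                            u8 data, unsigned int flags)
{
        if (flags & (SERIO_TIMEOUT | SERIO_PARITY))
                return PS2_ERROR;       /* abort any command in progress */
        return PS2_PROCESS;             /* hand the byte to foo_receive() */
}

static void foo_receive(struct ps2dev *ps2dev, u8 data)
{
        /* main protocol handler: decode @data as device traffic */
}

static int foo_connect(struct serio *serio, struct serio_driver *drv)
{
        struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

        if (!foo)
                return -ENOMEM;
        ps2_init(&foo->ps2dev, serio, foo_pre_receive, foo_receive);
        serio_set_drvdata(serio, foo);
        /* ... serio_open(), input device registration, etc. ... */
        return 0;
}

static struct serio_driver foo_drv = {
        .driver         = { .name = "foo" },
        .interrupt      = ps2_interrupt,  /* dispatches to the two handlers */
        .connect        = foo_connect,
};
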
diff --git a/include/linux/limits.h b/include/linux/limits.h
index f6bcc9369010..38eb7f6f7e88 100644
--- a/include/linux/limits.h
+++ b/include/linux/limits.h
@@ -10,6 +10,8 @@
#define SSIZE_MAX ((ssize_t)(SIZE_MAX >> 1))
#define PHYS_ADDR_MAX (~(phys_addr_t)0)
+#define RESOURCE_SIZE_MAX ((resource_size_t)~0)
+
#define U8_MAX ((u8)~0U)
#define S8_MAX ((s8)(U8_MAX >> 1))
#define S8_MIN ((s8)(-S8_MAX - 1))
diff --git a/include/linux/linkmode.h b/include/linux/linkmode.h
index 15e0e0209da4..287f590ed56b 100644
--- a/include/linux/linkmode.h
+++ b/include/linux/linkmode.h
@@ -10,6 +10,11 @@ static inline void linkmode_zero(unsigned long *dst)
bitmap_zero(dst, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
+static inline void linkmode_fill(unsigned long *dst)
+{
+ bitmap_fill(dst, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
static inline void linkmode_copy(unsigned long *dst, const unsigned long *src)
{
bitmap_copy(dst, src, __ETHTOOL_LINK_MODE_MASK_NBITS);
@@ -43,15 +48,6 @@ static inline void linkmode_set_bit(int nr, volatile unsigned long *addr)
__set_bit(nr, addr);
}
-static inline void linkmode_set_bit_array(const int *array, int array_size,
- unsigned long *addr)
-{
- int i;
-
- for (i = 0; i < array_size; i++)
- linkmode_set_bit(array[i], addr);
-}
-
static inline void linkmode_clear_bit(int nr, volatile unsigned long *addr)
{
__clear_bit(nr, addr);
@@ -71,6 +67,15 @@ static inline int linkmode_test_bit(int nr, const volatile unsigned long *addr)
return test_bit(nr, addr);
}
+static inline void linkmode_set_bit_array(const int *array, int array_size,
+ unsigned long *addr)
+{
+ int i;
+
+ for (i = 0; i < array_size; i++)
+ linkmode_set_bit(array[i], addr);
+}
+
static inline int linkmode_equal(const unsigned long *src1,
const unsigned long *src2)
{
diff --git a/include/linux/list.h b/include/linux/list.h
index f10344dbad4d..059aa1fff41e 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -38,11 +38,92 @@ static inline void INIT_LIST_HEAD(struct list_head *list)
WRITE_ONCE(list->prev, list);
}
+#ifdef CONFIG_LIST_HARDENED
+
#ifdef CONFIG_DEBUG_LIST
-extern bool __list_add_valid(struct list_head *new,
- struct list_head *prev,
- struct list_head *next);
-extern bool __list_del_entry_valid(struct list_head *entry);
+# define __list_valid_slowpath
+#else
+# define __list_valid_slowpath __cold __preserve_most
+#endif
+
+/*
+ * Performs the full set of list corruption checks before __list_add().
+ * On list corruption reports a warning, and returns false.
+ */
+extern bool __list_valid_slowpath __list_add_valid_or_report(struct list_head *new,
+ struct list_head *prev,
+ struct list_head *next);
+
+/*
+ * Performs list corruption checks before __list_add(). Returns false if a
+ * corruption is detected, true otherwise.
+ *
+ * With CONFIG_LIST_HARDENED only, performs minimal list integrity checking
+ * inline to catch non-faulting corruptions, and only if a corruption is
+ * detected calls the reporting function __list_add_valid_or_report().
+ */
+static __always_inline bool __list_add_valid(struct list_head *new,
+ struct list_head *prev,
+ struct list_head *next)
+{
+ bool ret = true;
+
+ if (!IS_ENABLED(CONFIG_DEBUG_LIST)) {
+ /*
+ * With the hardening version, elide checking if next and prev
+ * are NULL, since the immediate dereference of them below would
+ * result in a fault if NULL.
+ *
+ * With the reduced set of checks, we can afford to inline the
+ * checks, which also gives the compiler a chance to elide some
+ * of them completely if they can be proven at compile-time. If
+ * one of the pre-conditions does not hold, the slow-path will
+ * show a report which pre-condition failed.
+ */
+ if (likely(next->prev == prev && prev->next == next && new != prev && new != next))
+ return true;
+ ret = false;
+ }
+
+ ret &= __list_add_valid_or_report(new, prev, next);
+ return ret;
+}
+
+/*
+ * Performs the full set of list corruption checks before __list_del_entry().
+ * On list corruption reports a warning, and returns false.
+ */
+extern bool __list_valid_slowpath __list_del_entry_valid_or_report(struct list_head *entry);
+
+/*
+ * Performs list corruption checks before __list_del_entry(). Returns false if a
+ * corruption is detected, true otherwise.
+ *
+ * With CONFIG_LIST_HARDENED only, performs minimal list integrity checking
+ * inline to catch non-faulting corruptions, and only if a corruption is
+ * detected calls the reporting function __list_del_entry_valid_or_report().
+ */
+static __always_inline bool __list_del_entry_valid(struct list_head *entry)
+{
+ bool ret = true;
+
+ if (!IS_ENABLED(CONFIG_DEBUG_LIST)) {
+ struct list_head *prev = entry->prev;
+ struct list_head *next = entry->next;
+
+ /*
+ * With the hardening version, elide checking if next and prev
+ * are NULL, LIST_POISON1 or LIST_POISON2, since the immediate
+ * dereference of them below would result in a fault.
+ */
+ if (likely(prev->next == entry && next->prev == entry))
+ return true;
+ ret = false;
+ }
+
+ ret &= __list_del_entry_valid_or_report(entry);
+ return ret;
+}
#else
static inline bool __list_add_valid(struct list_head *new,
struct list_head *prev,
@@ -606,6 +687,14 @@ static inline void list_splice_tail_init(struct list_head *list,
for (pos = (head)->next; !list_is_head(pos, (head)); pos = pos->next)
/**
+ * list_for_each_reverse - iterate backwards over a list
+ * @pos: the &struct list_head to use as a loop cursor.
+ * @head: the head for your list.
+ */
+#define list_for_each_reverse(pos, head) \
+ for (pos = (head)->prev; pos != (head); pos = pos->prev)
+
+/**
* list_for_each_rcu - Iterate over a list in an RCU-safe fashion
* @pos: the &struct list_head to use as a loop cursor.
* @head: the head for your list.
@@ -1030,6 +1119,26 @@ static inline void hlist_move_list(struct hlist_head *old,
old->first = NULL;
}
+/**
+ * hlist_splice_init() - move all entries from one list to another
+ * @from: hlist_head from which entries will be moved
+ * @last: last entry on the @from list
+ * @to: hlist_head to which entries will be moved
+ *
+ * @to can be empty, @from must contain at least @last.
+ */
+static inline void hlist_splice_init(struct hlist_head *from,
+ struct hlist_node *last,
+ struct hlist_head *to)
+{
+ if (to->first)
+ to->first->pprev = &last->next;
+ last->next = to->first;
+ to->first = from->first;
+ from->first->pprev = &to->first;
+ from->first = NULL;
+}
+
#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
#define hlist_for_each(pos, head) \
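
The point of the CONFIG_LIST_HARDENED split earlier in this file's diff is that the fast path stays inline and branch-light, while the reporting path lives out of line (__cold, and __preserve_most when CONFIG_DEBUG_LIST is off). A standalone sketch of the same pattern, without the kernel attributes:

#include <stdbool.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

/* out-of-line slow path: report and refuse the corrupt add */
static bool __list_add_valid_or_report(struct list_head *new,
                                       struct list_head *prev,
                                       struct list_head *next)
{
        fprintf(stderr, "list_add corruption: new=%p prev=%p next=%p\n",
                (void *)new, (void *)prev, (void *)next);
        return false;
}

static inline bool __list_add_valid(struct list_head *new,
                                    struct list_head *prev,
                                    struct list_head *next)
{
        /* minimal inline checks; only a failure takes the slow path */
        if (next->prev == prev && prev->next == next &&
            new != prev && new != next)
                return true;
        return __list_add_valid_or_report(new, prev, next);
}

int main(void)
{
        struct list_head head = { &head, &head }, node;

        head.prev = NULL;       /* simulate corruption */
        if (!__list_add_valid(&node, &head, head.next))
                puts("add rejected");
        return 0;
}
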
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index b35968ee9fb5..7675a48a0701 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -73,8 +73,10 @@ void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *paren
/**
* list_lru_add: add an element to the lru list's tail
- * @list_lru: the lru pointer
+ * @lru: the lru pointer
* @item: the item to be added.
+ * @nid: the node id of the sublist to add the item to.
+ * @memcg: the cgroup of the sublist to add the item to.
*
* If the element is already part of a list, this function returns doing
* nothing. Therefore the caller does not need to keep state about whether or
@@ -83,24 +85,54 @@ void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *paren
* the caller organize itself in a way that elements can be in more than
* one type of list, it is up to the caller to fully remove the item from
* the previous list (with list_lru_del() for instance) before moving it
- * to @list_lru
+ * to @lru.
+ *
+ * Return: true if the list was updated, false otherwise
+ */
+bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid,
+ struct mem_cgroup *memcg);
+
+/**
+ * list_lru_add_obj: add an element to the lru list's tail
+ * @lru: the lru pointer
+ * @item: the item to be added.
+ *
+ * This function is similar to list_lru_add(), but the NUMA node and the
+ * memcg of the sublist are determined by the @item list_head. This assumption
+ * is valid for slab object LRUs such as dentries, inodes, etc.
*
* Return value: true if the list was updated, false otherwise
*/
-bool list_lru_add(struct list_lru *lru, struct list_head *item);
+bool list_lru_add_obj(struct list_lru *lru, struct list_head *item);
/**
- * list_lru_del: delete an element to the lru list
- * @list_lru: the lru pointer
+ * list_lru_del: delete an element from the lru list
+ * @lru: the lru pointer
* @item: the item to be deleted.
+ * @nid: the node id of the sublist to delete the item from.
+ * @memcg: the cgroup of the sublist to delete the item from.
*
- * This function works analogously as list_lru_add in terms of list
+ * This function works analogously to list_lru_add() in terms of list
* manipulation. The comments about an element already pertaining to
- * a list are also valid for list_lru_del.
+ * a list are also valid for list_lru_del().
*
- * Return value: true if the list was updated, false otherwise
+ * Return: true if the list was updated, false otherwise
*/
-bool list_lru_del(struct list_lru *lru, struct list_head *item);
+bool list_lru_del(struct list_lru *lru, struct list_head *item, int nid,
+ struct mem_cgroup *memcg);
+
+/**
+ * list_lru_del_obj: delete an element from the lru list
+ * @lru: the lru pointer
+ * @item: the item to be deleted.
+ *
+ * This function is similar to list_lru_del(), but the NUMA node and the
+ * memcg of the sublist are determined by the @item list_head. This assumption
+ * is valid for slab object LRUs such as dentries, inodes, etc.
+ *
+ * Return: true if the list was updated, false otherwise.
+ */
+bool list_lru_del_obj(struct list_lru *lru, struct list_head *item);
/**
* list_lru_count_one: return the number of objects currently held by @lru
@@ -108,9 +140,11 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item);
* @nid: the node id to count from.
* @memcg: the cgroup to count from.
*
- * Always return a non-negative number, 0 for empty lists. There is no
- * guarantee that the list is not updated while the count is being computed.
- * Callers that want such a guarantee need to provide an outer lock.
+ * There is no guarantee that the list is not updated while the count is being
+ * computed. Callers that want such a guarantee need to provide an outer lock.
+ *
+ * Return: 0 for empty lists, otherwise the number of objects
+ * currently held by @lru.
*/
unsigned long list_lru_count_one(struct list_lru *lru,
int nid, struct mem_cgroup *memcg);
@@ -136,12 +170,28 @@ static inline unsigned long list_lru_count(struct list_lru *lru)
void list_lru_isolate(struct list_lru_one *list, struct list_head *item);
void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
struct list_head *head);
+/**
+ * list_lru_putback: undo list_lru_isolate
+ * @lru: the lru pointer.
+ * @item: the item to put back.
+ * @nid: the node id of the sublist to put the item back to.
+ * @memcg: the cgroup of the sublist to put the item back to.
+ *
+ * Put back an isolated item into its original LRU. Note that unlike
+ * list_lru_add(), this does not increment the node LRU count (as
+ * list_lru_isolate() does not originally decrement this count).
+ *
+ * Since we might have dropped the LRU lock in between, recompute list_lru_one
+ * from the node's id and memcg.
+ */
+void list_lru_putback(struct list_lru *lru, struct list_head *item, int nid,
+ struct mem_cgroup *memcg);
typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
struct list_lru_one *list, spinlock_t *lock, void *cb_arg);
/**
- * list_lru_walk_one: walk a list_lru, isolating and disposing freeable items.
+ * list_lru_walk_one: walk a @lru, isolating and disposing freeable items.
* @lru: the lru pointer.
* @nid: the node id to scan from.
* @memcg: the cgroup to scan from.
@@ -150,24 +200,24 @@ typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
* @cb_arg: opaque type that will be passed to @isolate
* @nr_to_walk: how many items to scan.
*
- * This function will scan all elements in a particular list_lru, calling the
+ * This function will scan all elements in a particular @lru, calling the
* @isolate callback for each of those items, along with the current list
* spinlock and a caller-provided opaque. The @isolate callback can choose to
* drop the lock internally, but *must* return with the lock held. The callback
- * will return an enum lru_status telling the list_lru infrastructure what to
+ * will return an enum lru_status telling the @lru infrastructure what to
* do with the object being scanned.
*
- * Please note that nr_to_walk does not mean how many objects will be freed,
+ * Please note that @nr_to_walk does not mean how many objects will be freed,
* just how many objects will be scanned.
*
- * Return value: the number of objects effectively removed from the LRU.
+ * Return: the number of objects effectively removed from the LRU.
*/
unsigned long list_lru_walk_one(struct list_lru *lru,
int nid, struct mem_cgroup *memcg,
list_lru_walk_cb isolate, void *cb_arg,
unsigned long *nr_to_walk);
/**
- * list_lru_walk_one_irq: walk a list_lru, isolating and disposing freeable items.
+ * list_lru_walk_one_irq: walk a @lru, isolating and disposing freeable items.
* @lru: the lru pointer.
* @nid: the node id to scan from.
* @memcg: the cgroup to scan from.
@@ -176,7 +226,7 @@ unsigned long list_lru_walk_one(struct list_lru *lru,
* @cb_arg: opaque type that will be passed to @isolate
* @nr_to_walk: how many items to scan.
*
- * Same as @list_lru_walk_one except that the spinlock is acquired with
+ * Same as list_lru_walk_one() except that the spinlock is acquired with
* spin_lock_irq().
*/
unsigned long list_lru_walk_one_irq(struct list_lru *lru,
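
For callers, the practical choice after this API change is between the explicit form, which passes @nid and @memcg, and the _obj helpers, which recover both from the item itself (valid for slab-allocated objects). A hedged before/after sketch in the style of a dcache-like user; the dentry example is illustrative, and nid/memcg are assumed to be in scope:

        /* before: node and memcg derived implicitly from @item */
        list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru);

        /* after, keeping the implicit behaviour via the _obj helper */
        list_lru_add_obj(&dentry->d_sb->s_dentry_lru, &dentry->d_lru);

        /* after, when the caller already knows the sublist */
        list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru, nid, memcg);
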
diff --git a/include/linux/llist.h b/include/linux/llist.h
index 85bda2d02d65..2c982ff7475a 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -74,6 +74,33 @@ static inline void init_llist_head(struct llist_head *list)
}
/**
+ * init_llist_node - initialize lock-less list node
+ * @node: the node to be initialised
+ *
+ * In cases where there is a need to test if a node is on
+ * a list or not, this initialises the node to clearly
+ * not be on any list.
+ */
+static inline void init_llist_node(struct llist_node *node)
+{
+ node->next = node;
+}
+
+/**
+ * llist_on_list - test if a lock-less list node is on a list
+ * @node: the node to test
+ *
+ * When a node is on a list the ->next pointer will be NULL or
+ * some other node. It can never point to itself. We use that
+ * in init_llist_node() to record that a node is not on any list,
+ * and here to test whether it is on any list.
+ */
+static inline bool llist_on_list(const struct llist_node *node)
+{
+ return node->next != node;
+}
+
+/**
* llist_entry - get the struct of this entry
* @ptr: the &struct llist_node pointer.
* @type: the type of the struct this is embedded in.
@@ -249,6 +276,25 @@ static inline struct llist_node *__llist_del_all(struct llist_head *head)
extern struct llist_node *llist_del_first(struct llist_head *head);
+/**
+ * llist_del_first_init - delete the first entry from a lock-less list and mark it as being off-list
+ * @head: the head of lock-less list to delete from.
+ *
+ * This behaves the same as llist_del_first() except that init_llist_node() is called
+ * on the returned node so that llist_on_list() will report false for the node.
+ */
+static inline struct llist_node *llist_del_first_init(struct llist_head *head)
+{
+ struct llist_node *n = llist_del_first(head);
+
+ if (n)
+ init_llist_node(n);
+ return n;
+}
+
+extern bool llist_del_first_this(struct llist_head *head,
+ struct llist_node *this);
+
struct llist_node *llist_reverse_order(struct llist_node *head);
#endif /* LLIST_H */
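
The off-list sentinel works because a node that is actually queued can never have ->next pointing at itself. A standalone sketch of the idiom that init_llist_node()/llist_on_list()/llist_del_first_init() build on:

#include <stdbool.h>
#include <stdio.h>

struct llist_node { struct llist_node *next; };

static void init_llist_node(struct llist_node *n) { n->next = n; }
static bool llist_on_list(const struct llist_node *n) { return n->next != n; }

int main(void)
{
        struct llist_node a, b;

        init_llist_node(&a);
        printf("a on list: %d\n", llist_on_list(&a));   /* 0 */

        a.next = &b;    /* pretend a was pushed onto a list ending at b */
        b.next = NULL;
        printf("a on list: %d\n", llist_on_list(&a));   /* 1 */

        init_llist_node(&a);    /* what llist_del_first_init() does on removal */
        printf("a on list: %d\n", llist_on_list(&a));   /* 0 */
        return 0;
}
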
diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
index 3bc9f7410e21..c53c81242e72 100644
--- a/include/linux/lockd/bind.h
+++ b/include/linux/lockd/bind.h
@@ -20,6 +20,7 @@
/* Dummy declarations */
struct svc_rqst;
struct rpc_task;
+struct rpc_clnt;
/*
* This is the set of functions for lockd->nfsd communication
@@ -56,6 +57,7 @@ struct nlmclnt_initdata {
extern struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init);
extern void nlmclnt_done(struct nlm_host *host);
+extern struct rpc_clnt *nlmclnt_rpc_clnt(struct nlm_host *host);
/*
* NLM client operations provide a means to modify RPC processing of NLM
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index f42594a9efe0..9f565416d186 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -204,6 +204,8 @@ extern unsigned long nlmsvc_timeout;
extern bool nsm_use_hostnames;
extern u32 nsm_local_state;
+extern struct timer_list nlmsvc_retry;
+
/*
* Lockd client functions
*/
@@ -280,7 +282,7 @@ __be32 nlmsvc_testlock(struct svc_rqst *, struct nlm_file *,
struct nlm_host *, struct nlm_lock *,
struct nlm_lock *, struct nlm_cookie *);
__be32 nlmsvc_cancel_blocked(struct net *net, struct nlm_file *, struct nlm_lock *);
-unsigned long nlmsvc_retry_blocked(void);
+void nlmsvc_retry_blocked(struct svc_rqst *rqstp);
void nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *,
nlm_host_match_fn_t match);
void nlmsvc_grant_reply(struct nlm_cookie *, __be32);
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 74bd269a80a2..08b0d1d9d78b 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -82,63 +82,6 @@ struct lock_chain {
u64 chain_key;
};
-#define MAX_LOCKDEP_KEYS_BITS 13
-#define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS)
-#define INITIAL_CHAIN_KEY -1
-
-struct held_lock {
- /*
- * One-way hash of the dependency chain up to this point. We
- * hash the hashes step by step as the dependency chain grows.
- *
- * We use it for dependency-caching and we skip detection
- * passes and dependency-updates if there is a cache-hit, so
- * it is absolutely critical for 100% coverage of the validator
- * to have a unique key value for every unique dependency path
- * that can occur in the system, to make a unique hash value
- * as likely as possible - hence the 64-bit width.
- *
- * The task struct holds the current hash value (initialized
- * with zero), here we store the previous hash value:
- */
- u64 prev_chain_key;
- unsigned long acquire_ip;
- struct lockdep_map *instance;
- struct lockdep_map *nest_lock;
-#ifdef CONFIG_LOCK_STAT
- u64 waittime_stamp;
- u64 holdtime_stamp;
-#endif
- /*
- * class_idx is zero-indexed; it points to the element in
- * lock_classes this held lock instance belongs to. class_idx is in
- * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
- */
- unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS;
- /*
- * The lock-stack is unified in that the lock chains of interrupt
- * contexts nest ontop of process context chains, but we 'separate'
- * the hashes by starting with 0 if we cross into an interrupt
- * context, and we also keep do not add cross-context lock
- * dependencies - the lock usage graph walking covers that area
- * anyway, and we'd just unnecessarily increase the number of
- * dependencies otherwise. [Note: hardirq and softirq contexts
- * are separated from each other too.]
- *
- * The following field is used to detect when we cross into an
- * interrupt context:
- */
- unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
- unsigned int trylock:1; /* 16 bits */
-
- unsigned int read:2; /* see lock_acquire() comment */
- unsigned int check:1; /* see lock_acquire() comment */
- unsigned int hardirqs_off:1;
- unsigned int sync:1;
- unsigned int references:11; /* 32 bits */
- unsigned int pin_count;
-};
-
/*
* Initialization, self-test and debugging-output methods:
*/
@@ -447,6 +390,14 @@ extern int lockdep_is_held(const void *);
#endif /* !LOCKDEP */
+#ifdef CONFIG_PROVE_LOCKING
+void lockdep_set_lock_cmp_fn(struct lockdep_map *, lock_cmp_fn, lock_print_fn);
+
+#define lock_set_cmp_fn(lock, ...) lockdep_set_lock_cmp_fn(&(lock)->dep_map, __VA_ARGS__)
+#else
+#define lock_set_cmp_fn(lock, ...) do { } while (0)
+#endif
+
enum xhlock_context_t {
XHLOCK_HARD,
XHLOCK_SOFT,
@@ -617,6 +568,12 @@ do { \
WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \
} while (0)
+#define lockdep_assert_no_hardirq() \
+do { \
+ WARN_ON_ONCE(__lockdep_enabled && (this_cpu_read(hardirq_context) || \
+ !this_cpu_read(hardirqs_enabled))); \
+} while (0)
+
#define lockdep_assert_preemption_enabled() \
do { \
WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \
@@ -651,6 +608,7 @@ do { \
# define lockdep_assert_irqs_enabled() do { } while (0)
# define lockdep_assert_irqs_disabled() do { } while (0)
# define lockdep_assert_in_irq() do { } while (0)
+# define lockdep_assert_no_hardirq() do { } while (0)
# define lockdep_assert_preemption_enabled() do { } while (0)
# define lockdep_assert_preemption_disabled() do { } while (0)
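
lockdep_set_lock_cmp_fn() lets a subsystem teach lockdep a valid ordering for locks that share a single class, instead of annotating every nested acquisition with subclasses. A hedged sketch for a hypothetical structure whose locks nest in increasing ->pos order; the foo names and field layout are invented for illustration:

struct foo {
        u64             pos;
        spinlock_t      lock;
};

static int foo_lock_cmp_fn(const struct lockdep_map *a,
                           const struct lockdep_map *b)
{
        const struct foo *fa = container_of(a, struct foo, lock.dep_map);
        const struct foo *fb = container_of(b, struct foo, lock.dep_map);

        /* sort-comparator convention: nesting is valid only in
         * increasing ->pos order */
        if (fa->pos < fb->pos)
                return -1;
        if (fa->pos > fb->pos)
                return 1;
        return 0;
}

static void foo_lock_print_fn(const struct lockdep_map *map)
{
        const struct foo *f = container_of(map, struct foo, lock.dep_map);

        printk(KERN_CONT " pos=%llu", f->pos);
}

        /* once, after initialising each lock: */
        spin_lock_init(&f->lock);
        lock_set_cmp_fn(&f->lock, foo_lock_cmp_fn, foo_lock_print_fn);
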
diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h
index 59f4fb1626ea..70d30d40ea4a 100644
--- a/include/linux/lockdep_types.h
+++ b/include/linux/lockdep_types.h
@@ -85,6 +85,11 @@ struct lock_trace;
#define LOCKSTAT_POINTS 4
+struct lockdep_map;
+typedef int (*lock_cmp_fn)(const struct lockdep_map *a,
+ const struct lockdep_map *b);
+typedef void (*lock_print_fn)(const struct lockdep_map *map);
+
/*
* The lock-class itself. The order of the structure members matters.
* reinit_class() zeroes the key member and all subsequent members.
@@ -110,6 +115,9 @@ struct lock_class {
struct list_head locks_after, locks_before;
const struct lockdep_subclass_key *key;
+ lock_cmp_fn cmp_fn;
+ lock_print_fn print_fn;
+
unsigned int subclass;
unsigned int dep_gen_id;
@@ -119,12 +127,12 @@ struct lock_class {
unsigned long usage_mask;
const struct lock_trace *usage_traces[LOCK_TRACE_STATES];
+ const char *name;
/*
* Generation counter, when doing certain classes of graph walking,
* to ensure that we check one node only once:
*/
int name_version;
- const char *name;
u8 wait_type_inner;
u8 wait_type_outer;
@@ -190,6 +198,63 @@ struct lockdep_map {
struct pin_cookie { unsigned int val; };
+#define MAX_LOCKDEP_KEYS_BITS 13
+#define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS)
+#define INITIAL_CHAIN_KEY -1
+
+struct held_lock {
+ /*
+ * One-way hash of the dependency chain up to this point. We
+ * hash the hashes step by step as the dependency chain grows.
+ *
+ * We use it for dependency-caching and we skip detection
+ * passes and dependency-updates if there is a cache-hit, so
+ * it is absolutely critical for 100% coverage of the validator
+ * to have a unique key value for every unique dependency path
+ * that can occur in the system, to make a unique hash value
+ * as likely as possible - hence the 64-bit width.
+ *
+ * The task struct holds the current hash value (initialized
+ * with zero), here we store the previous hash value:
+ */
+ u64 prev_chain_key;
+ unsigned long acquire_ip;
+ struct lockdep_map *instance;
+ struct lockdep_map *nest_lock;
+#ifdef CONFIG_LOCK_STAT
+ u64 waittime_stamp;
+ u64 holdtime_stamp;
+#endif
+ /*
+ * class_idx is zero-indexed; it points to the element in
+ * lock_classes this held lock instance belongs to. class_idx is in
+ * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
+ */
+ unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS;
+ /*
+ * The lock-stack is unified in that the lock chains of interrupt
+ * contexts nest on top of process context chains, but we 'separate'
+ * the hashes by starting with 0 if we cross into an interrupt
+ * context, and we also do not add cross-context lock
+ * dependencies - the lock usage graph walking covers that area
+ * anyway, and we'd just unnecessarily increase the number of
+ * dependencies otherwise. [Note: hardirq and softirq contexts
+ * are separated from each other too.]
+ *
+ * The following field is used to detect when we cross into an
+ * interrupt context:
+ */
+ unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
+ unsigned int trylock:1; /* 16 bits */
+
+ unsigned int read:2; /* see lock_acquire() comment */
+ unsigned int check:1; /* see lock_acquire() comment */
+ unsigned int hardirqs_off:1;
+ unsigned int sync:1;
+ unsigned int references:11; /* 32 bits */
+ unsigned int pin_count;
+};
+
#else /* !CONFIG_LOCKDEP */
/*
diff --git a/include/linux/logic_pio.h b/include/linux/logic_pio.h
index 54945aa824b4..babf4e3c28ba 100644
--- a/include/linux/logic_pio.h
+++ b/include/linux/logic_pio.h
@@ -39,9 +39,6 @@ struct logic_pio_host_ops {
#ifdef CONFIG_INDIRECT_PIO
u8 logic_inb(unsigned long addr);
-void logic_outb(u8 value, unsigned long addr);
-void logic_outw(u16 value, unsigned long addr);
-void logic_outl(u32 value, unsigned long addr);
u16 logic_inw(unsigned long addr);
u32 logic_inl(unsigned long addr);
void logic_outb(u8 value, unsigned long addr);
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
index 6bb55e61e8e8..76458b6d53da 100644
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@ -32,28 +32,29 @@ LSM_HOOK(int, 0, binder_transaction, const struct cred *from,
LSM_HOOK(int, 0, binder_transfer_binder, const struct cred *from,
const struct cred *to)
LSM_HOOK(int, 0, binder_transfer_file, const struct cred *from,
- const struct cred *to, struct file *file)
+ const struct cred *to, const struct file *file)
LSM_HOOK(int, 0, ptrace_access_check, struct task_struct *child,
unsigned int mode)
LSM_HOOK(int, 0, ptrace_traceme, struct task_struct *parent)
-LSM_HOOK(int, 0, capget, struct task_struct *target, kernel_cap_t *effective,
+LSM_HOOK(int, 0, capget, const struct task_struct *target, kernel_cap_t *effective,
kernel_cap_t *inheritable, kernel_cap_t *permitted)
LSM_HOOK(int, 0, capset, struct cred *new, const struct cred *old,
const kernel_cap_t *effective, const kernel_cap_t *inheritable,
const kernel_cap_t *permitted)
LSM_HOOK(int, 0, capable, const struct cred *cred, struct user_namespace *ns,
int cap, unsigned int opts)
-LSM_HOOK(int, 0, quotactl, int cmds, int type, int id, struct super_block *sb)
+LSM_HOOK(int, 0, quotactl, int cmds, int type, int id, const struct super_block *sb)
LSM_HOOK(int, 0, quota_on, struct dentry *dentry)
LSM_HOOK(int, 0, syslog, int type)
LSM_HOOK(int, 0, settime, const struct timespec64 *ts,
const struct timezone *tz)
-LSM_HOOK(int, 0, vm_enough_memory, struct mm_struct *mm, long pages)
+LSM_HOOK(int, 1, vm_enough_memory, struct mm_struct *mm, long pages)
LSM_HOOK(int, 0, bprm_creds_for_exec, struct linux_binprm *bprm)
-LSM_HOOK(int, 0, bprm_creds_from_file, struct linux_binprm *bprm, struct file *file)
+LSM_HOOK(int, 0, bprm_creds_from_file, struct linux_binprm *bprm, const struct file *file)
LSM_HOOK(int, 0, bprm_check_security, struct linux_binprm *bprm)
-LSM_HOOK(void, LSM_RET_VOID, bprm_committing_creds, struct linux_binprm *bprm)
-LSM_HOOK(void, LSM_RET_VOID, bprm_committed_creds, struct linux_binprm *bprm)
+LSM_HOOK(void, LSM_RET_VOID, bprm_committing_creds, const struct linux_binprm *bprm)
+LSM_HOOK(void, LSM_RET_VOID, bprm_committed_creds, const struct linux_binprm *bprm)
+LSM_HOOK(int, 0, fs_context_submount, struct fs_context *fc, struct super_block *reference)
LSM_HOOK(int, 0, fs_context_dup, struct fs_context *fc,
struct fs_context *src_sc)
LSM_HOOK(int, -ENOPARAM, fs_context_parse_param, struct fs_context *fc,
@@ -65,7 +66,7 @@ LSM_HOOK(void, LSM_RET_VOID, sb_free_mnt_opts, void *mnt_opts)
LSM_HOOK(int, 0, sb_eat_lsm_opts, char *orig, void **mnt_opts)
LSM_HOOK(int, 0, sb_mnt_opts_compat, struct super_block *sb, void *mnt_opts)
LSM_HOOK(int, 0, sb_remount, struct super_block *sb, void *mnt_opts)
-LSM_HOOK(int, 0, sb_kern_mount, struct super_block *sb)
+LSM_HOOK(int, 0, sb_kern_mount, const struct super_block *sb)
LSM_HOOK(int, 0, sb_show_options, struct seq_file *m, struct super_block *sb)
LSM_HOOK(int, 0, sb_statfs, struct dentry *dentry)
LSM_HOOK(int, 0, sb_mount, const char *dev_name, const struct path *path,
@@ -111,9 +112,9 @@ LSM_HOOK(int, 0, path_notify, const struct path *path, u64 mask,
unsigned int obj_type)
LSM_HOOK(int, 0, inode_alloc_security, struct inode *inode)
LSM_HOOK(void, LSM_RET_VOID, inode_free_security, struct inode *inode)
-LSM_HOOK(int, 0, inode_init_security, struct inode *inode,
- struct inode *dir, const struct qstr *qstr, const char **name,
- void **value, size_t *len)
+LSM_HOOK(int, -EOPNOTSUPP, inode_init_security, struct inode *inode,
+ struct inode *dir, const struct qstr *qstr, struct xattr *xattrs,
+ int *xattr_count)
LSM_HOOK(int, 0, inode_init_security_anon, struct inode *inode,
const struct qstr *name, const struct inode *context_inode)
LSM_HOOK(int, 0, inode_create, struct inode *dir, struct dentry *dentry,
@@ -170,6 +171,8 @@ LSM_HOOK(int, 0, file_alloc_security, struct file *file)
LSM_HOOK(void, LSM_RET_VOID, file_free_security, struct file *file)
LSM_HOOK(int, 0, file_ioctl, struct file *file, unsigned int cmd,
unsigned long arg)
+LSM_HOOK(int, 0, file_ioctl_compat, struct file *file, unsigned int cmd,
+ unsigned long arg)
LSM_HOOK(int, 0, mmap_addr, unsigned long addr)
LSM_HOOK(int, 0, mmap_file, struct file *file, unsigned long reqprot,
unsigned long prot, unsigned long flags)
@@ -261,6 +264,10 @@ LSM_HOOK(int, 0, sem_semop, struct kern_ipc_perm *perm, struct sembuf *sops,
LSM_HOOK(int, 0, netlink_send, struct sock *sk, struct sk_buff *skb)
LSM_HOOK(void, LSM_RET_VOID, d_instantiate, struct dentry *dentry,
struct inode *inode)
+LSM_HOOK(int, -EOPNOTSUPP, getselfattr, unsigned int attr,
+ struct lsm_ctx __user *ctx, size_t *size, u32 flags)
+LSM_HOOK(int, -EOPNOTSUPP, setselfattr, unsigned int attr,
+ struct lsm_ctx *ctx, size_t size, u32 flags)
LSM_HOOK(int, -EINVAL, getprocattr, struct task_struct *p, const char *name,
char **value)
LSM_HOOK(int, -EINVAL, setprocattr, const char *name, void *value, size_t size)
@@ -272,7 +279,7 @@ LSM_HOOK(void, LSM_RET_VOID, release_secctx, char *secdata, u32 seclen)
LSM_HOOK(void, LSM_RET_VOID, inode_invalidate_secctx, struct inode *inode)
LSM_HOOK(int, 0, inode_notifysecctx, struct inode *inode, void *ctx, u32 ctxlen)
LSM_HOOK(int, 0, inode_setsecctx, struct dentry *dentry, void *ctx, u32 ctxlen)
-LSM_HOOK(int, 0, inode_getsecctx, struct inode *inode, void **ctx,
+LSM_HOOK(int, -EOPNOTSUPP, inode_getsecctx, struct inode *inode, void **ctx,
u32 *ctxlen)
#if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE)
@@ -308,15 +315,15 @@ LSM_HOOK(int, 0, socket_getsockopt, struct socket *sock, int level, int optname)
LSM_HOOK(int, 0, socket_setsockopt, struct socket *sock, int level, int optname)
LSM_HOOK(int, 0, socket_shutdown, struct socket *sock, int how)
LSM_HOOK(int, 0, socket_sock_rcv_skb, struct sock *sk, struct sk_buff *skb)
-LSM_HOOK(int, 0, socket_getpeersec_stream, struct socket *sock,
+LSM_HOOK(int, -ENOPROTOOPT, socket_getpeersec_stream, struct socket *sock,
sockptr_t optval, sockptr_t optlen, unsigned int len)
-LSM_HOOK(int, 0, socket_getpeersec_dgram, struct socket *sock,
+LSM_HOOK(int, -ENOPROTOOPT, socket_getpeersec_dgram, struct socket *sock,
struct sk_buff *skb, u32 *secid)
LSM_HOOK(int, 0, sk_alloc_security, struct sock *sk, int family, gfp_t priority)
LSM_HOOK(void, LSM_RET_VOID, sk_free_security, struct sock *sk)
LSM_HOOK(void, LSM_RET_VOID, sk_clone_security, const struct sock *sk,
struct sock *newsk)
-LSM_HOOK(void, LSM_RET_VOID, sk_getsecid, struct sock *sk, u32 *secid)
+LSM_HOOK(void, LSM_RET_VOID, sk_getsecid, const struct sock *sk, u32 *secid)
LSM_HOOK(void, LSM_RET_VOID, sock_graft, struct sock *sk, struct socket *parent)
LSM_HOOK(int, 0, inet_conn_request, const struct sock *sk, struct sk_buff *skb,
struct request_sock *req)
@@ -343,6 +350,7 @@ LSM_HOOK(void, LSM_RET_VOID, sctp_sk_clone, struct sctp_association *asoc,
struct sock *sk, struct sock *newsk)
LSM_HOOK(int, 0, sctp_assoc_established, struct sctp_association *asoc,
struct sk_buff *skb)
+LSM_HOOK(int, 0, mptcp_add_subflow, struct sock *sk, struct sock *ssk)
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_INFINIBAND
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index ab2b2fafa4a4..a2ade0ffe9e7 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -25,9 +25,11 @@
#ifndef __LINUX_LSM_HOOKS_H
#define __LINUX_LSM_HOOKS_H
+#include <uapi/linux/lsm.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/rculist.h>
+#include <linux/xattr.h>
union security_list_options {
#define LSM_HOOK(RET, DEFAULT, NAME, ...) RET (*NAME)(__VA_ARGS__);
@@ -41,6 +43,18 @@ struct security_hook_heads {
#undef LSM_HOOK
} __randomize_layout;
+/**
+ * struct lsm_id - Identify a Linux Security Module.
+ * @name: name of the LSM, must be approved by the LSM maintainers
+ * @id: LSM ID number from uapi/linux/lsm.h
+ *
+ * Contains the information that identifies the LSM.
+ */
+struct lsm_id {
+ const char *name;
+ u64 id;
+};
+
/*
* Security module hook list structure.
* For use with generic list macros for common operations.
@@ -49,7 +63,7 @@ struct security_hook_list {
struct hlist_node list;
struct hlist_head *head;
union security_list_options hook;
- const char *lsm;
+ const struct lsm_id *lsmid;
} __randomize_layout;
/*
@@ -63,8 +77,27 @@ struct lsm_blob_sizes {
int lbs_ipc;
int lbs_msg_msg;
int lbs_task;
+ int lbs_xattr_count; /* number of xattr slots in new_xattrs array */
};
+/**
+ * lsm_get_xattr_slot - Return the next available slot and increment the index
+ * @xattrs: array storing LSM-provided xattrs
+ * @xattr_count: number of already stored xattrs (updated)
+ *
+ * Retrieve the first available slot in the @xattrs array to fill with an xattr,
+ * and increment @xattr_count.
+ *
+ * Return: The next free slot in @xattrs, or NULL if @xattrs is NULL.
+ */
+static inline struct xattr *lsm_get_xattr_slot(struct xattr *xattrs,
+ int *xattr_count)
+{
+ if (unlikely(!xattrs))
+ return NULL;
+ return &xattrs[(*xattr_count)++];
+}
+
/*
* LSM_RET_VOID is used as the default value in LSM_HOOK definitions for void
* LSM hooks (in include/linux/lsm_hook_defs.h).
@@ -84,7 +117,7 @@ extern struct security_hook_heads security_hook_heads;
extern char *lsm_names;
extern void security_add_hooks(struct security_hook_list *hooks, int count,
- const char *lsm);
+ const struct lsm_id *lsmid);
#define LSM_FLAG_LEGACY_MAJOR BIT(0)
#define LSM_FLAG_EXCLUSIVE BIT(1)
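
With the xattr-array form of the inode_init_security hook, an LSM reserves its slots via lbs_xattr_count and claims them with lsm_get_xattr_slot(). A hedged sketch of a hypothetical hook; the foo name, label string and suffix are invented for illustration:

static int foo_inode_init_security(struct inode *inode, struct inode *dir,
                                   const struct qstr *qstr,
                                   struct xattr *xattrs, int *xattr_count)
{
        struct xattr *xattr = lsm_get_xattr_slot(xattrs, xattr_count);
        char *val;

        if (!xattr)             /* caller did not request xattrs */
                return 0;

        val = kstrdup("foo_label", GFP_NOFS);
        if (!val)
                return -ENOMEM;

        xattr->name = "foo";    /* suffix after the "security." prefix */
        xattr->value = val;
        xattr->value_len = strlen(val) + 1;
        return 0;
}

/* and in the LSM's struct lsm_blob_sizes: .lbs_xattr_count = 1, */
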
diff --git a/include/linux/lwq.h b/include/linux/lwq.h
new file mode 100644
index 000000000000..d081d5cf8e33
--- /dev/null
+++ b/include/linux/lwq.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef LWQ_H
+#define LWQ_H
+/*
+ * Light-weight single-linked queue built from llist
+ *
+ * Entries can be enqueued from any context with no locking.
+ * Entries can be dequeued from process context with integrated locking.
+ *
+ * This is particularly suitable when work items are queued in
+ * BH or IRQ context, and where work items are handled one at a time
+ * by dedicated threads.
+ */
+#include <linux/container_of.h>
+#include <linux/spinlock.h>
+#include <linux/llist.h>
+
+struct lwq_node {
+ struct llist_node node;
+};
+
+struct lwq {
+ spinlock_t lock;
+ struct llist_node *ready; /* entries to be dequeued */
+ struct llist_head new; /* entries being enqueued */
+};
+
+/**
+ * lwq_init - initialise a lwq
+ * @q: the lwq object
+ */
+static inline void lwq_init(struct lwq *q)
+{
+ spin_lock_init(&q->lock);
+ q->ready = NULL;
+ init_llist_head(&q->new);
+}
+
+/**
+ * lwq_empty - test if lwq contains any entry
+ * @q: the lwq object
+ *
+ * This empty test contains an acquire barrier so that if a wakeup
+ * is sent when lwq_enqueue() returns true, it is safe to go to sleep after
+ * a test on lwq_empty().
+ */
+static inline bool lwq_empty(struct lwq *q)
+{
+ /* acquire ensures ordering wrt lwq_enqueue() */
+ return smp_load_acquire(&q->ready) == NULL && llist_empty(&q->new);
+}
+
+struct llist_node *__lwq_dequeue(struct lwq *q);
+/**
+ * lwq_dequeue - dequeue first (oldest) entry from lwq
+ * @q: the queue to dequeue from
+ * @type: the type of object to return
+ * @member: the member in the returned object which is an lwq_node.
+ *
+ * Remove a single object from the lwq and return it. This will take
+ * a spinlock and so must always be called in the same context, typically
+ * process context.
+ */
+#define lwq_dequeue(q, type, member) \
+ ({ struct llist_node *_n = __lwq_dequeue(q); \
+ _n ? container_of(_n, type, member.node) : NULL; })
+
+struct llist_node *lwq_dequeue_all(struct lwq *q);
+
+/**
+ * lwq_for_each_safe - iterate over detached queue allowing deletion
+ * @_n: iterator variable
+ * @_t1: temporary struct llist_node **
+ * @_t2: temporary struct llist_node *
+ * @_l: address of llist_node pointer from lwq_dequeue_all()
+ * @_member: member in _n where lwq_node is found.
+ *
+ * Iterate over members in a dequeued list. If the loop body sets the
+ * iterator variable to NULL, that entry is removed from the queue.
+ */
+#define lwq_for_each_safe(_n, _t1, _t2, _l, _member) \
+ for (_t1 = (_l); \
+ *(_t1) ? (_n = container_of(*(_t1), typeof(*(_n)), _member.node),\
+ _t2 = ((*_t1)->next), \
+ true) \
+ : false; \
+ (_n) ? (_t1 = &(_n)->_member.node.next, 0) \
+ : ((*(_t1) = (_t2)), 0))
+
+/**
+ * lwq_enqueue - add a new item to the end of the queue
+ * @n: the lwq_node embedded in the item to be added
+ * @q: the lwq to append to.
+ *
+ * No locking is needed to append to the queue so this can
+ * be called from any context.
+ * Return %true if the list may previously have been empty.
+ */
+static inline bool lwq_enqueue(struct lwq_node *n, struct lwq *q)
+{
+ /* acquire ensures ordering wrt lwq_dequeue */
+ return llist_add(&n->node, &q->new) &&
+ smp_load_acquire(&q->ready) == NULL;
+}
+
+/**
+ * lwq_enqueue_batch - add a list of new items to the end of the queue
+ * @n: the llist_node of the first item to be added
+ * @q: the lwq to append to.
+ *
+ * No locking is needed to append to the queue so this can
+ * be called from any context.
+ * Return %true if the list may previously have been empty.
+ */
+static inline bool lwq_enqueue_batch(struct llist_node *n, struct lwq *q)
+{
+ struct llist_node *e = n;
+
+ /* acquire ensures ordering wrt lwq_dequeue */
+ return llist_add_batch(llist_reverse_order(n), e, &q->new) &&
+ smp_load_acquire(&q->ready) == NULL;
+}
+#endif /* LWQ_H */
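
Typical lwq usage pairs lock-free producers with a single process-context consumer. A hedged sketch with invented foo_* names; the wait-queue coupling relies on lwq_enqueue()'s "may have been empty" return and the acquire barrier in lwq_empty():

struct foo_work {
        struct lwq_node node;
        int             payload;
};

static struct lwq foo_queue;                    /* lwq_init() at setup */
static DECLARE_WAIT_QUEUE_HEAD(foo_waitq);

/* producer: safe from any context, including BH/IRQ */
static void foo_submit(struct foo_work *w)
{
        if (lwq_enqueue(&w->node, &foo_queue))
                wake_up(&foo_waitq);            /* queue may have been empty */
}

/* consumer: process context, one item at a time */
static int foo_thread(void *arg)
{
        while (!kthread_should_stop()) {
                struct foo_work *w;

                w = lwq_dequeue(&foo_queue, struct foo_work, node);
                if (!w) {
                        wait_event(foo_waitq, !lwq_empty(&foo_queue) ||
                                              kthread_should_stop());
                        continue;
                }
                /* ... handle w->payload, then free or recycle w ... */
        }
        return 0;
}
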
diff --git a/include/linux/maple.h b/include/linux/maple.h
index 9b140272ee16..9aae44efcfd4 100644
--- a/include/linux/maple.h
+++ b/include/linux/maple.h
@@ -5,7 +5,6 @@
#include <mach/maple.h>
struct device;
-extern struct bus_type maple_bus_type;
/* Maple Bus command and response codes */
enum maple_code {
diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
index 1fadb5f5978b..b3d63123b945 100644
--- a/include/linux/maple_tree.h
+++ b/include/linux/maple_tree.h
@@ -29,14 +29,12 @@
#define MAPLE_NODE_SLOTS 31 /* 256 bytes including ->parent */
#define MAPLE_RANGE64_SLOTS 16 /* 256 bytes */
#define MAPLE_ARANGE64_SLOTS 10 /* 240 bytes */
-#define MAPLE_ARANGE64_META_MAX 15 /* Out of range for metadata */
#define MAPLE_ALLOC_SLOTS (MAPLE_NODE_SLOTS - 1)
#else
/* 32bit sizes */
#define MAPLE_NODE_SLOTS 63 /* 256 bytes including ->parent */
#define MAPLE_RANGE64_SLOTS 32 /* 256 bytes */
#define MAPLE_ARANGE64_SLOTS 21 /* 240 bytes */
-#define MAPLE_ARANGE64_META_MAX 31 /* Out of range for metadata */
#define MAPLE_ALLOC_SLOTS (MAPLE_NODE_SLOTS - 2)
#endif /* defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64) */
@@ -184,13 +182,23 @@ enum maple_type {
#ifdef CONFIG_LOCKDEP
typedef struct lockdep_map *lockdep_map_p;
-#define mt_lock_is_held(mt) lock_is_held(mt->ma_external_lock)
+#define mt_lock_is_held(mt) \
+ (!(mt)->ma_external_lock || lock_is_held((mt)->ma_external_lock))
+
+#define mt_write_lock_is_held(mt) \
+ (!(mt)->ma_external_lock || \
+ lock_is_held_type((mt)->ma_external_lock, 0))
+
#define mt_set_external_lock(mt, lock) \
(mt)->ma_external_lock = &(lock)->dep_map
+
+#define mt_on_stack(mt) (mt).ma_external_lock = NULL
#else
typedef struct { /* nothing */ } lockdep_map_p;
-#define mt_lock_is_held(mt) 1
+#define mt_lock_is_held(mt) 1
+#define mt_write_lock_is_held(mt) 1
#define mt_set_external_lock(mt, lock) do { } while (0)
+#define mt_on_stack(mt) do { } while (0)
#endif
/*
@@ -212,8 +220,8 @@ struct maple_tree {
spinlock_t ma_lock;
lockdep_map_p ma_external_lock;
};
- void __rcu *ma_root;
unsigned int ma_flags;
+ void __rcu *ma_root;
};
/**
@@ -248,6 +256,8 @@ struct maple_tree {
struct maple_tree name = MTREE_INIT(name, 0)
#define mtree_lock(mt) spin_lock((&(mt)->ma_lock))
+#define mtree_lock_nested(mt, subclass) \
+ spin_lock_nested((&(mt)->ma_lock), subclass)
#define mtree_unlock(mt) spin_unlock((&(mt)->ma_lock))
/*
@@ -319,6 +329,9 @@ int mtree_store(struct maple_tree *mt, unsigned long index,
void *entry, gfp_t gfp);
void *mtree_erase(struct maple_tree *mt, unsigned long index);
+int mtree_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp);
+int __mt_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp);
+
void mtree_destroy(struct maple_tree *mt);
void __mt_destroy(struct maple_tree *mt);
@@ -337,6 +350,36 @@ static inline bool mtree_empty(const struct maple_tree *mt)
/* Advanced API */
/*
+ * Maple State Status
+ * ma_active means the maple state is pointing to a node and offset and can
+ * continue operating on the tree.
+ * ma_start means we have not searched the tree.
+ * ma_root means we have searched the tree and the entry we found lives in
+ * the root of the tree (ie it has index 0, length 1 and is the only entry in
+ * the tree).
+ * ma_none means we have searched the tree and there is no node in the
+ * tree for this entry. For example, we searched for index 1 in an empty
+ * tree. Or we have a tree which points to a full leaf node and we
+ * searched for an entry which is larger than can be contained in that
+ * leaf node.
+ * ma_pause means the data within the maple state may be stale, restart the
+ * operation
+ * ma_overflow means the search has reached the upper limit of the search
+ * ma_underflow means the search has reached the lower limit of the search
+ * ma_error means there was an error, check the node for the error number.
+ */
+enum maple_status {
+ ma_active,
+ ma_start,
+ ma_root,
+ ma_none,
+ ma_pause,
+ ma_overflow,
+ ma_underflow,
+ ma_error,
+};
+
+/*
* The maple state is defined in the struct ma_state and is used to keep track
* of information during operations, and even between operations when using the
* advanced API.
@@ -368,6 +411,13 @@ static inline bool mtree_empty(const struct maple_tree *mt)
* When returning a value the maple state index and last respectively contain
* the start and end of the range for the entry. Ranges are inclusive in the
* Maple Tree.
+ *
+ * The status of the state is used to determine how the next action should treat
+ * the state. For instance, if the status is ma_start then the next action
+ * should start at the root of the tree and walk down. If the status is
+ * ma_pause then the node may contain stale data and should be discarded. If the
+ * status is ma_overflow, then the last action hit the upper limit.
+ *
*/
struct ma_state {
struct maple_tree *tree; /* The tree we're operating in */
@@ -377,9 +427,11 @@ struct ma_state {
unsigned long min; /* The minimum index of this node - implied pivot min */
unsigned long max; /* The maximum index of this node - implied pivot max */
struct maple_alloc *alloc; /* Allocated nodes for this operation */
+ enum maple_status status; /* The status of the state (active, start, none, etc) */
unsigned char depth; /* depth of tree descent during write */
unsigned char offset;
unsigned char mas_flags;
+ unsigned char end; /* The end of the node */
};
struct ma_wr_state {
@@ -389,7 +441,6 @@ struct ma_wr_state {
unsigned long r_max; /* range max */
enum maple_type type; /* mas->node type */
unsigned char offset_end; /* The offset where the write ends */
- unsigned char node_end; /* mas->node end */
unsigned long *pivots; /* mas->node->pivots pointer */
unsigned long end_piv; /* The pivot at the offset end */
void __rcu **slots; /* mas->node->slots pointer */
@@ -398,28 +449,16 @@ struct ma_wr_state {
};
#define mas_lock(mas) spin_lock(&((mas)->tree->ma_lock))
+#define mas_lock_nested(mas, subclass) \
+ spin_lock_nested(&((mas)->tree->ma_lock), subclass)
#define mas_unlock(mas) spin_unlock(&((mas)->tree->ma_lock))
-
/*
* Special values for ma_state.node.
- * MAS_START means we have not searched the tree.
- * MAS_ROOT means we have searched the tree and the entry we found lives in
- * the root of the tree (ie it has index 0, length 1 and is the only entry in
- * the tree).
- * MAS_NONE means we have searched the tree and there is no node in the
- * tree for this entry. For example, we searched for index 1 in an empty
- * tree. Or we have a tree which points to a full leaf node and we
- * searched for an entry which is larger than can be contained in that
- * leaf node.
* MA_ERROR represents an errno. After dropping the lock and attempting
* to resolve the error, the walk would have to be restarted from the
* top of the tree as the tree may have been modified.
*/
-#define MAS_START ((struct maple_enode *)1UL)
-#define MAS_ROOT ((struct maple_enode *)5UL)
-#define MAS_NONE ((struct maple_enode *)9UL)
-#define MAS_PAUSE ((struct maple_enode *)17UL)
#define MA_ERROR(err) \
((struct maple_enode *)(((unsigned long)err << 2) | 2UL))
@@ -428,7 +467,8 @@ struct ma_wr_state {
.tree = mt, \
.index = first, \
.last = end, \
- .node = MAS_START, \
+ .node = NULL, \
+ .status = ma_start, \
.min = 0, \
.max = ULONG_MAX, \
.alloc = NULL, \
@@ -455,9 +495,10 @@ void *mas_erase(struct ma_state *mas);
int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp);
void mas_store_prealloc(struct ma_state *mas, void *entry);
void *mas_find(struct ma_state *mas, unsigned long max);
+void *mas_find_range(struct ma_state *mas, unsigned long max);
void *mas_find_rev(struct ma_state *mas, unsigned long min);
-int mas_preallocate(struct ma_state *mas, gfp_t gfp);
-bool mas_is_err(struct ma_state *mas);
+void *mas_find_range_rev(struct ma_state *mas, unsigned long max);
+int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp);
bool mas_nomem(struct ma_state *mas, gfp_t gfp);
void mas_pause(struct ma_state *mas);
@@ -466,10 +507,18 @@ void mas_destroy(struct ma_state *mas);
int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries);
void *mas_prev(struct ma_state *mas, unsigned long min);
+void *mas_prev_range(struct ma_state *mas, unsigned long max);
void *mas_next(struct ma_state *mas, unsigned long max);
+void *mas_next_range(struct ma_state *mas, unsigned long max);
int mas_empty_area(struct ma_state *mas, unsigned long min, unsigned long max,
unsigned long size);
+/*
+ * This finds an empty area from the highest address to the lowest;
+ * AKA the "topdown" version.
+ */
+int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
+ unsigned long max, unsigned long size);
static inline void mas_init(struct ma_state *mas, struct maple_tree *tree,
unsigned long addr)
@@ -478,27 +527,20 @@ static inline void mas_init(struct ma_state *mas, struct maple_tree *tree,
mas->tree = tree;
mas->index = mas->last = addr;
mas->max = ULONG_MAX;
- mas->node = MAS_START;
+ mas->status = ma_start;
+ mas->node = NULL;
}
-/* Checks if a mas has not found anything */
-static inline bool mas_is_none(struct ma_state *mas)
+static inline bool mas_is_active(struct ma_state *mas)
{
- return mas->node == MAS_NONE;
+ return mas->status == ma_active;
}
-/* Checks if a mas has been paused */
-static inline bool mas_is_paused(struct ma_state *mas)
+static inline bool mas_is_err(struct ma_state *mas)
{
- return mas->node == MAS_PAUSE;
+ return mas->status == ma_error;
}
-/*
- * This finds an empty area from the highest address to the lowest.
- * AKA "Topdown" version,
- */
-int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
- unsigned long max, unsigned long size);
/**
* mas_reset() - Reset a Maple Tree operation state.
* @mas: Maple Tree operation state.
@@ -509,9 +551,10 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
*
* Context: Any context.
*/
-static inline void mas_reset(struct ma_state *mas)
+static __always_inline void mas_reset(struct ma_state *mas)
{
- mas->node = MAS_START;
+ mas->status = ma_start;
+ mas->node = NULL;
}
/**
@@ -528,6 +571,149 @@ static inline void mas_reset(struct ma_state *mas)
#define mas_for_each(__mas, __entry, __max) \
while (((__entry) = mas_find((__mas), (__max))) != NULL)
+#ifdef CONFIG_DEBUG_MAPLE_TREE
+enum mt_dump_format {
+ mt_dump_dec,
+ mt_dump_hex,
+};
+
+extern atomic_t maple_tree_tests_run;
+extern atomic_t maple_tree_tests_passed;
+
+void mt_dump(const struct maple_tree *mt, enum mt_dump_format format);
+void mas_dump(const struct ma_state *mas);
+void mas_wr_dump(const struct ma_wr_state *wr_mas);
+void mt_validate(struct maple_tree *mt);
+void mt_cache_shrink(void);
+#define MT_BUG_ON(__tree, __x) do { \
+ atomic_inc(&maple_tree_tests_run); \
+ if (__x) { \
+ pr_info("BUG at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mt_dump(__tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+} while (0)
+
+#define MAS_BUG_ON(__mas, __x) do { \
+ atomic_inc(&maple_tree_tests_run); \
+ if (__x) { \
+ pr_info("BUG at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mas_dump(__mas); \
+ mt_dump((__mas)->tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+} while (0)
+
+#define MAS_WR_BUG_ON(__wrmas, __x) do { \
+ atomic_inc(&maple_tree_tests_run); \
+ if (__x) { \
+ pr_info("BUG at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mas_wr_dump(__wrmas); \
+ mas_dump((__wrmas)->mas); \
+ mt_dump((__wrmas)->mas->tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+} while (0)
+
+#define MT_WARN_ON(__tree, __x) ({ \
+ int ret = !!(__x); \
+ atomic_inc(&maple_tree_tests_run); \
+ if (ret) { \
+ pr_info("WARN at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mt_dump(__tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+ unlikely(ret); \
+})
+
+#define MAS_WARN_ON(__mas, __x) ({ \
+ int ret = !!(__x); \
+ atomic_inc(&maple_tree_tests_run); \
+ if (ret) { \
+ pr_info("WARN at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mas_dump(__mas); \
+ mt_dump((__mas)->tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+ unlikely(ret); \
+})
+
+#define MAS_WR_WARN_ON(__wrmas, __x) ({ \
+ int ret = !!(__x); \
+ atomic_inc(&maple_tree_tests_run); \
+ if (ret) { \
+ pr_info("WARN at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mas_wr_dump(__wrmas); \
+ mas_dump((__wrmas)->mas); \
+ mt_dump((__wrmas)->mas->tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+ unlikely(ret); \
+})
+#else
+#define MT_BUG_ON(__tree, __x) BUG_ON(__x)
+#define MAS_BUG_ON(__mas, __x) BUG_ON(__x)
+#define MAS_WR_BUG_ON(__mas, __x) BUG_ON(__x)
+#define MT_WARN_ON(__tree, __x) WARN_ON(__x)
+#define MAS_WARN_ON(__mas, __x) WARN_ON(__x)
+#define MAS_WR_WARN_ON(__mas, __x) WARN_ON(__x)
+#endif /* CONFIG_DEBUG_MAPLE_TREE */
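As a quick illustration (a sketch, not part of the patch): the WARN variants evaluate to the condition's truth value, so they can gate an error path while still dumping tree state when CONFIG_DEBUG_MAPLE_TREE is enabled:

if (MT_WARN_ON(&tree, entry == NULL))
	return -EINVAL;	/* tree was dumped by the macro above */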
+
+/**
+ * __mas_set_range() - Set up Maple Tree operation state to a sub-range of the
+ * current location.
+ * @mas: Maple Tree operation state.
+ * @start: New start of range in the Maple Tree.
+ * @last: New end of range in the Maple Tree.
+ *
+ * Set the internal maple state values to a sub-range.
+ * Please use mas_set_range() if you do not know where you are in the tree.
+ */
+static inline void __mas_set_range(struct ma_state *mas, unsigned long start,
+ unsigned long last)
+{
+ /* Ensure the range starts within the current slot */
+ MAS_WARN_ON(mas, mas_is_active(mas) &&
+ (mas->index > start || mas->last < start));
+ mas->index = start;
+ mas->last = last;
+}
/**
* mas_set_range() - Set up Maple Tree operation state for a different index.
@@ -542,9 +728,8 @@ static inline void mas_reset(struct ma_state *mas)
static inline
void mas_set_range(struct ma_state *mas, unsigned long start, unsigned long last)
{
- mas->index = start;
- mas->last = last;
- mas->node = MAS_START;
+ mas_reset(mas);
+ __mas_set_range(mas, start, last);
}
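A hedged sketch of the intended division of labour: mas_set_range() for a cold state, __mas_set_range() only when the new range is known to sit inside the currently located slot. The stored pointer and GFP flags below are illustrative:

MA_STATE(mas, &tree, 0, 0);
int ret;

mas_lock(&mas);
mas_set_range(&mas, 10, 19);		/* reset, then operate on [10, 19] */
ret = mas_store_gfp(&mas, some_ptr, GFP_KERNEL);
mas_unlock(&mas);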
/**
@@ -616,7 +801,7 @@ static inline void mt_clear_in_rcu(struct maple_tree *mt)
return;
if (mt_external_lock(mt)) {
- BUG_ON(!mt_lock_is_held(mt));
+ WARN_ON(!mt_lock_is_held(mt));
mt->ma_flags &= ~MT_FLAGS_USE_RCU;
} else {
mtree_lock(mt);
@@ -635,7 +820,7 @@ static inline void mt_set_in_rcu(struct maple_tree *mt)
return;
if (mt_external_lock(mt)) {
- BUG_ON(!mt_lock_is_held(mt));
+ WARN_ON(!mt_lock_is_held(mt));
mt->ma_flags |= MT_FLAGS_USE_RCU;
} else {
mtree_lock(mt);
@@ -659,39 +844,14 @@ void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max);
* mt_for_each - Iterate over each entry starting at index until max.
* @__tree: The Maple Tree
* @__entry: The current entry
- * @__index: The index to update to track the location in the tree
+ * @__index: The index to start the search from; subsequently used as the iterator.
* @__max: The maximum limit for @index
*
- * Note: Will not return the zero entry.
+ * This iterator skips all entries which resolve to a NULL pointer,
+ * e.g. entries that have been reserved with XA_ZERO_ENTRY.
*/
#define mt_for_each(__tree, __entry, __index, __max) \
for (__entry = mt_find(__tree, &(__index), __max); \
__entry; __entry = mt_find_after(__tree, &(__index), __max))
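For reference, a minimal (assumed) use of this normal-API iterator: @__index must be initialised before the loop and is advanced by mt_find()/mt_find_after() as the walk proceeds:

unsigned long index = 0;
void *entry;

mt_for_each(&tree, entry, index, ULONG_MAX)
	pr_info("found %p at index %lu\n", entry, index);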
-
-#ifdef CONFIG_DEBUG_MAPLE_TREE
-extern atomic_t maple_tree_tests_run;
-extern atomic_t maple_tree_tests_passed;
-
-void mt_dump(const struct maple_tree *mt);
-void mt_validate(struct maple_tree *mt);
-void mt_cache_shrink(void);
-#define MT_BUG_ON(__tree, __x) do { \
- atomic_inc(&maple_tree_tests_run); \
- if (__x) { \
- pr_info("BUG at %s:%d (%u)\n", \
- __func__, __LINE__, __x); \
- mt_dump(__tree); \
- pr_info("Pass: %u Run:%u\n", \
- atomic_read(&maple_tree_tests_passed), \
- atomic_read(&maple_tree_tests_run)); \
- dump_stack(); \
- } else { \
- atomic_inc(&maple_tree_tests_passed); \
- } \
-} while (0)
-#else
-#define MT_BUG_ON(__tree, __x) BUG_ON(__x)
-#endif /* CONFIG_DEBUG_MAPLE_TREE */
-
#endif /*_LINUX_MAPLE_TREE_H */
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
index 0f06c2287b52..9b54c4f0677f 100644
--- a/include/linux/marvell_phy.h
+++ b/include/linux/marvell_phy.h
@@ -25,6 +25,7 @@
#define MARVELL_PHY_ID_88X3310 0x002b09a0
#define MARVELL_PHY_ID_88E2110 0x002b09b0
#define MARVELL_PHY_ID_88X2222 0x01410f10
+#define MARVELL_PHY_ID_88Q2110 0x002b0980
/* Marvel 88E1111 in Finisar SFP module with modified PHY ID */
#define MARVELL_PHY_ID_88E1111_FINISAR 0x01ff0cc0
diff --git a/include/linux/math.h b/include/linux/math.h
index 439b8f0b9ebd..dd4152711de7 100644
--- a/include/linux/math.h
+++ b/include/linux/math.h
@@ -118,17 +118,17 @@ __STRUCT_FRACT(s32)
__STRUCT_FRACT(u32)
#undef __STRUCT_FRACT
-/*
- * Multiplies an integer by a fraction, while avoiding unnecessary
- * overflow or loss of precision.
- */
-#define mult_frac(x, numer, denom)( \
-{ \
- typeof(x) quot = (x) / (denom); \
- typeof(x) rem = (x) % (denom); \
- (quot * (numer)) + ((rem * (numer)) / (denom)); \
-} \
-)
+/* Calculate "x * n / d" without unnecessary overflow or loss of precision. */
+#define mult_frac(x, n, d) \
+({ \
+ typeof(x) x_ = (x); \
+ typeof(n) n_ = (n); \
+ typeof(d) d_ = (d); \
+ \
+ typeof(x_) q = x_ / d_; \
+ typeof(x_) r = x_ % d_; \
+ q * n_ + r * n_ / d_; \
+})
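The split into quotient and remainder is what avoids the intermediate overflow. A userspace sketch of the difference (GNU C, since the macro relies on statement expressions; the kernel macro is restated locally for illustration):

#include <stdio.h>

#define mult_frac(x, n, d)			\
({						\
	typeof(x) x_ = (x);			\
	typeof(n) n_ = (n);			\
	typeof(d) d_ = (d);			\
						\
	typeof(x_) q = x_ / d_;			\
	typeof(x_) r = x_ % d_;			\
	q * n_ + r * n_ / d_;			\
})

int main(void)
{
	unsigned int hz = 4000000000u;

	/* A naive hz * 3 / 4 would wrap the 32-bit intermediate product;
	 * the macro computes (hz / 4) * 3 + (hz % 4) * 3 / 4 = 3000000000. */
	printf("%u\n", mult_frac(hz, 3u, 4u));
	return 0;
}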
#define sector_div(a, b) do_div(a, b)
@@ -156,6 +156,25 @@ __STRUCT_FRACT(u32)
({ signed type __x = (x); __x < 0 ? -__x : __x; }), other)
/**
+ * abs_diff - return absolute value of the difference between the arguments
+ * @a: the first argument
+ * @b: the second argument
+ *
+ * @a and @b have to be of the same type. With this restriction we compare
+ * signed to signed and unsigned to unsigned. The result is the subtraction
+ * of the smaller of the two from the bigger, hence the result is always
+ * a positive value.
+ *
+ * Return: the absolute value of the difference between @a and @b.
+ */
+#define abs_diff(a, b) ({ \
+ typeof(a) __a = (a); \
+ typeof(b) __b = (b); \
+ (void)(&__a == &__b); \
+ __a > __b ? (__a - __b) : (__b - __a); \
+})
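The pointer comparison is the usual compile-time same-type check; the ordered subtraction is what keeps the result correct for unsigned types, where a naive a - b would wrap. A small assumed example:

unsigned int a = 3, b = 10;

/* a - b would wrap to 4294967289; abs_diff(a, b) yields 7 */
unsigned int d = abs_diff(a, b);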
+
+/**
* reciprocal_scale - "scale" a value into range [0, ep_ro)
* @val: value
* @ep_ro: right open interval endpoint
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 8b9191a2849e..bf74478926d4 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -168,7 +168,7 @@ static __always_inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
#endif /* mul_u64_u32_shr */
#ifndef mul_u64_u64_shr
-static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
+static __always_inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
{
return (u64)(((unsigned __int128)a * mul) >> shift);
}
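A common (illustrative) use is fixed-point scaling, e.g. converting cycles to nanoseconds with a fractional multiplier; the values here are made up:

/* ns = cycles * (mult / 2^shift), keeping the full 128-bit product */
u64 cycles = 123456789ULL;
u64 mult = 3579545444ULL;	/* hypothetical ~0.833 in 32.32 fixed point */

u64 ns = mul_u64_u64_shr(cycles, mult, 32);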
diff --git a/include/linux/mc146818rtc.h b/include/linux/mc146818rtc.h
index b0da04fe087b..34dfcc77f505 100644
--- a/include/linux/mc146818rtc.h
+++ b/include/linux/mc146818rtc.h
@@ -126,10 +126,11 @@ struct cmos_rtc_board_info {
#endif /* ARCH_RTC_LOCATION */
bool mc146818_does_rtc_work(void);
-int mc146818_get_time(struct rtc_time *time);
+int mc146818_get_time(struct rtc_time *time, int timeout);
int mc146818_set_time(struct rtc_time *time);
bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
+ int timeout,
void *param);
#endif /* _MC146818RTC_H */
diff --git a/include/linux/mcb.h b/include/linux/mcb.h
index 1e5893138afe..0b971b24a804 100644
--- a/include/linux/mcb.h
+++ b/include/linux/mcb.h
@@ -63,7 +63,6 @@ static inline struct mcb_bus *to_mcb_bus(struct device *dev)
struct mcb_device {
struct device dev;
struct mcb_bus *bus;
- bool is_added;
struct mcb_driver *driver;
u16 id;
int inst;
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index 27013d6bf24a..79ceee3c8673 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -38,6 +38,7 @@ struct mdio_device {
/* Bus address of the MDIO device (0-31) */
int addr;
int flags;
+ int reset_state;
struct gpio_desc *reset_gpio;
struct reset_control *reset_ctrl;
unsigned int reset_assert_delay;
@@ -106,6 +107,16 @@ int mdio_driver_register(struct mdio_driver *drv);
void mdio_driver_unregister(struct mdio_driver *drv);
int mdio_device_bus_match(struct device *dev, struct device_driver *drv);
+static inline void mdio_device_get(struct mdio_device *mdiodev)
+{
+ get_device(&mdiodev->dev);
+}
+
+static inline void mdio_device_put(struct mdio_device *mdiodev)
+{
+ mdio_device_free(mdiodev);
+}
+
static inline bool mdio_phy_id_is_c45(int phy_id)
{
return (phy_id & MDIO_PHY_ID_C45) && !(phy_id & ~MDIO_PHY_ID_C45_MASK);
@@ -410,7 +421,7 @@ static inline u32 linkmode_adv_to_mii_t1_adv_m_t(unsigned long *advertising)
* A function that translates value of following registers to the linkmode:
* IEEE 802.3-2018 45.2.3.10 "EEE control and capability 1" register (3.20)
* IEEE 802.3-2018 45.2.7.13 "EEE advertisement 1" register (7.60)
- * IEEE 802.3-2018 45.2.7.14 "EEE "link partner ability 1 register (7.61)
+ * IEEE 802.3-2018 45.2.7.14 "EEE link partner ability 1" register (7.61)
*/
static inline void mii_eee_cap1_mod_linkmode_t(unsigned long *adv, u32 val)
{
@@ -486,8 +497,49 @@ static inline u32 linkmode_adv_to_mii_10base_t1_t(unsigned long *adv)
return result;
}
+/**
+ * mii_c73_mod_linkmode - convert a Clause 73 advertisement to linkmodes
+ * @adv: linkmode advertisement setting
+ * @lpa: array of three u16s containing the advertisement
+ *
+ * Convert an IEEE 802.3 Clause 73 advertisement to ethtool link modes.
+ */
+static inline void mii_c73_mod_linkmode(unsigned long *adv, u16 *lpa)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ adv, lpa[0] & MDIO_AN_C73_0_PAUSE);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ adv, lpa[0] & MDIO_AN_C73_0_ASM_DIR);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_1000BASE_KX);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_10GBASE_KX4);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_40GBASE_KR4);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_40GBASE_CR4);
+ /* 100GBASE_CR10 and 100GBASE_KP4 not implemented */
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_100GBASE_KR4);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_100GBASE_CR4);
+ /* 25GBASE_R_S not implemented */
+ /* The 25GBASE_R bit can be used for 25Gbase KR or CR modes */
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_25GBASE_R);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_25GBASE_R);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_10GBASE_KR);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
+ adv, lpa[2] & MDIO_AN_C73_2_2500BASE_KX);
+ /* 5GBASE_KR not implemented */
+}
+
int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
+int __mdiobus_modify(struct mii_bus *bus, int addr, u32 regnum, u16 mask,
+ u16 set);
int __mdiobus_modify_changed(struct mii_bus *bus, int addr, u32 regnum,
u16 mask, u16 set);
@@ -515,6 +567,30 @@ int mdiobus_c45_modify(struct mii_bus *bus, int addr, int devad, u32 regnum,
int mdiobus_c45_modify_changed(struct mii_bus *bus, int addr, int devad,
u32 regnum, u16 mask, u16 set);
+static inline int __mdiodev_read(struct mdio_device *mdiodev, u32 regnum)
+{
+ return __mdiobus_read(mdiodev->bus, mdiodev->addr, regnum);
+}
+
+static inline int __mdiodev_write(struct mdio_device *mdiodev, u32 regnum,
+ u16 val)
+{
+ return __mdiobus_write(mdiodev->bus, mdiodev->addr, regnum, val);
+}
+
+static inline int __mdiodev_modify(struct mdio_device *mdiodev, u32 regnum,
+ u16 mask, u16 set)
+{
+ return __mdiobus_modify(mdiodev->bus, mdiodev->addr, regnum, mask, set);
+}
+
+static inline int __mdiodev_modify_changed(struct mdio_device *mdiodev,
+ u32 regnum, u16 mask, u16 set)
+{
+ return __mdiobus_modify_changed(mdiodev->bus, mdiodev->addr, regnum,
+ mask, set);
+}
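A hedged sketch of where the unlocked variants fit: inside a section that already holds the MDIO bus mutex. Register and bit names come from the generic MII definitions; the helper itself is hypothetical:

#include <linux/mdio.h>
#include <linux/mii.h>

static int example_set_loopback(struct mdio_device *mdiodev, bool enable)
{
	int ret;

	mutex_lock(&mdiodev->bus->mdio_lock);
	ret = __mdiodev_modify(mdiodev, MII_BMCR, BMCR_LOOPBACK,
			       enable ? BMCR_LOOPBACK : 0);
	mutex_unlock(&mdiodev->bus->mdio_lock);

	return ret;
}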
+
static inline int mdiodev_read(struct mdio_device *mdiodev, u32 regnum)
{
return mdiobus_read(mdiodev->bus, mdiodev->addr, regnum);
diff --git a/include/linux/mdio/mdio-regmap.h b/include/linux/mdio/mdio-regmap.h
new file mode 100644
index 000000000000..679d9069846b
--- /dev/null
+++ b/include/linux/mdio/mdio-regmap.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Driver for MMIO-mapped MDIO devices. Some IPs expose internal PHYs or PCS
+ * within the MMIO-mapped area.
+ *
+ * Copyright (C) 2023 Maxime Chevallier <maxime.chevallier@bootlin.com>
+ */
+#ifndef MDIO_REGMAP_H
+#define MDIO_REGMAP_H
+
+#include <linux/phy.h>
+
+struct device;
+struct regmap;
+
+struct mdio_regmap_config {
+ struct device *parent;
+ struct regmap *regmap;
+ char name[MII_BUS_ID_SIZE];
+ u8 valid_addr;
+ bool autoscan;
+};
+
+struct mii_bus *devm_mdio_regmap_register(struct device *dev,
+ const struct mdio_regmap_config *config);
+
+#endif
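A sketch of registering such a bus from a driver probe, assuming dev and regmap were obtained earlier; the bus name format is arbitrary:

struct mdio_regmap_config cfg = {
	.parent = dev,
	.regmap = regmap,
	.valid_addr = 0,	/* single device at address 0 */
	.autoscan = false,
};
struct mii_bus *bus;

snprintf(cfg.name, MII_BUS_ID_SIZE, "%s-mii", dev_name(dev));
bus = devm_mdio_regmap_register(dev, &cfg);
if (IS_ERR(bus))
	return PTR_ERR(bus);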
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h
index fd6e0620658d..b38a56a13f39 100644
--- a/include/linux/mei_cl_bus.h
+++ b/include/linux/mei_cl_bus.h
@@ -31,11 +31,11 @@ typedef void (*mei_cldev_cb_t)(struct mei_cl_device *cldev);
* @rx_work: async work to execute Rx event callback
* @rx_cb: Drivers register this callback to get asynchronous ME
* Rx buffer pending notifications.
- * @notif_work: async work to execute FW notif event callback
+ * @notif_work: async work to execute FW notify event callback
* @notif_cb: Drivers register this callback to get asynchronous ME
* FW notification pending notifications.
*
- * @do_match: wheather device can be matched with a driver
+ * @do_match: whether the device can be matched with a driver
* @is_added: device is already scanned
* @priv_data: client private data
*/
@@ -94,15 +94,23 @@ void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv);
ssize_t mei_cldev_send(struct mei_cl_device *cldev, const u8 *buf,
size_t length);
+ssize_t mei_cldev_send_timeout(struct mei_cl_device *cldev, const u8 *buf,
+ size_t length, unsigned long timeout);
ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length);
ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf,
size_t length);
+ssize_t mei_cldev_recv_timeout(struct mei_cl_device *cldev, u8 *buf, size_t length,
+ unsigned long timeout);
ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, const u8 *buf,
size_t length, u8 vtag);
+ssize_t mei_cldev_send_vtag_timeout(struct mei_cl_device *cldev, const u8 *buf,
+ size_t length, u8 vtag, unsigned long timeout);
ssize_t mei_cldev_recv_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length,
u8 *vtag);
ssize_t mei_cldev_recv_nonblock_vtag(struct mei_cl_device *cldev, u8 *buf,
size_t length, u8 *vtag);
+ssize_t mei_cldev_recv_vtag_timeout(struct mei_cl_device *cldev, u8 *buf, size_t length,
+ u8 *vtag, unsigned long timeout);
int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb);
int mei_cldev_register_notif_cb(struct mei_cl_device *cldev,
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index f82ee3fac1cd..e2082240586d 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -40,6 +40,8 @@ extern unsigned long long max_possible_pfn;
* via a driver, and never indicated in the firmware-provided memory map as
* system RAM. This corresponds to IORESOURCE_SYSRAM_DRIVER_MANAGED in the
* kernel resource tree.
+ * @MEMBLOCK_RSRV_NOINIT: memory region for which struct pages are
+ * not initialized (only for reserved regions).
*/
enum memblock_flags {
MEMBLOCK_NONE = 0x0, /* No special request */
@@ -47,6 +49,7 @@ enum memblock_flags {
MEMBLOCK_MIRROR = 0x2, /* mirrored region */
MEMBLOCK_NOMAP = 0x4, /* don't add to kernel direct mapping */
MEMBLOCK_DRIVER_MANAGED = 0x8, /* always detected via a driver */
+ MEMBLOCK_RSRV_NOINIT = 0x10, /* don't initialize struct pages */
};
/**
@@ -118,17 +121,20 @@ int memblock_reserve(phys_addr_t base, phys_addr_t size);
int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
#endif
void memblock_trim_memory(phys_addr_t align);
+unsigned long memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
+ phys_addr_t base2, phys_addr_t size2);
bool memblock_overlaps_region(struct memblock_type *type,
phys_addr_t base, phys_addr_t size);
+bool memblock_validate_numa_coverage(unsigned long threshold_bytes);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
+int memblock_reserved_mark_noinit(phys_addr_t base, phys_addr_t size);
void memblock_free_all(void);
void memblock_free(void *ptr, size_t size);
-void reset_node_managed_pages(pg_data_t *pgdat);
void reset_all_zones_managed_pages(void);
/* Low level functions */
@@ -260,6 +266,11 @@ static inline bool memblock_is_nomap(struct memblock_region *m)
return m->flags & MEMBLOCK_NOMAP;
}
+static inline bool memblock_is_reserved_noinit(struct memblock_region *m)
+{
+ return m->flags & MEMBLOCK_RSRV_NOINIT;
+}
+
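In early-boot code the flag is expected to be applied on top of an ordinary reservation, roughly as follows (a sketch; base and size are placeholders):

/* reserve a range whose struct pages the caller will initialise itself */
memblock_reserve(base, size);
memblock_reserved_mark_noinit(base, size);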
static inline bool memblock_is_driver_managed(struct memblock_region *m)
{
return m->flags & MEMBLOCK_DRIVER_MANAGED;
@@ -582,9 +593,7 @@ extern void *alloc_large_system_hash(const char *tablename,
unsigned long high_limit);
#define HASH_EARLY 0x00000001 /* Allocating during early boot? */
-#define HASH_SMALL 0x00000002 /* sub-page allocation allowed, min
- * shift passed via *_hash_shift */
-#define HASH_ZERO 0x00000004 /* Zero allocated hash table */
+#define HASH_ZERO 0x00000002 /* Zero allocated hash table */
/* Only NUMA needs hash distribution. 64bit NUMA architectures have
* sufficient vmalloc space.
@@ -597,13 +606,11 @@ extern int hashdist; /* Distribute hashes across NUMA nodes? */
#endif
#ifdef CONFIG_MEMTEST
-extern phys_addr_t early_memtest_bad_size; /* Size of faulty ram found by memtest */
-extern bool early_memtest_done; /* Was early memtest done? */
-extern void early_memtest(phys_addr_t start, phys_addr_t end);
+void early_memtest(phys_addr_t start, phys_addr_t end);
+void memtest_report_meminfo(struct seq_file *m);
#else
-static inline void early_memtest(phys_addr_t start, phys_addr_t end)
-{
-}
+static inline void early_memtest(phys_addr_t start, phys_addr_t end) { }
+static inline void memtest_report_meminfo(struct seq_file *m) { }
#endif
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 222d7370134c..20ff87f8e001 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -21,6 +21,7 @@
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>
+#include <linux/shrinker.h>
struct mem_cgroup;
struct obj_cgroup;
@@ -61,7 +62,6 @@ struct mem_cgroup_reclaim_cookie {
#ifdef CONFIG_MEMCG
#define MEM_CGROUP_ID_SHIFT 16
-#define MEM_CGROUP_ID_MAX USHRT_MAX
struct mem_cgroup_id {
int id;
@@ -89,17 +89,6 @@ struct mem_cgroup_reclaim_iter {
unsigned int generation;
};
-/*
- * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
- * shrinkers, which have elements charged to this memcg.
- */
-struct shrinker_info {
- struct rcu_head rcu;
- atomic_long_t *nr_deferred;
- unsigned long *map;
- int map_nr_max;
-};
-
struct lruvec_stats_percpu {
/* Local (CPU and cgroup) state */
long state[NR_VM_NODE_STAT_ITEMS];
@@ -112,6 +101,9 @@ struct lruvec_stats {
/* Aggregated (CPU and subtree) state */
long state[NR_VM_NODE_STAT_ITEMS];
+ /* Non-hierarchical (CPU aggregated) state */
+ long state_local[NR_VM_NODE_STAT_ITEMS];
+
/* Pending child counts during tree propagation */
long state_pending[NR_VM_NODE_STAT_ITEMS];
};
@@ -151,7 +143,7 @@ struct mem_cgroup_threshold_ary {
/* Size of entries[] */
unsigned int size;
/* Array of thresholds */
- struct mem_cgroup_threshold entries[];
+ struct mem_cgroup_threshold entries[] __counted_by(size);
};
struct mem_cgroup_thresholds {
@@ -227,6 +219,12 @@ struct mem_cgroup {
#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
unsigned long zswap_max;
+
+ /*
+ * Prevent pages from this memcg from being written back from zswap to
+ * swap, and from being swapped out on zswap store failures.
+ */
+ bool zswap_writeback;
#endif
unsigned long soft_limit;
@@ -284,6 +282,11 @@ struct mem_cgroup {
atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS];
+ /*
+	 * Hint of reclaim pressure for socket memory management. Note
+ * that this indicator should NOT be used in legacy cgroup mode
+ * where socket memory is accounted/charged separately.
+ */
unsigned long socket_pressure;
/* Legacy tcp memory accounting */
@@ -292,7 +295,13 @@ struct mem_cgroup {
#ifdef CONFIG_MEMCG_KMEM
int kmemcg_id;
- struct obj_cgroup __rcu *objcg;
+ /*
+	 * memcg->objcg is wiped out as a part of the objcg reparenting
+	 * process. memcg->orig_objcg preserves a pointer (and a reference)
+	 * to the original objcg until the end of the memcg's life.
+ */
+ struct obj_cgroup __rcu *objcg;
+ struct obj_cgroup *orig_objcg;
/* list of inherited objcgs, protected by objcg_lock */
struct list_head objcg_list;
#endif
@@ -321,7 +330,7 @@ struct mem_cgroup {
struct deferred_split deferred_split_queue;
#endif
-#ifdef CONFIG_LRU_GEN
+#ifdef CONFIG_LRU_GEN_WALKS_MMU
/* per-memcg mm_struct list */
struct lru_gen_mm_list mm_list;
#endif
@@ -419,7 +428,7 @@ static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
*
* - the folio lock
* - LRU isolation
- * - lock_page_memcg()
+ * - folio_memcg_lock()
* - exclusive reference
* - mem_cgroup_trylock_pages()
*
@@ -583,7 +592,7 @@ static inline void mem_cgroup_protection(struct mem_cgroup *root,
/*
* There is no reclaim protection applied to a targeted reclaim.
* We are special casing this specific case here because
- * mem_cgroup_protected calculation is not robust enough to keep
+ * mem_cgroup_calculate_protection is not robust enough to keep
* the protection invariant for calculated effective values for
* parallel reclaimers with different reclaim target. This is
* especially a problem for tail memcgs (as they have pages on LRU)
@@ -655,6 +664,8 @@ static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
page_counter_read(&memcg->memory);
}
+void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg);
+
int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);
/**
@@ -679,6 +690,9 @@ static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
return __mem_cgroup_charge(folio, mm, gfp);
}
+int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp,
+ long nr_pages);
+
int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
gfp_t gfp, swp_entry_t entry);
void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);
@@ -706,6 +720,10 @@ static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
__mem_cgroup_uncharge_list(page_list);
}
+void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages);
+
+void mem_cgroup_replace_folio(struct folio *old, struct folio *new);
+
void mem_cgroup_migrate(struct folio *old, struct folio *new);
/**
@@ -762,6 +780,8 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
+struct mem_cgroup *get_mem_cgroup_from_current(void);
+
struct lruvec *folio_lruvec_lock(struct folio *folio);
struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
@@ -807,6 +827,11 @@ static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
return !memcg || css_tryget(&memcg->css);
}
+static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg)
+{
+ return !memcg || css_tryget_online(&memcg->css);
+}
+
static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
if (memcg)
@@ -820,8 +845,8 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
struct mem_cgroup *,
struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
-int mem_cgroup_scan_tasks(struct mem_cgroup *,
- int (*)(struct task_struct *, void *), void *);
+void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
+ int (*)(struct task_struct *, void *), void *arg);
static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
@@ -861,8 +886,7 @@ static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
* parent_mem_cgroup - find the accounting parent of a memcg
* @memcg: memcg whose parent to find
*
- * Returns the parent memcg, or NULL if this is the root or the memory
- * controller is in legacy no-hierarchy mode.
+ * Returns the parent memcg, or NULL if this is the root.
*/
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
@@ -914,7 +938,7 @@ unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
}
-void mem_cgroup_handle_over_high(void);
+void mem_cgroup_handle_over_high(gfp_t gfp_mask);
unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
@@ -949,8 +973,6 @@ void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
void folio_memcg_lock(struct folio *folio);
void folio_memcg_unlock(struct folio *folio);
-void lock_page_memcg(struct page *page);
-void unlock_page_memcg(struct page *page);
void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);
@@ -1022,14 +1044,12 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
{
struct mem_cgroup_per_node *pn;
long x = 0;
- int cpu;
if (mem_cgroup_disabled())
return node_page_state(lruvec_pgdat(lruvec), idx);
pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
- for_each_possible_cpu(cpu)
- x += per_cpu(pn->lruvec_stats_percpu->state[idx], cpu);
+ x = READ_ONCE(pn->lruvec_stats.state_local[idx]);
#ifdef CONFIG_SMP
if (x < 0)
x = 0;
@@ -1037,9 +1057,8 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
return x;
}
-void mem_cgroup_flush_stats(void);
-void mem_cgroup_flush_stats_atomic(void);
-void mem_cgroup_flush_stats_ratelimited(void);
+void mem_cgroup_flush_stats(struct mem_cgroup *memcg);
+void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg);
void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
int val);
@@ -1079,15 +1098,6 @@ static inline void count_memcg_events(struct mem_cgroup *memcg,
local_irq_restore(flags);
}
-static inline void count_memcg_page_event(struct page *page,
- enum vm_event_item idx)
-{
- struct mem_cgroup *memcg = page_memcg(page);
-
- if (memcg)
- count_memcg_events(memcg, idx, 1);
-}
-
static inline void count_memcg_folio_events(struct folio *folio,
enum vm_event_item idx, unsigned long nr)
{
@@ -1161,7 +1171,6 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
#else /* CONFIG_MEMCG */
#define MEM_CGROUP_ID_SHIFT 0
-#define MEM_CGROUP_ID_MAX 0
static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
@@ -1189,6 +1198,11 @@ static inline struct mem_cgroup *page_memcg_check(struct page *page)
return NULL;
}
+static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
+{
+ return NULL;
+}
+
static inline bool folio_memcg_kmem(struct folio *folio)
{
return false;
@@ -1249,12 +1263,23 @@ static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
return false;
}
+static inline void mem_cgroup_commit_charge(struct folio *folio,
+ struct mem_cgroup *memcg)
+{
+}
+
static inline int mem_cgroup_charge(struct folio *folio,
struct mm_struct *mm, gfp_t gfp)
{
return 0;
}
+static inline int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg,
+ gfp_t gfp, long nr_pages)
+{
+ return 0;
+}
+
static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
{
@@ -1273,6 +1298,16 @@ static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}
+static inline void mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
+ unsigned int nr_pages)
+{
+}
+
+static inline void mem_cgroup_replace_folio(struct folio *old,
+ struct folio *new)
+{
+}
+
static inline void mem_cgroup_migrate(struct folio *old, struct folio *new)
{
}
@@ -1310,6 +1345,11 @@ static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
return NULL;
}
+static inline struct mem_cgroup *get_mem_cgroup_from_current(void)
+{
+ return NULL;
+}
+
static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
@@ -1325,6 +1365,11 @@ static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
return true;
}
+static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg)
+{
+ return true;
+}
+
static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}
@@ -1367,10 +1412,9 @@ static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
{
}
-static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
+static inline void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
int (*fn)(struct task_struct *, void *), void *arg)
{
- return 0;
}
static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
@@ -1439,14 +1483,6 @@ mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}
-static inline void lock_page_memcg(struct page *page)
-{
-}
-
-static inline void unlock_page_memcg(struct page *page)
-{
-}
-
static inline void folio_memcg_lock(struct folio *folio)
{
}
@@ -1467,7 +1503,7 @@ static inline void mem_cgroup_unlock_pages(void)
rcu_read_unlock();
}
-static inline void mem_cgroup_handle_over_high(void)
+static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask)
{
}
@@ -1533,15 +1569,11 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
return node_page_state(lruvec_pgdat(lruvec), idx);
}
-static inline void mem_cgroup_flush_stats(void)
-{
-}
-
-static inline void mem_cgroup_flush_stats_atomic(void)
+static inline void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
{
}
-static inline void mem_cgroup_flush_stats_ratelimited(void)
+static inline void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
{
}
@@ -1578,11 +1610,6 @@ static inline void __count_memcg_events(struct mem_cgroup *memcg,
{
}
-static inline void count_memcg_page_event(struct page *page,
- int idx)
-{
-}
-
static inline void count_memcg_folio_events(struct folio *folio,
enum vm_event_item idx, unsigned long nr)
{
@@ -1743,8 +1770,8 @@ void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
- if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
- return true;
+ if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
+ return !!memcg->tcpmem_pressure;
do {
if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
return true;
@@ -1776,8 +1803,26 @@ bool mem_cgroup_kmem_disabled(void);
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge_page(struct page *page, int order);
-struct obj_cgroup *get_obj_cgroup_from_current(void);
-struct obj_cgroup *get_obj_cgroup_from_page(struct page *page);
+/*
+ * The returned objcg pointer is safe to use without additional
+ * protection within a scope. The scope is defined either by
+ * the current task (similar to the "current" global variable)
+ * or by a set_active_memcg() pair.
+ * Please use obj_cgroup_get() to get a reference if the pointer
+ * needs to be used outside of the local scope.
+ */
+struct obj_cgroup *current_obj_cgroup(void);
+struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio);
+
+static inline struct obj_cgroup *get_obj_cgroup_from_current(void)
+{
+ struct obj_cgroup *objcg = current_obj_cgroup();
+
+ if (objcg)
+ obj_cgroup_get(objcg);
+
+ return objcg;
+}
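A minimal sketch of the scoped-use pattern the comment describes, assuming a short allocation path where no reference needs to escape the scope:

struct obj_cgroup *objcg = current_obj_cgroup();

/* valid without obj_cgroup_get() while we stay in this scope */
if (objcg && obj_cgroup_charge(objcg, GFP_KERNEL, size))
	return -ENOMEM;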
int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);
@@ -1861,7 +1906,7 @@ static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
{
}
-static inline struct obj_cgroup *get_obj_cgroup_from_page(struct page *page)
+static inline struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
{
return NULL;
}
@@ -1902,6 +1947,7 @@ static inline void count_objcg_event(struct obj_cgroup *objcg,
bool obj_cgroup_may_zswap(struct obj_cgroup *objcg);
void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size);
void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size);
+bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg);
#else
static inline bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
{
@@ -1915,6 +1961,11 @@ static inline void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg,
size_t size)
{
}
+static inline bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
+{
+	/* if zswap is disabled, do not block pages going to the swap device */
+ return true;
+}
#endif
#endif /* _LINUX_MEMCONTROL_H */
diff --git a/include/linux/memory-tiers.h b/include/linux/memory-tiers.h
index fc9647b1b4f9..69e781900082 100644
--- a/include/linux/memory-tiers.h
+++ b/include/linux/memory-tiers.h
@@ -6,6 +6,7 @@
#include <linux/nodemask.h>
#include <linux/kref.h>
#include <linux/mmzone.h>
+#include <linux/notifier.h>
/*
  * Each tier covers an abstract distance chunk of size 128
*/
@@ -22,7 +23,9 @@
struct memory_tier;
struct memory_dev_type {
/* list of memory types that are part of same tier as this type */
- struct list_head tier_sibiling;
+ struct list_head tier_sibling;
+ /* list of memory types that are managed by one driver */
+ struct list_head list;
/* abstract distance for this specific memory type */
int adistance;
/* Nodes of same abstract distance */
@@ -30,12 +33,21 @@ struct memory_dev_type {
struct kref kref;
};
+struct access_coordinate;
+
#ifdef CONFIG_NUMA
extern bool numa_demotion_enabled;
+extern struct memory_dev_type *default_dram_type;
struct memory_dev_type *alloc_memory_type(int adistance);
-void destroy_memory_type(struct memory_dev_type *memtype);
+void put_memory_type(struct memory_dev_type *memtype);
void init_node_memory_type(int node, struct memory_dev_type *default_type);
void clear_node_memory_type(int node, struct memory_dev_type *memtype);
+int register_mt_adistance_algorithm(struct notifier_block *nb);
+int unregister_mt_adistance_algorithm(struct notifier_block *nb);
+int mt_calc_adistance(int node, int *adist);
+int mt_set_default_dram_perf(int nid, struct access_coordinate *perf,
+ const char *source);
+int mt_perf_to_adistance(struct access_coordinate *perf, int *adist);
#ifdef CONFIG_MIGRATION
int next_demotion_node(int node);
void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets);
@@ -60,6 +72,7 @@ static inline bool node_is_toptier(int node)
#else
#define numa_demotion_enabled false
+#define default_dram_type NULL
/*
* CONFIG_NUMA implementation returns non NULL error.
*/
@@ -68,7 +81,7 @@ static inline struct memory_dev_type *alloc_memory_type(int adistance)
return NULL;
}
-static inline void destroy_memory_type(struct memory_dev_type *memtype)
+static inline void put_memory_type(struct memory_dev_type *memtype)
{
}
@@ -97,5 +110,31 @@ static inline bool node_is_toptier(int node)
{
return true;
}
+
+static inline int register_mt_adistance_algorithm(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int unregister_mt_adistance_algorithm(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int mt_calc_adistance(int node, int *adist)
+{
+ return NOTIFY_DONE;
+}
+
+static inline int mt_set_default_dram_perf(int nid, struct access_coordinate *perf,
+ const char *source)
+{
+ return -EIO;
+}
+
+static inline int mt_perf_to_adistance(struct access_coordinate *perf, int *adist)
+{
+ return -EIO;
+}
#endif /* CONFIG_NUMA */
#endif /* _LINUX_MEMORY_TIERS_H */
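The notifier plumbing suggests a driver registers a callback that refines the abstract distance for a node. A sketch, under the assumption that mt_calc_adistance() passes the node id as the notifier action and an int pointer as the data, and that the MEMTIER_ADISTANCE_DRAM/MEMTIER_CHUNK_SIZE constants from this header are available:

static int my_adistance_cb(struct notifier_block *nb, unsigned long nid,
			   void *data)
{
	int *adist = data;

	/* hypothetical: place this node one chunk below DRAM in the hierarchy */
	*adist = MEMTIER_ADISTANCE_DRAM + MEMTIER_CHUNK_SIZE;
	return NOTIFY_OK;
}

static struct notifier_block my_adistance_nb = {
	.notifier_call = my_adistance_cb,
};

/* during driver init */
register_mt_adistance_algorithm(&my_adistance_nb);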
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 31343566c221..f53cfdaaaa41 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -77,11 +77,7 @@ struct memory_block {
*/
struct zone *zone;
struct device dev;
- /*
- * Number of vmemmap pages. These pages
- * lay at the beginning of the memory block.
- */
- unsigned long nr_vmemmap_pages;
+ struct vmem_altmap *altmap;
struct memory_group *group; /* group (if any) for this block */
struct list_head group_next; /* next block inside memory group */
#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG)
@@ -147,7 +143,7 @@ static inline int hotplug_memory_notifier(notifier_fn_t fn, int pri)
extern int register_memory_notifier(struct notifier_block *nb);
extern void unregister_memory_notifier(struct notifier_block *nb);
int create_memory_block_devices(unsigned long start, unsigned long size,
- unsigned long vmemmap_pages,
+ struct vmem_altmap *altmap,
struct memory_group *group);
void remove_memory_block_devices(unsigned long start, unsigned long size);
extern void memory_dev_init(void);
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 9fcbf5706595..7d2076583494 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -97,6 +97,8 @@ typedef int __bitwise mhp_t;
* To do so, we will use the beginning of the hot-added range to build
* the page tables for the memmap array that describes the entire range.
* Only selected architectures support it with SPARSE_VMEMMAP.
+ * This is only a hint; the core kernel may decide not to do this based on
+ * various alignment checks.
*/
#define MHP_MEMMAP_ON_MEMORY ((__force mhp_t)BIT(1))
/*
@@ -326,9 +328,6 @@ static inline int remove_memory(u64 start, u64 size)
static inline void __remove_memory(u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */
-extern void set_zone_contiguous(struct zone *zone);
-extern void clear_zone_contiguous(struct zone *zone);
-
#ifdef CONFIG_MEMORY_HOTPLUG
extern void __ref free_area_init_core_hotplug(struct pglist_data *pgdat);
extern int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
@@ -347,9 +346,8 @@ extern void remove_pfn_range_from_zone(struct zone *zone,
extern int sparse_add_section(int nid, unsigned long pfn,
unsigned long nr_pages, struct vmem_altmap *altmap,
struct dev_pagemap *pgmap);
-extern void sparse_remove_section(struct mem_section *ms,
- unsigned long pfn, unsigned long nr_pages,
- unsigned long map_offset, struct vmem_altmap *altmap);
+extern void sparse_remove_section(unsigned long pfn, unsigned long nr_pages,
+ struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
unsigned long pnum);
extern struct zone *zone_for_pfn_range(int online_type, int nid,
@@ -358,7 +356,6 @@ extern struct zone *zone_for_pfn_range(int online_type, int nid,
extern int arch_create_linear_mapping(int nid, u64 start, u64 size,
struct mhp_params *params);
void arch_remove_linear_mapping(u64 start, u64 size);
-extern bool mhp_supports_memmap_on_memory(unsigned long size);
#endif /* CONFIG_MEMORY_HOTPLUG */
#endif /* __LINUX_MEMORY_HOTPLUG_H */
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index d232de7cdc56..931b118336f4 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -17,6 +17,8 @@
struct mm_struct;
+#define NO_INTERLEAVE_INDEX (-1UL) /* use task il_prev for interleaving */
+
#ifdef CONFIG_NUMA
/*
@@ -89,8 +91,6 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
return pol;
}
-#define vma_policy(vma) ((vma)->vm_policy)
-
static inline void mpol_get(struct mempolicy *pol)
{
if (pol)
@@ -107,35 +107,30 @@ static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
/*
* Tree of shared policies for a shared memory region.
- * Maintain the policies in a pseudo mm that contains vmas. The vmas
- * carry the policy. As a special twist the pseudo mm is indexed in pages, not
- * bytes, so that we can work with shared memory segments bigger than
- * unsigned long.
*/
-
-struct sp_node {
- struct rb_node nd;
- unsigned long start, end;
- struct mempolicy *policy;
-};
-
struct shared_policy {
struct rb_root root;
rwlock_t lock;
};
+struct sp_node {
+ struct rb_node nd;
+ pgoff_t start, end;
+ struct mempolicy *policy;
+};
int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
-int mpol_set_shared_policy(struct shared_policy *info,
- struct vm_area_struct *vma,
- struct mempolicy *new);
-void mpol_free_shared_policy(struct shared_policy *p);
+int mpol_set_shared_policy(struct shared_policy *sp,
+ struct vm_area_struct *vma, struct mempolicy *mpol);
+void mpol_free_shared_policy(struct shared_policy *sp);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
- unsigned long idx);
+ pgoff_t idx);
struct mempolicy *get_task_policy(struct task_struct *p);
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
- unsigned long addr);
+ unsigned long addr, pgoff_t *ilx);
+struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
+ unsigned long addr, int order, pgoff_t *ilx);
bool vma_policy_mof(struct vm_area_struct *vma);
extern void numa_default_policy(void);
@@ -149,8 +144,6 @@ extern int huge_node(struct vm_area_struct *vma,
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_in_oom_domain(struct task_struct *tsk,
const nodemask_t *mask);
-extern nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy);
-
extern unsigned int mempolicy_slab_node(void);
extern enum zone_type policy_zone;
@@ -174,7 +167,7 @@ extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
/* Check if a vma is migratable */
extern bool vma_migratable(struct vm_area_struct *vma);
-extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
+int mpol_misplaced(struct folio *, struct vm_area_struct *, unsigned long);
extern void mpol_put_task_policy(struct task_struct *);
static inline bool mpol_is_preferred_many(struct mempolicy *pol)
@@ -188,12 +181,17 @@ extern bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone);
struct mempolicy {};
+static inline struct mempolicy *get_task_policy(struct task_struct *p)
+{
+ return NULL;
+}
+
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
return true;
}
-static inline void mpol_put(struct mempolicy *p)
+static inline void mpol_put(struct mempolicy *pol)
{
}
@@ -212,17 +210,22 @@ static inline void mpol_shared_policy_init(struct shared_policy *sp,
{
}
-static inline void mpol_free_shared_policy(struct shared_policy *p)
+static inline void mpol_free_shared_policy(struct shared_policy *sp)
{
}
static inline struct mempolicy *
-mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
+mpol_shared_policy_lookup(struct shared_policy *sp, pgoff_t idx)
{
return NULL;
}
-#define vma_policy(vma) NULL
+static inline struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
+ unsigned long addr, int order, pgoff_t *ilx)
+{
+ *ilx = 0;
+ return NULL;
+}
static inline int
vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
@@ -278,7 +281,8 @@ static inline int mpol_parse_str(char *str, struct mempolicy **mpol)
}
#endif
-static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
+static inline int mpol_misplaced(struct folio *folio,
+ struct vm_area_struct *vma,
unsigned long address)
{
return -1; /* no node preference */
diff --git a/include/linux/mempool.h b/include/linux/mempool.h
index 4aae6c06c5f2..7be1e32e6d42 100644
--- a/include/linux/mempool.h
+++ b/include/linux/mempool.h
@@ -51,6 +51,7 @@ extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
extern int mempool_resize(mempool_t *pool, int new_min_nr);
extern void mempool_destroy(mempool_t *pool);
extern void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) __malloc;
+extern void *mempool_alloc_preallocated(mempool_t *pool) __malloc;
extern void mempool_free(void *element, mempool_t *pool);
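Presumably the point of the new helper is an allocation that never sleeps and never falls back to the pool's alloc_fn; a sketch:

/* returns a preallocated element or NULL; never sleeps and never
 * invokes the pool's alloc_fn */
void *elem = mempool_alloc_preallocated(pool);

if (!elem)
	return -EAGAIN;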
/*
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 1314d9c5f05b..744c830f4b13 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -196,8 +196,6 @@ struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
struct dev_pagemap *pgmap);
bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn);
-unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
-void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);
unsigned long memremap_compat_align(void);
#else
static inline void *devm_memremap_pages(struct device *dev,
@@ -228,16 +226,6 @@ static inline bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
return false;
}
-static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
-{
- return 0;
-}
-
-static inline void vmem_altmap_free(struct vmem_altmap *altmap,
- unsigned long nr_pfns)
-{
-}
-
/* when memremap_pages() is disabled all archs can remap a single page */
static inline unsigned long memremap_compat_align(void)
{
diff --git a/include/linux/mfd/88pm860x.h b/include/linux/mfd/88pm860x.h
index 473545a2c425..6fa21791fc85 100644
--- a/include/linux/mfd/88pm860x.h
+++ b/include/linux/mfd/88pm860x.h
@@ -472,13 +472,7 @@ extern int pm860x_bulk_read(struct i2c_client *, int, int, unsigned char *);
extern int pm860x_bulk_write(struct i2c_client *, int, int, unsigned char *);
extern int pm860x_set_bits(struct i2c_client *, int, unsigned char,
unsigned char);
-extern int pm860x_page_reg_read(struct i2c_client *, int);
extern int pm860x_page_reg_write(struct i2c_client *, int, unsigned char);
extern int pm860x_page_bulk_read(struct i2c_client *, int, int,
unsigned char *);
-extern int pm860x_page_bulk_write(struct i2c_client *, int, int,
- unsigned char *);
-extern int pm860x_page_set_bits(struct i2c_client *, int, unsigned char,
- unsigned char);
-
#endif /* __LINUX_MFD_88PM860X_H */
diff --git a/include/linux/mfd/abx500/ab8500.h b/include/linux/mfd/abx500/ab8500.h
index 302a330c5c84..76d326ea8eba 100644
--- a/include/linux/mfd/abx500/ab8500.h
+++ b/include/linux/mfd/abx500/ab8500.h
@@ -382,10 +382,6 @@ struct ab8500_platform_data {
struct ab8500_sysctrl_platform_data *sysctrl;
};
-extern int ab8500_init(struct ab8500 *ab8500,
- enum ab8500_version version);
-extern int ab8500_exit(struct ab8500 *ab8500);
-
extern int ab8500_suspend(struct ab8500 *ab8500);
static inline int is_ab8500(struct ab8500 *ab)
@@ -503,13 +499,7 @@ static inline int is_ab9540_2p0_or_earlier(struct ab8500 *ab)
void ab8500_override_turn_on_stat(u8 mask, u8 set);
-#ifdef CONFIG_AB8500_DEBUG
-extern int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size);
-void ab8500_dump_all_banks(struct device *dev);
-void ab8500_debug_register_interrupt(int line);
-#else
static inline void ab8500_dump_all_banks(struct device *dev) {}
static inline void ab8500_debug_register_interrupt(int line) {}
-#endif
#endif /* MFD_AB8500_H */
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index beb3f44f85c5..f1755163dd9f 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -12,11 +12,13 @@
enum axp20x_variants {
AXP152_ID = 0,
+ AXP192_ID,
AXP202_ID,
AXP209_ID,
AXP221_ID,
AXP223_ID,
AXP288_ID,
+ AXP313A_ID,
AXP803_ID,
AXP806_ID,
AXP809_ID,
@@ -25,6 +27,7 @@ enum axp20x_variants {
NR_AXP20X_VARIANTS,
};
+#define AXP192_DATACACHE(m) (0x06 + (m))
#define AXP20X_DATACACHE(m) (0x04 + (m))
/* Power supply */
@@ -46,6 +49,13 @@ enum axp20x_variants {
#define AXP152_DCDC_FREQ 0x37
#define AXP152_DCDC_MODE 0x80
+#define AXP192_USB_OTG_STATUS 0x04
+#define AXP192_PWR_OUT_CTRL 0x12
+#define AXP192_DCDC2_V_OUT 0x23
+#define AXP192_DCDC1_V_OUT 0x26
+#define AXP192_DCDC3_V_OUT 0x27
+#define AXP192_LDO2_3_V_OUT 0x28
+
#define AXP20X_PWR_INPUT_STATUS 0x00
#define AXP20X_PWR_OP_MODE 0x01
#define AXP20X_USB_OTG_STATUS 0x02
@@ -92,6 +102,17 @@ enum axp20x_variants {
#define AXP22X_ALDO3_V_OUT 0x2a
#define AXP22X_CHRG_CTRL3 0x35
+#define AXP313A_ON_INDICATE 0x00
+#define AXP313A_OUTPUT_CONTROL 0x10
+#define AXP313A_DCDC1_CONRTOL 0x13
+#define AXP313A_DCDC2_CONRTOL 0x14
+#define AXP313A_DCDC3_CONRTOL 0x15
+#define AXP313A_ALDO1_CONRTOL 0x16
+#define AXP313A_DLDO1_CONRTOL 0x17
+#define AXP313A_SHUTDOWN_CTRL 0x1a
+#define AXP313A_IRQ_EN 0x20
+#define AXP313A_IRQ_STATE 0x21
+
#define AXP806_STARTUP_SRC 0x00
#define AXP806_CHIP_ID 0x03
#define AXP806_PWR_OUT_CTRL1 0x10
@@ -173,6 +194,17 @@ enum axp20x_variants {
#define AXP152_IRQ2_STATE 0x49
#define AXP152_IRQ3_STATE 0x4a
+#define AXP192_IRQ1_EN 0x40
+#define AXP192_IRQ2_EN 0x41
+#define AXP192_IRQ3_EN 0x42
+#define AXP192_IRQ4_EN 0x43
+#define AXP192_IRQ1_STATE 0x44
+#define AXP192_IRQ2_STATE 0x45
+#define AXP192_IRQ3_STATE 0x46
+#define AXP192_IRQ4_STATE 0x47
+#define AXP192_IRQ5_EN 0x4a
+#define AXP192_IRQ5_STATE 0x4d
+
#define AXP20X_IRQ1_EN 0x40
#define AXP20X_IRQ2_EN 0x41
#define AXP20X_IRQ3_EN 0x42
@@ -192,6 +224,11 @@ enum axp20x_variants {
#define AXP15060_IRQ2_STATE 0x49
/* ADC */
+#define AXP192_GPIO2_V_ADC_H 0x68
+#define AXP192_GPIO2_V_ADC_L 0x69
+#define AXP192_GPIO3_V_ADC_H 0x6a
+#define AXP192_GPIO3_V_ADC_L 0x6b
+
#define AXP20X_ACIN_V_ADC_H 0x56
#define AXP20X_ACIN_V_ADC_L 0x57
#define AXP20X_ACIN_I_ADC_H 0x58
@@ -221,6 +258,8 @@ enum axp20x_variants {
#define AXP20X_IPSOUT_V_HIGH_L 0x7f
/* Power supply */
+#define AXP192_GPIO30_IN_RANGE 0x85
+
#define AXP20X_DCDC_MODE 0x80
#define AXP20X_ADC_EN1 0x82
#define AXP20X_ADC_EN2 0x83
@@ -249,6 +288,16 @@ enum axp20x_variants {
#define AXP152_PWM1_FREQ_Y 0x9c
#define AXP152_PWM1_DUTY_CYCLE 0x9d
+#define AXP192_GPIO0_CTRL 0x90
+#define AXP192_LDO_IO0_V_OUT 0x91
+#define AXP192_GPIO1_CTRL 0x92
+#define AXP192_GPIO2_CTRL 0x93
+#define AXP192_GPIO2_0_STATE 0x94
+#define AXP192_GPIO4_3_CTRL 0x95
+#define AXP192_GPIO4_3_STATE 0x96
+#define AXP192_GPIO2_0_PULL 0x97
+#define AXP192_N_RSTO_CTRL 0x9e
+
#define AXP20X_GPIO0_CTRL 0x90
#define AXP20X_LDO5_V_OUT 0x91
#define AXP20X_GPIO1_CTRL 0x92
@@ -329,6 +378,17 @@ enum axp20x_variants {
/* Regulators IDs */
enum {
+ AXP192_DCDC1 = 0,
+ AXP192_DCDC2,
+ AXP192_DCDC3,
+ AXP192_LDO1,
+ AXP192_LDO2,
+ AXP192_LDO3,
+ AXP192_LDO_IO0,
+ AXP192_REG_ID_MAX
+};
+
+enum {
AXP20X_LDO1 = 0,
AXP20X_LDO2,
AXP20X_LDO3,
@@ -364,6 +424,16 @@ enum {
};
enum {
+ AXP313A_DCDC1 = 0,
+ AXP313A_DCDC2,
+ AXP313A_DCDC3,
+ AXP313A_ALDO1,
+ AXP313A_DLDO1,
+ AXP313A_RTC_LDO,
+ AXP313A_REG_ID_MAX,
+};
+
+enum {
AXP806_DCDCA = 0,
AXP806_DCDCB,
AXP806_DCDCC,
@@ -509,6 +579,42 @@ enum {
AXP152_IRQ_GPIO0_INPUT,
};
+enum axp192_irqs {
+ AXP192_IRQ_ACIN_OVER_V = 1,
+ AXP192_IRQ_ACIN_PLUGIN,
+ AXP192_IRQ_ACIN_REMOVAL,
+ AXP192_IRQ_VBUS_OVER_V,
+ AXP192_IRQ_VBUS_PLUGIN,
+ AXP192_IRQ_VBUS_REMOVAL,
+ AXP192_IRQ_VBUS_V_LOW,
+ AXP192_IRQ_BATT_PLUGIN,
+ AXP192_IRQ_BATT_REMOVAL,
+ AXP192_IRQ_BATT_ENT_ACT_MODE,
+ AXP192_IRQ_BATT_EXIT_ACT_MODE,
+ AXP192_IRQ_CHARG,
+ AXP192_IRQ_CHARG_DONE,
+ AXP192_IRQ_BATT_TEMP_HIGH,
+ AXP192_IRQ_BATT_TEMP_LOW,
+ AXP192_IRQ_DIE_TEMP_HIGH,
+ AXP192_IRQ_CHARG_I_LOW,
+ AXP192_IRQ_DCDC1_V_LOW,
+ AXP192_IRQ_DCDC2_V_LOW,
+ AXP192_IRQ_DCDC3_V_LOW,
+ AXP192_IRQ_PEK_SHORT = 22,
+ AXP192_IRQ_PEK_LONG,
+ AXP192_IRQ_N_OE_PWR_ON,
+ AXP192_IRQ_N_OE_PWR_OFF,
+ AXP192_IRQ_VBUS_VALID,
+ AXP192_IRQ_VBUS_NOT_VALID,
+ AXP192_IRQ_VBUS_SESS_VALID,
+ AXP192_IRQ_VBUS_SESS_END,
+ AXP192_IRQ_LOW_PWR_LVL = 31,
+ AXP192_IRQ_TIMER,
+ AXP192_IRQ_GPIO2_INPUT = 37,
+ AXP192_IRQ_GPIO1_INPUT,
+ AXP192_IRQ_GPIO0_INPUT,
+};
+
enum {
AXP20X_IRQ_ACIN_OVER_V = 1,
AXP20X_IRQ_ACIN_PLUGIN,
@@ -616,6 +722,16 @@ enum axp288_irqs {
AXP288_IRQ_BC_USB_CHNG,
};
+enum axp313a_irqs {
+ AXP313A_IRQ_DIE_TEMP_HIGH,
+ AXP313A_IRQ_DCDC2_V_LOW = 2,
+ AXP313A_IRQ_DCDC3_V_LOW,
+ AXP313A_IRQ_PEK_LONG,
+ AXP313A_IRQ_PEK_SHORT,
+ AXP313A_IRQ_PEK_FAL_EDGE,
+ AXP313A_IRQ_PEK_RIS_EDGE,
+};
+
enum axp803_irqs {
AXP803_IRQ_ACIN_OVER_V = 1,
AXP803_IRQ_ACIN_PLUGIN,
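
The AXP192 and AXP313A additions above follow the existing axp20x convention: register offsets as #defines and IRQ numbers as enum values that index bits in the IRQ state registers. As a minimal sketch only (not part of this patch; real drivers go through the regmap-irq framework), polling one AXP313A interrupt bit would look like:

#include <linux/bits.h>
#include <linux/mfd/axp20x.h>
#include <linux/regmap.h>

/* Hypothetical helper: returns 1 if the power-key short-press IRQ is
 * pending.  Assumes an already-initialised regmap for the PMIC. */
static int axp313a_pek_short_pending(struct regmap *regmap)
{
	unsigned int val;
	int ret;

	ret = regmap_read(regmap, AXP313A_IRQ_STATE, &val);
	if (ret)
		return ret;

	return !!(val & BIT(AXP313A_IRQ_PEK_SHORT));
}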
diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h
index 47e7a3a61ce6..e8bcad641d8c 100644
--- a/include/linux/mfd/core.h
+++ b/include/linux/mfd/core.h
@@ -92,7 +92,7 @@ struct mfd_cell {
* (above) when matching OF nodes with devices that have identical
* compatible strings
*/
- const u64 of_reg;
+ u64 of_reg;
/* Set to 'true' to use 'of_reg' (above) - allows for of_reg=0 */
bool use_of_reg;
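
Dropping const from of_reg lets parent drivers fill the field in at runtime rather than only in static tables. The companion use_of_reg flag exists because 0 is a valid unit address, so "of_reg == 0" alone cannot mean "unset". A hedged sketch of the intended usage (vendor and device names hypothetical):

#include <linux/mfd/core.h>

/* Two sub-devices share a compatible; of_reg disambiguates which OF
 * child node (by its reg property) each cell binds to. */
static struct mfd_cell example_cells[] = {
	{
		.name = "example-adc",
		.of_compatible = "vendor,example-adc",
		.of_reg = 0,		/* matches the reg = <0x0> child */
		.use_of_reg = true,	/* needed so of_reg = 0 counts */
	},
	{
		.name = "example-adc",
		.of_compatible = "vendor,example-adc",
		.of_reg = 0x100,	/* matches the reg = <0x100> child */
		.use_of_reg = true,
	},
};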
diff --git a/include/linux/mfd/cs42l43-regs.h b/include/linux/mfd/cs42l43-regs.h
new file mode 100644
index 000000000000..c39a49269cb7
--- /dev/null
+++ b/include/linux/mfd/cs42l43-regs.h
@@ -0,0 +1,1184 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * cs42l43 register definitions
+ *
+ * Copyright (c) 2022-2023 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#ifndef CS42L43_CORE_REGS_H
+#define CS42L43_CORE_REGS_H
+
+/* Registers */
+#define CS42L43_GEN_INT_STAT_1 0x000000C0
+#define CS42L43_GEN_INT_MASK_1 0x000000C1
+#define CS42L43_DEVID 0x00003000
+#define CS42L43_REVID 0x00003004
+#define CS42L43_RELID 0x0000300C
+#define CS42L43_SFT_RESET 0x00003020
+#define CS42L43_DRV_CTRL1 0x00006004
+#define CS42L43_DRV_CTRL3 0x0000600C
+#define CS42L43_DRV_CTRL4 0x00006010
+#define CS42L43_DRV_CTRL_5 0x00006014
+#define CS42L43_GPIO_CTRL1 0x00006034
+#define CS42L43_GPIO_CTRL2 0x00006038
+#define CS42L43_GPIO_STS 0x0000603C
+#define CS42L43_GPIO_FN_SEL 0x00006040
+#define CS42L43_MCLK_SRC_SEL 0x00007004
+#define CS42L43_CCM_BLK_CLK_CONTROL 0x00007010
+#define CS42L43_SAMPLE_RATE1 0x00007014
+#define CS42L43_SAMPLE_RATE2 0x00007018
+#define CS42L43_SAMPLE_RATE3 0x0000701C
+#define CS42L43_SAMPLE_RATE4 0x00007020
+#define CS42L43_PLL_CONTROL 0x00007034
+#define CS42L43_FS_SELECT1 0x00007038
+#define CS42L43_FS_SELECT2 0x0000703C
+#define CS42L43_FS_SELECT3 0x00007040
+#define CS42L43_FS_SELECT4 0x00007044
+#define CS42L43_PDM_CONTROL 0x0000704C
+#define CS42L43_ASP_CLK_CONFIG1 0x00007058
+#define CS42L43_ASP_CLK_CONFIG2 0x0000705C
+#define CS42L43_OSC_DIV_SEL 0x00007068
+#define CS42L43_ADC_B_CTRL1 0x00008000
+#define CS42L43_ADC_B_CTRL2 0x00008004
+#define CS42L43_DECIM_HPF_WNF_CTRL1 0x0000803C
+#define CS42L43_DECIM_HPF_WNF_CTRL2 0x00008040
+#define CS42L43_DECIM_HPF_WNF_CTRL3 0x00008044
+#define CS42L43_DECIM_HPF_WNF_CTRL4 0x00008048
+#define CS42L43_DMIC_PDM_CTRL 0x0000804C
+#define CS42L43_DECIM_VOL_CTRL_CH1_CH2 0x00008050
+#define CS42L43_DECIM_VOL_CTRL_CH3_CH4 0x00008054
+#define CS42L43_DECIM_VOL_CTRL_UPDATE 0x00008058
+#define CS42L43_INTP_VOLUME_CTRL1 0x00009008
+#define CS42L43_INTP_VOLUME_CTRL2 0x0000900C
+#define CS42L43_AMP1_2_VOL_RAMP 0x00009010
+#define CS42L43_ASP_CTRL 0x0000A000
+#define CS42L43_ASP_FSYNC_CTRL1 0x0000A004
+#define CS42L43_ASP_FSYNC_CTRL2 0x0000A008
+#define CS42L43_ASP_FSYNC_CTRL3 0x0000A00C
+#define CS42L43_ASP_FSYNC_CTRL4 0x0000A010
+#define CS42L43_ASP_DATA_CTRL 0x0000A018
+#define CS42L43_ASP_RX_EN 0x0000A020
+#define CS42L43_ASP_TX_EN 0x0000A024
+#define CS42L43_ASP_RX_CH1_CTRL 0x0000A028
+#define CS42L43_ASP_RX_CH2_CTRL 0x0000A02C
+#define CS42L43_ASP_RX_CH3_CTRL 0x0000A030
+#define CS42L43_ASP_RX_CH4_CTRL 0x0000A034
+#define CS42L43_ASP_RX_CH5_CTRL 0x0000A038
+#define CS42L43_ASP_RX_CH6_CTRL 0x0000A03C
+#define CS42L43_ASP_TX_CH1_CTRL 0x0000A068
+#define CS42L43_ASP_TX_CH2_CTRL 0x0000A06C
+#define CS42L43_ASP_TX_CH3_CTRL 0x0000A070
+#define CS42L43_ASP_TX_CH4_CTRL 0x0000A074
+#define CS42L43_ASP_TX_CH5_CTRL 0x0000A078
+#define CS42L43_ASP_TX_CH6_CTRL 0x0000A07C
+#define CS42L43_OTP_REVISION_ID 0x0000B02C
+#define CS42L43_ASPTX1_INPUT 0x0000C200
+#define CS42L43_ASPTX2_INPUT 0x0000C210
+#define CS42L43_ASPTX3_INPUT 0x0000C220
+#define CS42L43_ASPTX4_INPUT 0x0000C230
+#define CS42L43_ASPTX5_INPUT 0x0000C240
+#define CS42L43_ASPTX6_INPUT 0x0000C250
+#define CS42L43_SWIRE_DP1_CH1_INPUT 0x0000C280
+#define CS42L43_SWIRE_DP1_CH2_INPUT 0x0000C290
+#define CS42L43_SWIRE_DP1_CH3_INPUT 0x0000C2A0
+#define CS42L43_SWIRE_DP1_CH4_INPUT 0x0000C2B0
+#define CS42L43_SWIRE_DP2_CH1_INPUT 0x0000C2C0
+#define CS42L43_SWIRE_DP2_CH2_INPUT 0x0000C2D0
+#define CS42L43_SWIRE_DP3_CH1_INPUT 0x0000C2E0
+#define CS42L43_SWIRE_DP3_CH2_INPUT 0x0000C2F0
+#define CS42L43_SWIRE_DP4_CH1_INPUT 0x0000C300
+#define CS42L43_SWIRE_DP4_CH2_INPUT 0x0000C310
+#define CS42L43_ASRC_INT1_INPUT1 0x0000C400
+#define CS42L43_ASRC_INT2_INPUT1 0x0000C410
+#define CS42L43_ASRC_INT3_INPUT1 0x0000C420
+#define CS42L43_ASRC_INT4_INPUT1 0x0000C430
+#define CS42L43_ASRC_DEC1_INPUT1 0x0000C440
+#define CS42L43_ASRC_DEC2_INPUT1 0x0000C450
+#define CS42L43_ASRC_DEC3_INPUT1 0x0000C460
+#define CS42L43_ASRC_DEC4_INPUT1 0x0000C470
+#define CS42L43_ISRC1INT1_INPUT1 0x0000C500
+#define CS42L43_ISRC1INT2_INPUT1 0x0000C510
+#define CS42L43_ISRC1DEC1_INPUT1 0x0000C520
+#define CS42L43_ISRC1DEC2_INPUT1 0x0000C530
+#define CS42L43_ISRC2INT1_INPUT1 0x0000C540
+#define CS42L43_ISRC2INT2_INPUT1 0x0000C550
+#define CS42L43_ISRC2DEC1_INPUT1 0x0000C560
+#define CS42L43_ISRC2DEC2_INPUT1 0x0000C570
+#define CS42L43_EQ1MIX_INPUT1 0x0000C580
+#define CS42L43_EQ1MIX_INPUT2 0x0000C584
+#define CS42L43_EQ1MIX_INPUT3 0x0000C588
+#define CS42L43_EQ1MIX_INPUT4 0x0000C58C
+#define CS42L43_EQ2MIX_INPUT1 0x0000C590
+#define CS42L43_EQ2MIX_INPUT2 0x0000C594
+#define CS42L43_EQ2MIX_INPUT3 0x0000C598
+#define CS42L43_EQ2MIX_INPUT4 0x0000C59C
+#define CS42L43_SPDIF1_INPUT1 0x0000C600
+#define CS42L43_SPDIF2_INPUT1 0x0000C610
+#define CS42L43_AMP1MIX_INPUT1 0x0000C620
+#define CS42L43_AMP1MIX_INPUT2 0x0000C624
+#define CS42L43_AMP1MIX_INPUT3 0x0000C628
+#define CS42L43_AMP1MIX_INPUT4 0x0000C62C
+#define CS42L43_AMP2MIX_INPUT1 0x0000C630
+#define CS42L43_AMP2MIX_INPUT2 0x0000C634
+#define CS42L43_AMP2MIX_INPUT3 0x0000C638
+#define CS42L43_AMP2MIX_INPUT4 0x0000C63C
+#define CS42L43_AMP3MIX_INPUT1 0x0000C640
+#define CS42L43_AMP3MIX_INPUT2 0x0000C644
+#define CS42L43_AMP3MIX_INPUT3 0x0000C648
+#define CS42L43_AMP3MIX_INPUT4 0x0000C64C
+#define CS42L43_AMP4MIX_INPUT1 0x0000C650
+#define CS42L43_AMP4MIX_INPUT2 0x0000C654
+#define CS42L43_AMP4MIX_INPUT3 0x0000C658
+#define CS42L43_AMP4MIX_INPUT4 0x0000C65C
+#define CS42L43_ASRC_INT_ENABLES 0x0000E000
+#define CS42L43_ASRC_DEC_ENABLES 0x0000E004
+#define CS42L43_PDNCNTL 0x00010000
+#define CS42L43_RINGSENSE_DEB_CTRL 0x0001001C
+#define CS42L43_TIPSENSE_DEB_CTRL 0x00010020
+#define CS42L43_TIP_RING_SENSE_INTERRUPT_STATUS 0x00010028
+#define CS42L43_HS2 0x00010040
+#define CS42L43_HS_STAT 0x00010048
+#define CS42L43_MCU_SW_INTERRUPT 0x00010094
+#define CS42L43_STEREO_MIC_CTRL 0x000100A4
+#define CS42L43_STEREO_MIC_CLAMP_CTRL 0x000100C4
+#define CS42L43_BLOCK_EN2 0x00010104
+#define CS42L43_BLOCK_EN3 0x00010108
+#define CS42L43_BLOCK_EN4 0x0001010C
+#define CS42L43_BLOCK_EN5 0x00010110
+#define CS42L43_BLOCK_EN6 0x00010114
+#define CS42L43_BLOCK_EN7 0x00010118
+#define CS42L43_BLOCK_EN8 0x0001011C
+#define CS42L43_BLOCK_EN9 0x00010120
+#define CS42L43_BLOCK_EN10 0x00010124
+#define CS42L43_BLOCK_EN11 0x00010128
+#define CS42L43_TONE_CH1_CTRL 0x00010134
+#define CS42L43_TONE_CH2_CTRL 0x00010138
+#define CS42L43_MIC_DETECT_CONTROL_1 0x00011074
+#define CS42L43_DETECT_STATUS_1 0x0001107C
+#define CS42L43_HS_BIAS_SENSE_AND_CLAMP_AUTOCONTROL 0x00011090
+#define CS42L43_MIC_DETECT_CONTROL_ANDROID 0x000110B0
+#define CS42L43_ISRC1_CTRL 0x00012004
+#define CS42L43_ISRC2_CTRL 0x00013004
+#define CS42L43_CTRL_REG 0x00014000
+#define CS42L43_FDIV_FRAC 0x00014004
+#define CS42L43_CAL_RATIO 0x00014008
+#define CS42L43_SPI_CLK_CONFIG1 0x00016004
+#define CS42L43_SPI_CONFIG1 0x00016010
+#define CS42L43_SPI_CONFIG2 0x00016014
+#define CS42L43_SPI_CONFIG3 0x00016018
+#define CS42L43_SPI_CONFIG4 0x00016024
+#define CS42L43_SPI_STATUS1 0x00016100
+#define CS42L43_SPI_STATUS2 0x00016104
+#define CS42L43_TRAN_CONFIG1 0x00016200
+#define CS42L43_TRAN_CONFIG2 0x00016204
+#define CS42L43_TRAN_CONFIG3 0x00016208
+#define CS42L43_TRAN_CONFIG4 0x0001620C
+#define CS42L43_TRAN_CONFIG5 0x00016220
+#define CS42L43_TRAN_CONFIG6 0x00016224
+#define CS42L43_TRAN_CONFIG7 0x00016228
+#define CS42L43_TRAN_CONFIG8 0x0001622C
+#define CS42L43_TRAN_STATUS1 0x00016300
+#define CS42L43_TRAN_STATUS2 0x00016304
+#define CS42L43_TRAN_STATUS3 0x00016308
+#define CS42L43_TX_DATA 0x00016400
+#define CS42L43_RX_DATA 0x00016600
+#define CS42L43_DACCNFG1 0x00017000
+#define CS42L43_DACCNFG2 0x00017004
+#define CS42L43_HPPATHVOL 0x0001700C
+#define CS42L43_PGAVOL 0x00017014
+#define CS42L43_LOADDETRESULTS 0x00017018
+#define CS42L43_LOADDETENA 0x00017024
+#define CS42L43_CTRL 0x00017028
+#define CS42L43_COEFF_DATA_IN0 0x00018000
+#define CS42L43_COEFF_RD_WR0 0x00018008
+#define CS42L43_INIT_DONE0 0x00018010
+#define CS42L43_START_EQZ0 0x00018014
+#define CS42L43_MUTE_EQ_IN0 0x0001801C
+#define CS42L43_DECIM_INT 0x0001B000
+#define CS42L43_EQ_INT 0x0001B004
+#define CS42L43_ASP_INT 0x0001B008
+#define CS42L43_PLL_INT 0x0001B00C
+#define CS42L43_SOFT_INT 0x0001B010
+#define CS42L43_SWIRE_INT 0x0001B014
+#define CS42L43_MSM_INT 0x0001B018
+#define CS42L43_ACC_DET_INT 0x0001B01C
+#define CS42L43_I2C_TGT_INT 0x0001B020
+#define CS42L43_SPI_MSTR_INT 0x0001B024
+#define CS42L43_SW_TO_SPI_BRIDGE_INT 0x0001B028
+#define CS42L43_OTP_INT 0x0001B02C
+#define CS42L43_CLASS_D_AMP_INT 0x0001B030
+#define CS42L43_GPIO_INT 0x0001B034
+#define CS42L43_ASRC_INT 0x0001B038
+#define CS42L43_HPOUT_INT 0x0001B03C
+#define CS42L43_DECIM_MASK 0x0001B0A0
+#define CS42L43_EQ_MIX_MASK 0x0001B0A4
+#define CS42L43_ASP_MASK 0x0001B0A8
+#define CS42L43_PLL_MASK 0x0001B0AC
+#define CS42L43_SOFT_MASK 0x0001B0B0
+#define CS42L43_SWIRE_MASK 0x0001B0B4
+#define CS42L43_MSM_MASK 0x0001B0B8
+#define CS42L43_ACC_DET_MASK 0x0001B0BC
+#define CS42L43_I2C_TGT_MASK 0x0001B0C0
+#define CS42L43_SPI_MSTR_MASK 0x0001B0C4
+#define CS42L43_SW_TO_SPI_BRIDGE_MASK 0x0001B0C8
+#define CS42L43_OTP_MASK 0x0001B0CC
+#define CS42L43_CLASS_D_AMP_MASK 0x0001B0D0
+#define CS42L43_GPIO_INT_MASK 0x0001B0D4
+#define CS42L43_ASRC_MASK 0x0001B0D8
+#define CS42L43_HPOUT_MASK 0x0001B0DC
+#define CS42L43_DECIM_INT_SHADOW 0x0001B300
+#define CS42L43_EQ_MIX_INT_SHADOW 0x0001B304
+#define CS42L43_ASP_INT_SHADOW 0x0001B308
+#define CS42L43_PLL_INT_SHADOW 0x0001B30C
+#define CS42L43_SOFT_INT_SHADOW 0x0001B310
+#define CS42L43_SWIRE_INT_SHADOW 0x0001B314
+#define CS42L43_MSM_INT_SHADOW 0x0001B318
+#define CS42L43_ACC_DET_INT_SHADOW 0x0001B31C
+#define CS42L43_I2C_TGT_INT_SHADOW 0x0001B320
+#define CS42L43_SPI_MSTR_INT_SHADOW 0x0001B324
+#define CS42L43_SW_TO_SPI_BRIDGE_SHADOW 0x0001B328
+#define CS42L43_OTP_INT_SHADOW 0x0001B32C
+#define CS42L43_CLASS_D_AMP_INT_SHADOW 0x0001B330
+#define CS42L43_GPIO_SHADOW 0x0001B334
+#define CS42L43_ASRC_SHADOW 0x0001B338
+#define CS42L43_HP_OUT_SHADOW 0x0001B33C
+#define CS42L43_BOOT_CONTROL 0x00101000
+#define CS42L43_BLOCK_EN 0x00101008
+#define CS42L43_SHUTTER_CONTROL 0x0010100C
+#define CS42L43_MCU_SW_REV 0x00114000
+#define CS42L43_PATCH_START_ADDR 0x00114004
+#define CS42L43_NEED_CONFIGS 0x0011400C
+#define CS42L43_BOOT_STATUS 0x0011401C
+#define CS42L43_FW_SH_BOOT_CFG_NEED_CONFIGS 0x0011F8F8
+#define CS42L43_FW_MISSION_CTRL_NEED_CONFIGS 0x0011FE00
+#define CS42L43_FW_MISSION_CTRL_HAVE_CONFIGS 0x0011FE04
+#define CS42L43_FW_MISSION_CTRL_MM_CTRL_SELECTION 0x0011FE0C
+#define CS42L43_FW_MISSION_CTRL_MM_MCU_CFG_REG 0x0011FE10
+#define CS42L43_MCU_RAM_MAX 0x0011FFFF
+
+/* CS42L43_DEVID */
+#define CS42L43_DEVID_VAL 0x00042A43
+
+/* CS42L43_GEN_INT_STAT_1 */
+#define CS42L43_INT_STAT_GEN1_MASK 0x00000001
+#define CS42L43_INT_STAT_GEN1_SHIFT 0
+
+/* CS42L43_SFT_RESET */
+#define CS42L43_SFT_RESET_MASK 0xFF000000
+#define CS42L43_SFT_RESET_SHIFT 24
+
+#define CS42L43_SFT_RESET_VAL 0x5A000000
+
+/* CS42L43_DRV_CTRL1 */
+#define CS42L43_ASP_DOUT_DRV_MASK 0x00038000
+#define CS42L43_ASP_DOUT_DRV_SHIFT 15
+#define CS42L43_ASP_FSYNC_DRV_MASK 0x00000E00
+#define CS42L43_ASP_FSYNC_DRV_SHIFT 9
+#define CS42L43_ASP_BCLK_DRV_MASK 0x000001C0
+#define CS42L43_ASP_BCLK_DRV_SHIFT 6
+
+/* CS42L43_DRV_CTRL3 */
+#define CS42L43_I2C_ADDR_DRV_MASK 0x30000000
+#define CS42L43_I2C_ADDR_DRV_SHIFT 28
+#define CS42L43_I2C_SDA_DRV_MASK 0x0C000000
+#define CS42L43_I2C_SDA_DRV_SHIFT 26
+#define CS42L43_PDMOUT2_CLK_DRV_MASK 0x00E00000
+#define CS42L43_PDMOUT2_CLK_DRV_SHIFT 21
+#define CS42L43_PDMOUT2_DATA_DRV_MASK 0x001C0000
+#define CS42L43_PDMOUT2_DATA_DRV_SHIFT 18
+#define CS42L43_PDMOUT1_CLK_DRV_MASK 0x00038000
+#define CS42L43_PDMOUT1_CLK_DRV_SHIFT 15
+#define CS42L43_PDMOUT1_DATA_DRV_MASK 0x00007000
+#define CS42L43_PDMOUT1_DATA_DRV_SHIFT 12
+#define CS42L43_SPI_MISO_DRV_MASK 0x00000038
+#define CS42L43_SPI_MISO_DRV_SHIFT 3
+
+/* CS42L43_DRV_CTRL4 */
+#define CS42L43_GPIO3_DRV_MASK 0x00000E00
+#define CS42L43_GPIO3_DRV_SHIFT 9
+#define CS42L43_GPIO2_DRV_MASK 0x000001C0
+#define CS42L43_GPIO2_DRV_SHIFT 6
+#define CS42L43_GPIO1_DRV_MASK 0x00000038
+#define CS42L43_GPIO1_DRV_SHIFT 3
+
+/* CS42L43_DRV_CTRL_5 */
+#define CS42L43_I2C_SCL_DRV_MASK 0x18000000
+#define CS42L43_I2C_SCL_DRV_SHIFT 27
+#define CS42L43_SPI_SCK_DRV_MASK 0x07000000
+#define CS42L43_SPI_SCK_DRV_SHIFT 24
+#define CS42L43_SPI_MOSI_DRV_MASK 0x00E00000
+#define CS42L43_SPI_MOSI_DRV_SHIFT 21
+#define CS42L43_SPI_SSB_DRV_MASK 0x001C0000
+#define CS42L43_SPI_SSB_DRV_SHIFT 18
+#define CS42L43_ASP_DIN_DRV_MASK 0x000001C0
+#define CS42L43_ASP_DIN_DRV_SHIFT 6
+
+/* CS42L43_GPIO_CTRL1 */
+#define CS42L43_GPIO3_POL_MASK 0x00040000
+#define CS42L43_GPIO3_POL_SHIFT 18
+#define CS42L43_GPIO2_POL_MASK 0x00020000
+#define CS42L43_GPIO2_POL_SHIFT 17
+#define CS42L43_GPIO1_POL_MASK 0x00010000
+#define CS42L43_GPIO1_POL_SHIFT 16
+#define CS42L43_GPIO3_LVL_MASK 0x00000400
+#define CS42L43_GPIO3_LVL_SHIFT 10
+#define CS42L43_GPIO2_LVL_MASK 0x00000200
+#define CS42L43_GPIO2_LVL_SHIFT 9
+#define CS42L43_GPIO1_LVL_MASK 0x00000100
+#define CS42L43_GPIO1_LVL_SHIFT 8
+#define CS42L43_GPIO3_DIR_MASK 0x00000004
+#define CS42L43_GPIO3_DIR_SHIFT 2
+#define CS42L43_GPIO2_DIR_MASK 0x00000002
+#define CS42L43_GPIO2_DIR_SHIFT 1
+#define CS42L43_GPIO1_DIR_MASK 0x00000001
+#define CS42L43_GPIO1_DIR_SHIFT 0
+
+/* CS42L43_GPIO_CTRL2 */
+#define CS42L43_GPIO3_DEGLITCH_BYP_MASK 0x00000004
+#define CS42L43_GPIO3_DEGLITCH_BYP_SHIFT 2
+#define CS42L43_GPIO2_DEGLITCH_BYP_MASK 0x00000002
+#define CS42L43_GPIO2_DEGLITCH_BYP_SHIFT 1
+#define CS42L43_GPIO1_DEGLITCH_BYP_MASK 0x00000001
+#define CS42L43_GPIO1_DEGLITCH_BYP_SHIFT 0
+
+/* CS42L43_GPIO_STS */
+#define CS42L43_GPIO3_STS_MASK 0x00000004
+#define CS42L43_GPIO3_STS_SHIFT 2
+#define CS42L43_GPIO2_STS_MASK 0x00000002
+#define CS42L43_GPIO2_STS_SHIFT 1
+#define CS42L43_GPIO1_STS_MASK 0x00000001
+#define CS42L43_GPIO1_STS_SHIFT 0
+
+/* CS42L43_GPIO_FN_SEL */
+#define CS42L43_GPIO3_FN_SEL_MASK 0x00000004
+#define CS42L43_GPIO3_FN_SEL_SHIFT 2
+#define CS42L43_GPIO1_FN_SEL_MASK 0x00000001
+#define CS42L43_GPIO1_FN_SEL_SHIFT 0
+
+/* CS42L43_MCLK_SRC_SEL */
+#define CS42L43_OSC_PLL_MCLK_SEL_MASK 0x00000001
+#define CS42L43_OSC_PLL_MCLK_SEL_SHIFT 0
+
+/* CS42L43_SAMPLE_RATE1..CS42L43_SAMPLE_RATE4 */
+#define CS42L43_SAMPLE_RATE_MASK 0x0000001F
+#define CS42L43_SAMPLE_RATE_SHIFT 0
+
+/* CS42L43_PLL_CONTROL */
+#define CS42L43_PLL_REFCLK_EN_MASK 0x00000008
+#define CS42L43_PLL_REFCLK_EN_SHIFT 3
+#define CS42L43_PLL_REFCLK_DIV_MASK 0x00000006
+#define CS42L43_PLL_REFCLK_DIV_SHIFT 1
+#define CS42L43_PLL_REFCLK_SRC_MASK 0x00000001
+#define CS42L43_PLL_REFCLK_SRC_SHIFT 0
+
+/* CS42L43_FS_SELECT1 */
+#define CS42L43_ASP_RATE_MASK 0x00000003
+#define CS42L43_ASP_RATE_SHIFT 0
+
+/* CS42L43_FS_SELECT2 */
+#define CS42L43_ASRC_DEC_OUT_RATE_MASK 0x000000C0
+#define CS42L43_ASRC_DEC_OUT_RATE_SHIFT 6
+#define CS42L43_ASRC_INT_OUT_RATE_MASK 0x00000030
+#define CS42L43_ASRC_INT_OUT_RATE_SHIFT 4
+#define CS42L43_ASRC_DEC_IN_RATE_MASK 0x0000000C
+#define CS42L43_ASRC_DEC_IN_RATE_SHIFT 2
+#define CS42L43_ASRC_INT_IN_RATE_MASK 0x00000003
+#define CS42L43_ASRC_INT_IN_RATE_SHIFT 0
+
+/* CS42L43_FS_SELECT3 */
+#define CS42L43_HPOUT_RATE_MASK 0x0000C000
+#define CS42L43_HPOUT_RATE_SHIFT 14
+#define CS42L43_EQZ_RATE_MASK 0x00003000
+#define CS42L43_EQZ_RATE_SHIFT 12
+#define CS42L43_DIAGGEN_RATE_MASK 0x00000C00
+#define CS42L43_DIAGGEN_RATE_SHIFT 10
+#define CS42L43_DECIM_CH4_RATE_MASK 0x00000300
+#define CS42L43_DECIM_CH4_RATE_SHIFT 8
+#define CS42L43_DECIM_CH3_RATE_MASK 0x000000C0
+#define CS42L43_DECIM_CH3_RATE_SHIFT 6
+#define CS42L43_DECIM_CH2_RATE_MASK 0x00000030
+#define CS42L43_DECIM_CH2_RATE_SHIFT 4
+#define CS42L43_DECIM_CH1_RATE_MASK 0x0000000C
+#define CS42L43_DECIM_CH1_RATE_SHIFT 2
+#define CS42L43_AMP1_2_RATE_MASK 0x00000003
+#define CS42L43_AMP1_2_RATE_SHIFT 0
+
+/* CS42L43_FS_SELECT4 */
+#define CS42L43_SW_DP7_RATE_MASK 0x00C00000
+#define CS42L43_SW_DP7_RATE_SHIFT 22
+#define CS42L43_SW_DP6_RATE_MASK 0x00300000
+#define CS42L43_SW_DP6_RATE_SHIFT 20
+#define CS42L43_SPDIF_RATE_MASK 0x000C0000
+#define CS42L43_SPDIF_RATE_SHIFT 18
+#define CS42L43_SW_DP5_RATE_MASK 0x00030000
+#define CS42L43_SW_DP5_RATE_SHIFT 16
+#define CS42L43_SW_DP4_RATE_MASK 0x0000C000
+#define CS42L43_SW_DP4_RATE_SHIFT 14
+#define CS42L43_SW_DP3_RATE_MASK 0x00003000
+#define CS42L43_SW_DP3_RATE_SHIFT 12
+#define CS42L43_SW_DP2_RATE_MASK 0x00000C00
+#define CS42L43_SW_DP2_RATE_SHIFT 10
+#define CS42L43_SW_DP1_RATE_MASK 0x00000300
+#define CS42L43_SW_DP1_RATE_SHIFT 8
+#define CS42L43_ISRC2_LOW_RATE_MASK 0x000000C0
+#define CS42L43_ISRC2_LOW_RATE_SHIFT 6
+#define CS42L43_ISRC2_HIGH_RATE_MASK 0x00000030
+#define CS42L43_ISRC2_HIGH_RATE_SHIFT 4
+#define CS42L43_ISRC1_LOW_RATE_MASK 0x0000000C
+#define CS42L43_ISRC1_LOW_RATE_SHIFT 2
+#define CS42L43_ISRC1_HIGH_RATE_MASK 0x00000003
+#define CS42L43_ISRC1_HIGH_RATE_SHIFT 0
+
+/* CS42L43_PDM_CONTROL */
+#define CS42L43_PDM2_CLK_DIV_MASK 0x0000000C
+#define CS42L43_PDM2_CLK_DIV_SHIFT 2
+#define CS42L43_PDM1_CLK_DIV_MASK 0x00000003
+#define CS42L43_PDM1_CLK_DIV_SHIFT 0
+
+/* CS42L43_ASP_CLK_CONFIG1 */
+#define CS42L43_ASP_BCLK_N_MASK 0x03FF0000
+#define CS42L43_ASP_BCLK_N_SHIFT 16
+#define CS42L43_ASP_BCLK_M_MASK 0x000003FF
+#define CS42L43_ASP_BCLK_M_SHIFT 0
+
+/* CS42L43_ASP_CLK_CONFIG2 */
+#define CS42L43_ASP_MASTER_MODE_MASK 0x00000002
+#define CS42L43_ASP_MASTER_MODE_SHIFT 1
+#define CS42L43_ASP_BCLK_INV_MASK 0x00000001
+#define CS42L43_ASP_BCLK_INV_SHIFT 0
+
+/* CS42L43_OSC_DIV_SEL */
+#define CS42L43_OSC_DIV2_EN_MASK 0x00000001
+#define CS42L43_OSC_DIV2_EN_SHIFT 0
+
+/* CS42L43_ADC_B_CTRL1..CS42L43_ADC_B_CTRL2 */
+#define CS42L43_PGA_WIDESWING_MODE_EN_MASK 0x00000080
+#define CS42L43_PGA_WIDESWING_MODE_EN_SHIFT 7
+#define CS42L43_ADC_AIN_SEL_MASK 0x00000010
+#define CS42L43_ADC_AIN_SEL_SHIFT 4
+#define CS42L43_ADC_PGA_GAIN_MASK 0x0000000F
+#define CS42L43_ADC_PGA_GAIN_SHIFT 0
+
+/* CS42L43_DECIM_HPF_WNF_CTRL1..CS42L43_DECIM_HPF_WNF_CTRL4 */
+#define CS42L43_DECIM_WNF_CF_MASK 0x00000070
+#define CS42L43_DECIM_WNF_CF_SHIFT 4
+#define CS42L43_DECIM_WNF_EN_MASK 0x00000008
+#define CS42L43_DECIM_WNF_EN_SHIFT 3
+#define CS42L43_DECIM_HPF_CF_MASK 0x00000006
+#define CS42L43_DECIM_HPF_CF_SHIFT 1
+#define CS42L43_DECIM_HPF_EN_MASK 0x00000001
+#define CS42L43_DECIM_HPF_EN_SHIFT 0
+
+/* CS42L43_DMIC_PDM_CTRL */
+#define CS42L43_PDM2R_INV_MASK 0x00000020
+#define CS42L43_PDM2R_INV_SHIFT 5
+#define CS42L43_PDM2L_INV_MASK 0x00000010
+#define CS42L43_PDM2L_INV_SHIFT 4
+#define CS42L43_PDM1R_INV_MASK 0x00000008
+#define CS42L43_PDM1R_INV_SHIFT 3
+#define CS42L43_PDM1L_INV_MASK 0x00000004
+#define CS42L43_PDM1L_INV_SHIFT 2
+
+/* CS42L43_DECIM_VOL_CTRL_CH1_CH2 */
+#define CS42L43_DECIM2_MUTE_MASK 0x80000000
+#define CS42L43_DECIM2_MUTE_SHIFT 31
+#define CS42L43_DECIM2_VOL_MASK 0x3FC00000
+#define CS42L43_DECIM2_VOL_SHIFT 22
+#define CS42L43_DECIM2_VD_RAMP_MASK 0x00380000
+#define CS42L43_DECIM2_VD_RAMP_SHIFT 19
+#define CS42L43_DECIM2_VI_RAMP_MASK 0x00070000
+#define CS42L43_DECIM2_VI_RAMP_SHIFT 16
+#define CS42L43_DECIM1_MUTE_MASK 0x00008000
+#define CS42L43_DECIM1_MUTE_SHIFT 15
+#define CS42L43_DECIM1_VOL_MASK 0x00003FC0
+#define CS42L43_DECIM1_VOL_SHIFT 6
+#define CS42L43_DECIM1_VD_RAMP_MASK 0x00000038
+#define CS42L43_DECIM1_VD_RAMP_SHIFT 3
+#define CS42L43_DECIM1_VI_RAMP_MASK 0x00000007
+#define CS42L43_DECIM1_VI_RAMP_SHIFT 0
+
+/* CS42L43_DECIM_VOL_CTRL_CH3_CH4 */
+#define CS42L43_DECIM4_MUTE_MASK 0x80000000
+#define CS42L43_DECIM4_MUTE_SHIFT 31
+#define CS42L43_DECIM4_VOL_MASK 0x3FC00000
+#define CS42L43_DECIM4_VOL_SHIFT 22
+#define CS42L43_DECIM4_VD_RAMP_MASK 0x00380000
+#define CS42L43_DECIM4_VD_RAMP_SHIFT 19
+#define CS42L43_DECIM4_VI_RAMP_MASK 0x00070000
+#define CS42L43_DECIM4_VI_RAMP_SHIFT 16
+#define CS42L43_DECIM3_MUTE_MASK 0x00008000
+#define CS42L43_DECIM3_MUTE_SHIFT 15
+#define CS42L43_DECIM3_VOL_MASK 0x00003FC0
+#define CS42L43_DECIM3_VOL_SHIFT 6
+#define CS42L43_DECIM3_VD_RAMP_MASK 0x00000038
+#define CS42L43_DECIM3_VD_RAMP_SHIFT 3
+#define CS42L43_DECIM3_VI_RAMP_MASK 0x00000007
+#define CS42L43_DECIM3_VI_RAMP_SHIFT 0
+
+/* CS42L43_DECIM_VOL_CTRL_UPDATE */
+#define CS42L43_DECIM4_VOL_UPDATE_MASK 0x00000008
+#define CS42L43_DECIM4_VOL_UPDATE_SHIFT 3
+#define CS42L43_DECIM3_VOL_UPDATE_MASK 0x00000004
+#define CS42L43_DECIM3_VOL_UPDATE_SHIFT 2
+#define CS42L43_DECIM2_VOL_UPDATE_MASK 0x00000002
+#define CS42L43_DECIM2_VOL_UPDATE_SHIFT 1
+#define CS42L43_DECIM1_VOL_UPDATE_MASK 0x00000001
+#define CS42L43_DECIM1_VOL_UPDATE_SHIFT 0
+
+/* CS42L43_INTP_VOLUME_CTRL1..CS42L43_INTP_VOLUME_CTRL2 */
+#define CS42L43_AMP1_2_VU_MASK 0x00000200
+#define CS42L43_AMP1_2_VU_SHIFT 9
+#define CS42L43_AMP_MUTE_MASK 0x00000100
+#define CS42L43_AMP_MUTE_SHIFT 8
+#define CS42L43_AMP_VOL_MASK 0x000000FF
+#define CS42L43_AMP_VOL_SHIFT 0
+
+/* CS42L43_AMP1_2_VOL_RAMP */
+#define CS42L43_AMP1_2_VD_RAMP_MASK 0x00000070
+#define CS42L43_AMP1_2_VD_RAMP_SHIFT 4
+#define CS42L43_AMP1_2_VI_RAMP_MASK 0x00000007
+#define CS42L43_AMP1_2_VI_RAMP_SHIFT 0
+
+/* CS42L43_ASP_CTRL */
+#define CS42L43_ASP_FSYNC_MODE_MASK 0x00000004
+#define CS42L43_ASP_FSYNC_MODE_SHIFT 2
+#define CS42L43_ASP_BCLK_EN_MASK 0x00000002
+#define CS42L43_ASP_BCLK_EN_SHIFT 1
+#define CS42L43_ASP_FSYNC_EN_MASK 0x00000001
+#define CS42L43_ASP_FSYNC_EN_SHIFT 0
+
+/* CS42L43_ASP_FSYNC_CTRL1 */
+#define CS42L43_ASP_FSYNC_M_MASK 0x0007FFFF
+#define CS42L43_ASP_FSYNC_M_SHIFT 0
+
+/* CS42L43_ASP_FSYNC_CTRL3 */
+#define CS42L43_ASP_FSYNC_IN_INV_MASK 0x00000002
+#define CS42L43_ASP_FSYNC_IN_INV_SHIFT 1
+#define CS42L43_ASP_FSYNC_OUT_INV_MASK 0x00000001
+#define CS42L43_ASP_FSYNC_OUT_INV_SHIFT 0
+
+/* CS42L43_ASP_FSYNC_CTRL4 */
+#define CS42L43_ASP_NUM_BCLKS_PER_FSYNC_MASK 0x00001FFE
+#define CS42L43_ASP_NUM_BCLKS_PER_FSYNC_SHIFT 1
+
+/* CS42L43_ASP_DATA_CTRL */
+#define CS42L43_ASP_FSYNC_FRAME_START_PHASE_MASK 0x00000008
+#define CS42L43_ASP_FSYNC_FRAME_START_PHASE_SHIFT 3
+#define CS42L43_ASP_FSYNC_FRAME_START_DLY_MASK 0x00000007
+#define CS42L43_ASP_FSYNC_FRAME_START_DLY_SHIFT 0
+
+/* CS42L43_ASP_RX_EN */
+#define CS42L43_ASP_RX_CH6_EN_MASK 0x00000020
+#define CS42L43_ASP_RX_CH6_EN_SHIFT 5
+#define CS42L43_ASP_RX_CH5_EN_MASK 0x00000010
+#define CS42L43_ASP_RX_CH5_EN_SHIFT 4
+#define CS42L43_ASP_RX_CH4_EN_MASK 0x00000008
+#define CS42L43_ASP_RX_CH4_EN_SHIFT 3
+#define CS42L43_ASP_RX_CH3_EN_MASK 0x00000004
+#define CS42L43_ASP_RX_CH3_EN_SHIFT 2
+#define CS42L43_ASP_RX_CH2_EN_MASK 0x00000002
+#define CS42L43_ASP_RX_CH2_EN_SHIFT 1
+#define CS42L43_ASP_RX_CH1_EN_MASK 0x00000001
+#define CS42L43_ASP_RX_CH1_EN_SHIFT 0
+
+/* CS42L43_ASP_TX_EN */
+#define CS42L43_ASP_TX_CH6_EN_MASK 0x00000020
+#define CS42L43_ASP_TX_CH6_EN_SHIFT 5
+#define CS42L43_ASP_TX_CH5_EN_MASK 0x00000010
+#define CS42L43_ASP_TX_CH5_EN_SHIFT 4
+#define CS42L43_ASP_TX_CH4_EN_MASK 0x00000008
+#define CS42L43_ASP_TX_CH4_EN_SHIFT 3
+#define CS42L43_ASP_TX_CH3_EN_MASK 0x00000004
+#define CS42L43_ASP_TX_CH3_EN_SHIFT 2
+#define CS42L43_ASP_TX_CH2_EN_MASK 0x00000002
+#define CS42L43_ASP_TX_CH2_EN_SHIFT 1
+#define CS42L43_ASP_TX_CH1_EN_MASK 0x00000001
+#define CS42L43_ASP_TX_CH1_EN_SHIFT 0
+
+/* CS42L43_ASP_RX_CH1_CTRL..CS42L43_ASP_TX_CH6_CTRL */
+#define CS42L43_ASP_CH_WIDTH_MASK 0x001F0000
+#define CS42L43_ASP_CH_WIDTH_SHIFT 16
+#define CS42L43_ASP_CH_SLOT_MASK 0x00001FFE
+#define CS42L43_ASP_CH_SLOT_SHIFT 1
+#define CS42L43_ASP_CH_SLOT_PHASE_MASK 0x00000001
+#define CS42L43_ASP_CH_SLOT_PHASE_SHIFT 0
+
+/* CS42L43_ASPTX1_INPUT..CS42L43_AMP4MIX_INPUT4 */
+#define CS42L43_MIXER_VOL_MASK 0x00FE0000
+#define CS42L43_MIXER_VOL_SHIFT 17
+#define CS42L43_MIXER_SRC_MASK 0x000001FF
+#define CS42L43_MIXER_SRC_SHIFT 0
+
+/* CS42L43_ASRC_INT_ENABLES */
+#define CS42L43_ASRC_INT4_EN_MASK 0x00000008
+#define CS42L43_ASRC_INT4_EN_SHIFT 3
+#define CS42L43_ASRC_INT3_EN_MASK 0x00000004
+#define CS42L43_ASRC_INT3_EN_SHIFT 2
+#define CS42L43_ASRC_INT2_EN_MASK 0x00000002
+#define CS42L43_ASRC_INT2_EN_SHIFT 1
+#define CS42L43_ASRC_INT1_EN_MASK 0x00000001
+#define CS42L43_ASRC_INT1_EN_SHIFT 0
+
+/* CS42L43_ASRC_DEC_ENABLES */
+#define CS42L43_ASRC_DEC4_EN_MASK 0x00000008
+#define CS42L43_ASRC_DEC4_EN_SHIFT 3
+#define CS42L43_ASRC_DEC3_EN_MASK 0x00000004
+#define CS42L43_ASRC_DEC3_EN_SHIFT 2
+#define CS42L43_ASRC_DEC2_EN_MASK 0x00000002
+#define CS42L43_ASRC_DEC2_EN_SHIFT 1
+#define CS42L43_ASRC_DEC1_EN_MASK 0x00000001
+#define CS42L43_ASRC_DEC1_EN_SHIFT 0
+
+/* CS42L43_PDNCNTL */
+#define CS42L43_RING_SENSE_EN_MASK 0x00000002
+#define CS42L43_RING_SENSE_EN_SHIFT 1
+
+/* CS42L43_RINGSENSE_DEB_CTRL */
+#define CS42L43_RINGSENSE_INV_MASK 0x00000080
+#define CS42L43_RINGSENSE_INV_SHIFT 7
+#define CS42L43_RINGSENSE_PULLUP_PDNB_MASK 0x00000040
+#define CS42L43_RINGSENSE_PULLUP_PDNB_SHIFT 6
+#define CS42L43_RINGSENSE_FALLING_DB_TIME_MASK 0x00000038
+#define CS42L43_RINGSENSE_FALLING_DB_TIME_SHIFT 3
+#define CS42L43_RINGSENSE_RISING_DB_TIME_MASK 0x00000007
+#define CS42L43_RINGSENSE_RISING_DB_TIME_SHIFT 0
+
+/* CS42L43_TIPSENSE_DEB_CTRL */
+#define CS42L43_TIPSENSE_INV_MASK 0x00000080
+#define CS42L43_TIPSENSE_INV_SHIFT 7
+#define CS42L43_TIPSENSE_FALLING_DB_TIME_MASK 0x00000038
+#define CS42L43_TIPSENSE_FALLING_DB_TIME_SHIFT 3
+#define CS42L43_TIPSENSE_RISING_DB_TIME_MASK 0x00000007
+#define CS42L43_TIPSENSE_RISING_DB_TIME_SHIFT 0
+
+/* CS42L43_TIP_RING_SENSE_INTERRUPT_STATUS */
+#define CS42L43_TIPSENSE_UNPLUG_DB_STS_MASK 0x00000008
+#define CS42L43_TIPSENSE_UNPLUG_DB_STS_SHIFT 3
+#define CS42L43_TIPSENSE_PLUG_DB_STS_MASK 0x00000004
+#define CS42L43_TIPSENSE_PLUG_DB_STS_SHIFT 2
+#define CS42L43_RINGSENSE_UNPLUG_DB_STS_MASK 0x00000002
+#define CS42L43_RINGSENSE_UNPLUG_DB_STS_SHIFT 1
+#define CS42L43_RINGSENSE_PLUG_DB_STS_MASK 0x00000001
+#define CS42L43_RINGSENSE_PLUG_DB_STS_SHIFT 0
+
+/* CS42L43_HS2 */
+#define CS42L43_HS_CLAMP_DISABLE_MASK 0x10000000
+#define CS42L43_HS_CLAMP_DISABLE_SHIFT 28
+#define CS42L43_HSBIAS_RAMP_MASK 0x0C000000
+#define CS42L43_HSBIAS_RAMP_SHIFT 26
+#define CS42L43_HSDET_MODE_MASK 0x00018000
+#define CS42L43_HSDET_MODE_SHIFT 15
+#define CS42L43_HSDET_MANUAL_MODE_MASK 0x00006000
+#define CS42L43_HSDET_MANUAL_MODE_SHIFT 13
+#define CS42L43_AUTO_HSDET_TIME_MASK 0x00000700
+#define CS42L43_AUTO_HSDET_TIME_SHIFT 8
+#define CS42L43_AMP3_4_GNDREF_HS3_SEL_MASK 0x00000080
+#define CS42L43_AMP3_4_GNDREF_HS3_SEL_SHIFT 7
+#define CS42L43_AMP3_4_GNDREF_HS4_SEL_MASK 0x00000040
+#define CS42L43_AMP3_4_GNDREF_HS4_SEL_SHIFT 6
+#define CS42L43_HSBIAS_GNDREF_HS3_SEL_MASK 0x00000020
+#define CS42L43_HSBIAS_GNDREF_HS3_SEL_SHIFT 5
+#define CS42L43_HSBIAS_GNDREF_HS4_SEL_MASK 0x00000010
+#define CS42L43_HSBIAS_GNDREF_HS4_SEL_SHIFT 4
+#define CS42L43_HSBIAS_OUT_HS3_SEL_MASK 0x00000008
+#define CS42L43_HSBIAS_OUT_HS3_SEL_SHIFT 3
+#define CS42L43_HSBIAS_OUT_HS4_SEL_MASK 0x00000004
+#define CS42L43_HSBIAS_OUT_HS4_SEL_SHIFT 2
+#define CS42L43_HSGND_HS3_SEL_MASK 0x00000002
+#define CS42L43_HSGND_HS3_SEL_SHIFT 1
+#define CS42L43_HSGND_HS4_SEL_MASK 0x00000001
+#define CS42L43_HSGND_HS4_SEL_SHIFT 0
+
+/* CS42L43_HS_STAT */
+#define CS42L43_HSDET_TYPE_STS_MASK 0x00000007
+#define CS42L43_HSDET_TYPE_STS_SHIFT 0
+
+/* CS42L43_MCU_SW_INTERRUPT */
+#define CS42L43_CONTROL_IND_MASK 0x00000004
+#define CS42L43_CONTROL_IND_SHIFT 2
+#define CS42L43_CONFIGS_IND_MASK 0x00000002
+#define CS42L43_CONFIGS_IND_SHIFT 1
+#define CS42L43_PATCH_IND_MASK 0x00000001
+#define CS42L43_PATCH_IND_SHIFT 0
+
+/* CS42L43_STEREO_MIC_CTRL */
+#define CS42L43_HS2_BIAS_SENSE_EN_MASK 0x00000020
+#define CS42L43_HS2_BIAS_SENSE_EN_SHIFT 5
+#define CS42L43_HS1_BIAS_SENSE_EN_MASK 0x00000010
+#define CS42L43_HS1_BIAS_SENSE_EN_SHIFT 4
+#define CS42L43_HS2_BIAS_EN_MASK 0x00000008
+#define CS42L43_HS2_BIAS_EN_SHIFT 3
+#define CS42L43_HS1_BIAS_EN_MASK 0x00000004
+#define CS42L43_HS1_BIAS_EN_SHIFT 2
+#define CS42L43_JACK_STEREO_CONFIG_MASK 0x00000003
+#define CS42L43_JACK_STEREO_CONFIG_SHIFT 0
+
+/* CS42L43_STEREO_MIC_CLAMP_CTRL */
+#define CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_VAL_MASK 0x00000002
+#define CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_VAL_SHIFT 1
+#define CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_MASK 0x00000001
+#define CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_SHIFT 0
+
+/* CS42L43_BLOCK_EN2 */
+#define CS42L43_SPI_MSTR_EN_MASK 0x00000001
+#define CS42L43_SPI_MSTR_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN3 */
+#define CS42L43_PDM2_DIN_R_EN_MASK 0x00000020
+#define CS42L43_PDM2_DIN_R_EN_SHIFT 5
+#define CS42L43_PDM2_DIN_L_EN_MASK 0x00000010
+#define CS42L43_PDM2_DIN_L_EN_SHIFT 4
+#define CS42L43_PDM1_DIN_R_EN_MASK 0x00000008
+#define CS42L43_PDM1_DIN_R_EN_SHIFT 3
+#define CS42L43_PDM1_DIN_L_EN_MASK 0x00000004
+#define CS42L43_PDM1_DIN_L_EN_SHIFT 2
+#define CS42L43_ADC2_EN_MASK 0x00000002
+#define CS42L43_ADC2_EN_SHIFT 1
+#define CS42L43_ADC1_EN_MASK 0x00000001
+#define CS42L43_ADC1_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN4 */
+#define CS42L43_ASRC_DEC_BANK_EN_MASK 0x00000002
+#define CS42L43_ASRC_DEC_BANK_EN_SHIFT 1
+#define CS42L43_ASRC_INT_BANK_EN_MASK 0x00000001
+#define CS42L43_ASRC_INT_BANK_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN5 */
+#define CS42L43_ISRC2_BANK_EN_MASK 0x00000002
+#define CS42L43_ISRC2_BANK_EN_SHIFT 1
+#define CS42L43_ISRC1_BANK_EN_MASK 0x00000001
+#define CS42L43_ISRC1_BANK_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN6 */
+#define CS42L43_MIXER_EN_MASK 0x00000001
+#define CS42L43_MIXER_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN7 */
+#define CS42L43_EQ_EN_MASK 0x00000001
+#define CS42L43_EQ_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN8 */
+#define CS42L43_HP_EN_MASK 0x00000001
+#define CS42L43_HP_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN9 */
+#define CS42L43_TONE_EN_MASK 0x00000001
+#define CS42L43_TONE_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN10 */
+#define CS42L43_AMP2_EN_MASK 0x00000002
+#define CS42L43_AMP2_EN_SHIFT 1
+#define CS42L43_AMP1_EN_MASK 0x00000001
+#define CS42L43_AMP1_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN11 */
+#define CS42L43_SPDIF_EN_MASK 0x00000001
+#define CS42L43_SPDIF_EN_SHIFT 0
+
+/* CS42L43_TONE_CH1_CTRL..CS42L43_TONE_CH2_CTRL */
+#define CS42L43_TONE_FREQ_MASK 0x00000070
+#define CS42L43_TONE_FREQ_SHIFT 4
+#define CS42L43_TONE_SEL_MASK 0x0000000F
+#define CS42L43_TONE_SEL_SHIFT 0
+
+/* CS42L43_MIC_DETECT_CONTROL_1 */
+#define CS42L43_BUTTON_DETECT_MODE_MASK 0x00000018
+#define CS42L43_BUTTON_DETECT_MODE_SHIFT 3
+#define CS42L43_HSBIAS_MODE_MASK 0x00000006
+#define CS42L43_HSBIAS_MODE_SHIFT 1
+#define CS42L43_MIC_LVL_DET_DISABLE_MASK 0x00000001
+#define CS42L43_MIC_LVL_DET_DISABLE_SHIFT 0
+
+/* CS42L43_DETECT_STATUS_1 */
+#define CS42L43_HSDET_DC_STS_MASK 0x01FF0000
+#define CS42L43_HSDET_DC_STS_SHIFT 16
+#define CS42L43_JACKDET_STS_MASK 0x00000080
+#define CS42L43_JACKDET_STS_SHIFT 7
+#define CS42L43_HSBIAS_CLAMP_STS_MASK 0x00000040
+#define CS42L43_HSBIAS_CLAMP_STS_SHIFT 6
+
+/* CS42L43_HS_BIAS_SENSE_AND_CLAMP_AUTOCONTROL */
+#define CS42L43_JACKDET_MODE_MASK 0xC0000000
+#define CS42L43_JACKDET_MODE_SHIFT 30
+#define CS42L43_JACKDET_INV_MASK 0x20000000
+#define CS42L43_JACKDET_INV_SHIFT 29
+#define CS42L43_JACKDET_DB_TIME_MASK 0x03000000
+#define CS42L43_JACKDET_DB_TIME_SHIFT 24
+#define CS42L43_S0_AUTO_ADCMUTE_DISABLE_MASK 0x00800000
+#define CS42L43_S0_AUTO_ADCMUTE_DISABLE_SHIFT 23
+#define CS42L43_HSBIAS_SENSE_EN_MASK 0x00000080
+#define CS42L43_HSBIAS_SENSE_EN_SHIFT 7
+#define CS42L43_AUTO_HSBIAS_CLAMP_EN_MASK 0x00000040
+#define CS42L43_AUTO_HSBIAS_CLAMP_EN_SHIFT 6
+#define CS42L43_JACKDET_SENSE_EN_MASK 0x00000020
+#define CS42L43_JACKDET_SENSE_EN_SHIFT 5
+#define CS42L43_HSBIAS_SENSE_TRIP_MASK 0x00000007
+#define CS42L43_HSBIAS_SENSE_TRIP_SHIFT 0
+
+/* CS42L43_MIC_DETECT_CONTROL_ANDROID */
+#define CS42L43_HSDET_LVL_COMBWIDTH_MASK 0xC0000000
+#define CS42L43_HSDET_LVL_COMBWIDTH_SHIFT 30
+#define CS42L43_HSDET_LVL2_THRESH_MASK 0x01FF0000
+#define CS42L43_HSDET_LVL2_THRESH_SHIFT 16
+#define CS42L43_HSDET_LVL1_THRESH_MASK 0x000001FF
+#define CS42L43_HSDET_LVL1_THRESH_SHIFT 0
+
+/* CS42L43_ISRC1_CTRL..CS42L43_ISRC2_CTRL */
+#define CS42L43_ISRC_INT2_EN_MASK 0x00000200
+#define CS42L43_ISRC_INT2_EN_SHIFT 9
+#define CS42L43_ISRC_INT1_EN_MASK 0x00000100
+#define CS42L43_ISRC_INT1_EN_SHIFT 8
+#define CS42L43_ISRC_DEC2_EN_MASK 0x00000002
+#define CS42L43_ISRC_DEC2_EN_SHIFT 1
+#define CS42L43_ISRC_DEC1_EN_MASK 0x00000001
+#define CS42L43_ISRC_DEC1_EN_SHIFT 0
+
+/* CS42L43_CTRL_REG */
+#define CS42L43_PLL_MODE_BYPASS_500_MASK 0x00000004
+#define CS42L43_PLL_MODE_BYPASS_500_SHIFT 2
+#define CS42L43_PLL_MODE_BYPASS_1029_MASK 0x00000002
+#define CS42L43_PLL_MODE_BYPASS_1029_SHIFT 1
+#define CS42L43_PLL_EN_MASK 0x00000001
+#define CS42L43_PLL_EN_SHIFT 0
+
+/* CS42L43_FDIV_FRAC */
+#define CS42L43_PLL_DIV_INT_MASK 0xFF000000
+#define CS42L43_PLL_DIV_INT_SHIFT 24
+#define CS42L43_PLL_DIV_FRAC_BYTE2_MASK 0x00FF0000
+#define CS42L43_PLL_DIV_FRAC_BYTE2_SHIFT 16
+#define CS42L43_PLL_DIV_FRAC_BYTE1_MASK 0x0000FF00
+#define CS42L43_PLL_DIV_FRAC_BYTE1_SHIFT 8
+#define CS42L43_PLL_DIV_FRAC_BYTE0_MASK 0x000000FF
+#define CS42L43_PLL_DIV_FRAC_BYTE0_SHIFT 0
+
+/* CS42L43_CAL_RATIO */
+#define CS42L43_PLL_CAL_RATIO_MASK 0x000000FF
+#define CS42L43_PLL_CAL_RATIO_SHIFT 0
+
+/* CS42L43_SPI_CLK_CONFIG1 */
+#define CS42L43_SCLK_DIV_MASK 0x0000000F
+#define CS42L43_SCLK_DIV_SHIFT 0
+
+/* CS42L43_SPI_CONFIG1 */
+#define CS42L43_SPI_SS_IDLE_DUR_MASK 0x0F000000
+#define CS42L43_SPI_SS_IDLE_DUR_SHIFT 24
+#define CS42L43_SPI_SS_DELAY_DUR_MASK 0x000F0000
+#define CS42L43_SPI_SS_DELAY_DUR_SHIFT 16
+#define CS42L43_SPI_THREE_WIRE_MASK 0x00000100
+#define CS42L43_SPI_THREE_WIRE_SHIFT 8
+#define CS42L43_SPI_DPHA_MASK 0x00000040
+#define CS42L43_SPI_DPHA_SHIFT 6
+#define CS42L43_SPI_CPHA_MASK 0x00000020
+#define CS42L43_SPI_CPHA_SHIFT 5
+#define CS42L43_SPI_CPOL_MASK 0x00000010
+#define CS42L43_SPI_CPOL_SHIFT 4
+#define CS42L43_SPI_SS_SEL_MASK 0x00000007
+#define CS42L43_SPI_SS_SEL_SHIFT 0
+
+/* CS42L43_SPI_CONFIG2 */
+#define CS42L43_SPI_SS_FRC_MASK 0x00000001
+#define CS42L43_SPI_SS_FRC_SHIFT 0
+
+/* CS42L43_SPI_CONFIG3 */
+#define CS42L43_SPI_WDT_ENA_MASK 0x00000001
+#define CS42L43_SPI_WDT_ENA_SHIFT 0
+
+/* CS42L43_SPI_CONFIG4 */
+#define CS42L43_SPI_STALL_ENA_MASK 0x00010000
+#define CS42L43_SPI_STALL_ENA_SHIFT 16
+
+/* CS42L43_SPI_STATUS1 */
+#define CS42L43_SPI_ABORT_STS_MASK 0x00000002
+#define CS42L43_SPI_ABORT_STS_SHIFT 1
+#define CS42L43_SPI_DONE_STS_MASK 0x00000001
+#define CS42L43_SPI_DONE_STS_SHIFT 0
+
+/* CS42L43_SPI_STATUS2 */
+#define CS42L43_SPI_RX_DONE_STS_MASK 0x00000010
+#define CS42L43_SPI_RX_DONE_STS_SHIFT 4
+#define CS42L43_SPI_TX_DONE_STS_MASK 0x00000001
+#define CS42L43_SPI_TX_DONE_STS_SHIFT 0
+
+/* CS42L43_TRAN_CONFIG1 */
+#define CS42L43_SPI_START_MASK 0x00000001
+#define CS42L43_SPI_START_SHIFT 0
+
+/* CS42L43_TRAN_CONFIG2 */
+#define CS42L43_SPI_ABORT_MASK 0x00000001
+#define CS42L43_SPI_ABORT_SHIFT 0
+
+/* CS42L43_TRAN_CONFIG3 */
+#define CS42L43_SPI_WORD_SIZE_MASK 0x00070000
+#define CS42L43_SPI_WORD_SIZE_SHIFT 16
+#define CS42L43_SPI_CMD_MASK 0x00000003
+#define CS42L43_SPI_CMD_SHIFT 0
+
+/* CS42L43_TRAN_CONFIG4 */
+#define CS42L43_SPI_TX_LENGTH_MASK 0x0000FFFF
+#define CS42L43_SPI_TX_LENGTH_SHIFT 0
+
+/* CS42L43_TRAN_CONFIG5 */
+#define CS42L43_SPI_RX_LENGTH_MASK 0x0000FFFF
+#define CS42L43_SPI_RX_LENGTH_SHIFT 0
+
+/* CS42L43_TRAN_CONFIG6 */
+#define CS42L43_SPI_TX_BLOCK_LENGTH_MASK 0x0000000F
+#define CS42L43_SPI_TX_BLOCK_LENGTH_SHIFT 0
+
+/* CS42L43_TRAN_CONFIG7 */
+#define CS42L43_SPI_RX_BLOCK_LENGTH_MASK 0x0000000F
+#define CS42L43_SPI_RX_BLOCK_LENGTH_SHIFT 0
+
+/* CS42L43_TRAN_CONFIG8 */
+#define CS42L43_SPI_RX_DONE_MASK 0x00000010
+#define CS42L43_SPI_RX_DONE_SHIFT 4
+#define CS42L43_SPI_TX_DONE_MASK 0x00000001
+#define CS42L43_SPI_TX_DONE_SHIFT 0
+
+/* CS42L43_TRAN_STATUS1 */
+#define CS42L43_SPI_BUSY_STS_MASK 0x00000100
+#define CS42L43_SPI_BUSY_STS_SHIFT 8
+#define CS42L43_SPI_RX_REQUEST_MASK 0x00000010
+#define CS42L43_SPI_RX_REQUEST_SHIFT 4
+#define CS42L43_SPI_TX_REQUEST_MASK 0x00000001
+#define CS42L43_SPI_TX_REQUEST_SHIFT 0
+
+/* CS42L43_TRAN_STATUS2 */
+#define CS42L43_SPI_TX_BYTE_COUNT_MASK 0x0000FFFF
+#define CS42L43_SPI_TX_BYTE_COUNT_SHIFT 0
+
+/* CS42L43_TRAN_STATUS3 */
+#define CS42L43_SPI_RX_BYTE_COUNT_MASK 0x0000FFFF
+#define CS42L43_SPI_RX_BYTE_COUNT_SHIFT 0
+
+/* CS42L43_TX_DATA */
+#define CS42L43_SPI_TX_DATA_MASK 0xFFFFFFFF
+#define CS42L43_SPI_TX_DATA_SHIFT 0
+
+/* CS42L43_RX_DATA */
+#define CS42L43_SPI_RX_DATA_MASK 0xFFFFFFFF
+#define CS42L43_SPI_RX_DATA_SHIFT 0
+
+/* CS42L43_DACCNFG1 */
+#define CS42L43_HP_MSTR_VOL_CTRL_EN_MASK 0x00000008
+#define CS42L43_HP_MSTR_VOL_CTRL_EN_SHIFT 3
+#define CS42L43_AMP4_INV_MASK 0x00000002
+#define CS42L43_AMP4_INV_SHIFT 1
+#define CS42L43_AMP3_INV_MASK 0x00000001
+#define CS42L43_AMP3_INV_SHIFT 0
+
+/* CS42L43_DACCNFG2 */
+#define CS42L43_HP_AUTO_CLAMP_DISABLE_MASK 0x00000002
+#define CS42L43_HP_AUTO_CLAMP_DISABLE_SHIFT 1
+#define CS42L43_HP_HPF_EN_MASK 0x00000001
+#define CS42L43_HP_HPF_EN_SHIFT 0
+
+/* CS42L43_HPPATHVOL */
+#define CS42L43_AMP4_PATH_VOL_MASK 0x01FF0000
+#define CS42L43_AMP4_PATH_VOL_SHIFT 16
+#define CS42L43_AMP3_PATH_VOL_MASK 0x000001FF
+#define CS42L43_AMP3_PATH_VOL_SHIFT 0
+
+/* CS42L43_PGAVOL */
+#define CS42L43_HP_PATH_VOL_RAMP_MASK 0x0003C000
+#define CS42L43_HP_PATH_VOL_RAMP_SHIFT 14
+#define CS42L43_HP_PATH_VOL_ZC_MASK 0x00002000
+#define CS42L43_HP_PATH_VOL_ZC_SHIFT 13
+#define CS42L43_HP_PATH_VOL_SFT_MASK 0x00001000
+#define CS42L43_HP_PATH_VOL_SFT_SHIFT 12
+#define CS42L43_HP_DIG_VOL_RAMP_MASK 0x00000F00
+#define CS42L43_HP_DIG_VOL_RAMP_SHIFT 8
+#define CS42L43_HP_ANA_VOL_RAMP_MASK 0x0000000F
+#define CS42L43_HP_ANA_VOL_RAMP_SHIFT 0
+
+/* CS42L43_LOADDETRESULTS */
+#define CS42L43_AMP3_RES_DET_MASK 0x00000003
+#define CS42L43_AMP3_RES_DET_SHIFT 0
+
+/* CS42L43_LOADDETENA */
+#define CS42L43_HPLOAD_DET_EN_MASK 0x00000001
+#define CS42L43_HPLOAD_DET_EN_SHIFT 0
+
+/* CS42L43_CTRL */
+#define CS42L43_ADPTPWR_MODE_MASK 0x00000007
+#define CS42L43_ADPTPWR_MODE_SHIFT 0
+
+/* CS42L43_COEFF_RD_WR0 */
+#define CS42L43_WRITE_MODE_MASK 0x00000002
+#define CS42L43_WRITE_MODE_SHIFT 1
+
+/* CS42L43_INIT_DONE0 */
+#define CS42L43_INITIALIZE_DONE_MASK 0x00000001
+#define CS42L43_INITIALIZE_DONE_SHIFT 0
+
+/* CS42L43_START_EQZ0 */
+#define CS42L43_START_FILTER_MASK 0x00000001
+#define CS42L43_START_FILTER_SHIFT 0
+
+/* CS42L43_MUTE_EQ_IN0 */
+#define CS42L43_MUTE_EQ_CH2_MASK 0x00000002
+#define CS42L43_MUTE_EQ_CH2_SHIFT 1
+#define CS42L43_MUTE_EQ_CH1_MASK 0x00000001
+#define CS42L43_MUTE_EQ_CH1_SHIFT 0
+
+/* CS42L43_PLL_INT */
+#define CS42L43_PLL_LOST_LOCK_INT_MASK 0x00000002
+#define CS42L43_PLL_LOST_LOCK_INT_SHIFT 1
+#define CS42L43_PLL_READY_INT_MASK 0x00000001
+#define CS42L43_PLL_READY_INT_SHIFT 0
+
+/* CS42L43_SOFT_INT */
+#define CS42L43_CONTROL_APPLIED_INT_MASK 0x00000010
+#define CS42L43_CONTROL_APPLIED_INT_SHIFT 4
+#define CS42L43_CONTROL_WARN_INT_MASK 0x00000008
+#define CS42L43_CONTROL_WARN_INT_SHIFT 3
+#define CS42L43_PATCH_WARN_INT_MASK 0x00000002
+#define CS42L43_PATCH_WARN_INT_SHIFT 1
+#define CS42L43_PATCH_APPLIED_INT_MASK 0x00000001
+#define CS42L43_PATCH_APPLIED_INT_SHIFT 0
+
+/* CS42L43_MSM_INT */
+#define CS42L43_HP_STARTUP_DONE_INT_MASK 0x00000800
+#define CS42L43_HP_STARTUP_DONE_INT_SHIFT 11
+#define CS42L43_HP_SHUTDOWN_DONE_INT_MASK 0x00000400
+#define CS42L43_HP_SHUTDOWN_DONE_INT_SHIFT 10
+#define CS42L43_HSDET_DONE_INT_MASK 0x00000200
+#define CS42L43_HSDET_DONE_INT_SHIFT 9
+#define CS42L43_TIPSENSE_UNPLUG_DB_INT_MASK 0x00000080
+#define CS42L43_TIPSENSE_UNPLUG_DB_INT_SHIFT 7
+#define CS42L43_TIPSENSE_PLUG_DB_INT_MASK 0x00000040
+#define CS42L43_TIPSENSE_PLUG_DB_INT_SHIFT 6
+#define CS42L43_RINGSENSE_UNPLUG_DB_INT_MASK 0x00000020
+#define CS42L43_RINGSENSE_UNPLUG_DB_INT_SHIFT 5
+#define CS42L43_RINGSENSE_PLUG_DB_INT_MASK 0x00000010
+#define CS42L43_RINGSENSE_PLUG_DB_INT_SHIFT 4
+#define CS42L43_TIPSENSE_UNPLUG_PDET_INT_MASK 0x00000008
+#define CS42L43_TIPSENSE_UNPLUG_PDET_INT_SHIFT 3
+#define CS42L43_TIPSENSE_PLUG_PDET_INT_MASK 0x00000004
+#define CS42L43_TIPSENSE_PLUG_PDET_INT_SHIFT 2
+#define CS42L43_RINGSENSE_UNPLUG_PDET_INT_MASK 0x00000002
+#define CS42L43_RINGSENSE_UNPLUG_PDET_INT_SHIFT 1
+#define CS42L43_RINGSENSE_PLUG_PDET_INT_MASK 0x00000001
+#define CS42L43_RINGSENSE_PLUG_PDET_INT_SHIFT 0
+
+/* CS42L43_ACC_DET_INT */
+#define CS42L43_HS2_BIAS_SENSE_INT_MASK 0x00000800
+#define CS42L43_HS2_BIAS_SENSE_INT_SHIFT 11
+#define CS42L43_HS1_BIAS_SENSE_INT_MASK 0x00000400
+#define CS42L43_HS1_BIAS_SENSE_INT_SHIFT 10
+#define CS42L43_DC_DETECT1_FALSE_INT_MASK 0x00000080
+#define CS42L43_DC_DETECT1_FALSE_INT_SHIFT 7
+#define CS42L43_DC_DETECT1_TRUE_INT_MASK 0x00000040
+#define CS42L43_DC_DETECT1_TRUE_INT_SHIFT 6
+#define CS42L43_HSBIAS_CLAMPED_INT_MASK 0x00000008
+#define CS42L43_HSBIAS_CLAMPED_INT_SHIFT 3
+#define CS42L43_HS3_4_BIAS_SENSE_INT_MASK 0x00000001
+#define CS42L43_HS3_4_BIAS_SENSE_INT_SHIFT 0
+
+/* CS42L43_SPI_MSTR_INT */
+#define CS42L43_IRQ_SPI_STALLING_INT_MASK 0x00000004
+#define CS42L43_IRQ_SPI_STALLING_INT_SHIFT 2
+#define CS42L43_IRQ_SPI_STS_INT_MASK 0x00000002
+#define CS42L43_IRQ_SPI_STS_INT_SHIFT 1
+#define CS42L43_IRQ_SPI_BLOCK_INT_MASK 0x00000001
+#define CS42L43_IRQ_SPI_BLOCK_INT_SHIFT 0
+
+/* CS42L43_SW_TO_SPI_BRIDGE_INT */
+#define CS42L43_SW2SPI_BUF_OVF_UDF_INT_MASK 0x00000001
+#define CS42L43_SW2SPI_BUF_OVF_UDF_INT_SHIFT 0
+
+/* CS42L43_CLASS_D_AMP_INT */
+#define CS42L43_AMP2_CLK_STOP_FAULT_INT_MASK 0x00002000
+#define CS42L43_AMP2_CLK_STOP_FAULT_INT_SHIFT 13
+#define CS42L43_AMP1_CLK_STOP_FAULT_INT_MASK 0x00001000
+#define CS42L43_AMP1_CLK_STOP_FAULT_INT_SHIFT 12
+#define CS42L43_AMP2_VDDSPK_FAULT_INT_MASK 0x00000800
+#define CS42L43_AMP2_VDDSPK_FAULT_INT_SHIFT 11
+#define CS42L43_AMP1_VDDSPK_FAULT_INT_MASK 0x00000400
+#define CS42L43_AMP1_VDDSPK_FAULT_INT_SHIFT 10
+#define CS42L43_AMP2_SHUTDOWN_DONE_INT_MASK 0x00000200
+#define CS42L43_AMP2_SHUTDOWN_DONE_INT_SHIFT 9
+#define CS42L43_AMP1_SHUTDOWN_DONE_INT_MASK 0x00000100
+#define CS42L43_AMP1_SHUTDOWN_DONE_INT_SHIFT 8
+#define CS42L43_AMP2_STARTUP_DONE_INT_MASK 0x00000080
+#define CS42L43_AMP2_STARTUP_DONE_INT_SHIFT 7
+#define CS42L43_AMP1_STARTUP_DONE_INT_MASK 0x00000040
+#define CS42L43_AMP1_STARTUP_DONE_INT_SHIFT 6
+#define CS42L43_AMP2_THERM_SHDN_INT_MASK 0x00000020
+#define CS42L43_AMP2_THERM_SHDN_INT_SHIFT 5
+#define CS42L43_AMP1_THERM_SHDN_INT_MASK 0x00000010
+#define CS42L43_AMP1_THERM_SHDN_INT_SHIFT 4
+#define CS42L43_AMP2_THERM_WARN_INT_MASK 0x00000008
+#define CS42L43_AMP2_THERM_WARN_INT_SHIFT 3
+#define CS42L43_AMP1_THERM_WARN_INT_MASK 0x00000004
+#define CS42L43_AMP1_THERM_WARN_INT_SHIFT 2
+#define CS42L43_AMP2_SCDET_INT_MASK 0x00000002
+#define CS42L43_AMP2_SCDET_INT_SHIFT 1
+#define CS42L43_AMP1_SCDET_INT_MASK 0x00000001
+#define CS42L43_AMP1_SCDET_INT_SHIFT 0
+
+/* CS42L43_GPIO_INT */
+#define CS42L43_GPIO3_FALL_INT_MASK 0x00000020
+#define CS42L43_GPIO3_FALL_INT_SHIFT 5
+#define CS42L43_GPIO3_RISE_INT_MASK 0x00000010
+#define CS42L43_GPIO3_RISE_INT_SHIFT 4
+#define CS42L43_GPIO2_FALL_INT_MASK 0x00000008
+#define CS42L43_GPIO2_FALL_INT_SHIFT 3
+#define CS42L43_GPIO2_RISE_INT_MASK 0x00000004
+#define CS42L43_GPIO2_RISE_INT_SHIFT 2
+#define CS42L43_GPIO1_FALL_INT_MASK 0x00000002
+#define CS42L43_GPIO1_FALL_INT_SHIFT 1
+#define CS42L43_GPIO1_RISE_INT_MASK 0x00000001
+#define CS42L43_GPIO1_RISE_INT_SHIFT 0
+
+/* CS42L43_HPOUT_INT */
+#define CS42L43_HP_ILIMIT_INT_MASK 0x00000002
+#define CS42L43_HP_ILIMIT_INT_SHIFT 1
+#define CS42L43_HP_LOADDET_DONE_INT_MASK 0x00000001
+#define CS42L43_HP_LOADDET_DONE_INT_SHIFT 0
+
+/* CS42L43_BOOT_CONTROL */
+#define CS42L43_LOCK_HW_STS_MASK 0x00000002
+#define CS42L43_LOCK_HW_STS_SHIFT 1
+
+/* CS42L43_BLOCK_EN */
+#define CS42L43_MCU_EN_MASK 0x00000001
+#define CS42L43_MCU_EN_SHIFT 0
+
+/* CS42L43_SHUTTER_CONTROL */
+#define CS42L43_STATUS_SPK_SHUTTER_MUTE_MASK 0x00008000
+#define CS42L43_STATUS_SPK_SHUTTER_MUTE_SHIFT 15
+#define CS42L43_SPK_SHUTTER_CFG_MASK 0x00000F00
+#define CS42L43_SPK_SHUTTER_CFG_SHIFT 8
+#define CS42L43_STATUS_MIC_SHUTTER_MUTE_MASK 0x00000080
+#define CS42L43_STATUS_MIC_SHUTTER_MUTE_SHIFT 7
+#define CS42L43_MIC_SHUTTER_CFG_MASK 0x0000000F
+#define CS42L43_MIC_SHUTTER_CFG_SHIFT 0
+
+/* CS42L43_MCU_SW_REV */
+#define CS42L43_BIOS_SUBMINOR_REV_MASK 0xFF000000
+#define CS42L43_BIOS_SUBMINOR_REV_SHIFT 24
+#define CS42L43_BIOS_MINOR_REV_MASK 0x00F00000
+#define CS42L43_BIOS_MINOR_REV_SHIFT 20
+#define CS42L43_BIOS_MAJOR_REV_MASK 0x000F0000
+#define CS42L43_BIOS_MAJOR_REV_SHIFT 16
+#define CS42L43_FW_SUBMINOR_REV_MASK 0x0000FF00
+#define CS42L43_FW_SUBMINOR_REV_SHIFT 8
+#define CS42L43_FW_MINOR_REV_MASK 0x000000F0
+#define CS42L43_FW_MINOR_REV_SHIFT 4
+#define CS42L43_FW_MAJOR_REV_MASK 0x0000000F
+#define CS42L43_FW_MAJOR_REV_SHIFT 0
+
+/* CS42L43_NEED_CONFIGS */
+#define CS42L43_FW_PATCH_NEED_CFG_MASK 0x80000000
+#define CS42L43_FW_PATCH_NEED_CFG_SHIFT 31
+
+/* CS42L43_FW_MISSION_CTRL_MM_CTRL_SELECTION */
+#define CS42L43_FW_MM_CTRL_MCU_SEL_MASK 0x00000001
+#define CS42L43_FW_MM_CTRL_MCU_SEL_SHIFT 0
+
+/* CS42L43_FW_MISSION_CTRL_MM_MCU_CFG_REG */
+#define CS42L43_FW_MISSION_CTRL_MM_MCU_CFG_DISABLE_VAL 0xF05AA50F
+
+#endif /* CS42L43_CORE_REGS_H */
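
Every field in the new header is expressed as a _MASK/_SHIFT pair, so consumers decode registers by masking then shifting. A small illustrative sketch (not from the patch) unpacking the firmware revision out of a raw CS42L43_MCU_SW_REV value:

#include <linux/device.h>
#include <linux/mfd/cs42l43-regs.h>

/* Hypothetical helper: print the firmware revision fields. */
static void cs42l43_print_fw_rev(struct device *dev, unsigned int rev)
{
	dev_info(dev, "firmware %u.%u.%u\n",
		 (rev & CS42L43_FW_MAJOR_REV_MASK) >> CS42L43_FW_MAJOR_REV_SHIFT,
		 (rev & CS42L43_FW_MINOR_REV_MASK) >> CS42L43_FW_MINOR_REV_SHIFT,
		 (rev & CS42L43_FW_SUBMINOR_REV_MASK) >> CS42L43_FW_SUBMINOR_REV_SHIFT);
}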
diff --git a/include/linux/mfd/cs42l43.h b/include/linux/mfd/cs42l43.h
new file mode 100644
index 000000000000..cf8263aab41b
--- /dev/null
+++ b/include/linux/mfd/cs42l43.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CS42L43 core driver external data
+ *
+ * Copyright (C) 2022-2023 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/soundwire/sdw.h>
+#include <linux/workqueue.h>
+
+#ifndef CS42L43_CORE_EXT_H
+#define CS42L43_CORE_EXT_H
+
+#define CS42L43_N_SUPPLIES 3
+
+enum cs42l43_irq_numbers {
+ CS42L43_PLL_LOST_LOCK,
+ CS42L43_PLL_READY,
+
+ CS42L43_HP_STARTUP_DONE,
+ CS42L43_HP_SHUTDOWN_DONE,
+ CS42L43_HSDET_DONE,
+ CS42L43_TIPSENSE_UNPLUG_DB,
+ CS42L43_TIPSENSE_PLUG_DB,
+ CS42L43_RINGSENSE_UNPLUG_DB,
+ CS42L43_RINGSENSE_PLUG_DB,
+ CS42L43_TIPSENSE_UNPLUG_PDET,
+ CS42L43_TIPSENSE_PLUG_PDET,
+ CS42L43_RINGSENSE_UNPLUG_PDET,
+ CS42L43_RINGSENSE_PLUG_PDET,
+
+ CS42L43_HS2_BIAS_SENSE,
+ CS42L43_HS1_BIAS_SENSE,
+ CS42L43_DC_DETECT1_FALSE,
+ CS42L43_DC_DETECT1_TRUE,
+ CS42L43_HSBIAS_CLAMPED,
+ CS42L43_HS3_4_BIAS_SENSE,
+
+ CS42L43_AMP2_CLK_STOP_FAULT,
+ CS42L43_AMP1_CLK_STOP_FAULT,
+ CS42L43_AMP2_VDDSPK_FAULT,
+ CS42L43_AMP1_VDDSPK_FAULT,
+ CS42L43_AMP2_SHUTDOWN_DONE,
+ CS42L43_AMP1_SHUTDOWN_DONE,
+ CS42L43_AMP2_STARTUP_DONE,
+ CS42L43_AMP1_STARTUP_DONE,
+ CS42L43_AMP2_THERM_SHDN,
+ CS42L43_AMP1_THERM_SHDN,
+ CS42L43_AMP2_THERM_WARN,
+ CS42L43_AMP1_THERM_WARN,
+ CS42L43_AMP2_SCDET,
+ CS42L43_AMP1_SCDET,
+
+ CS42L43_GPIO3_FALL,
+ CS42L43_GPIO3_RISE,
+ CS42L43_GPIO2_FALL,
+ CS42L43_GPIO2_RISE,
+ CS42L43_GPIO1_FALL,
+ CS42L43_GPIO1_RISE,
+
+ CS42L43_HP_ILIMIT,
+ CS42L43_HP_LOADDET_DONE,
+};
+
+struct cs42l43 {
+ struct device *dev;
+ struct regmap *regmap;
+ struct sdw_slave *sdw;
+
+ struct regulator *vdd_p;
+ struct regulator *vdd_d;
+ struct regulator_bulk_data core_supplies[CS42L43_N_SUPPLIES];
+
+ struct gpio_desc *reset;
+
+ int irq;
+ struct regmap_irq_chip irq_chip;
+ struct regmap_irq_chip_data *irq_data;
+
+ struct work_struct boot_work;
+ struct completion device_attach;
+ struct completion device_detach;
+ struct completion firmware_download;
+ int firmware_error;
+
+ unsigned int sdw_freq;
+ /* Lock to gate control of the PLL and its sources. */
+ struct mutex pll_lock;
+
+ bool sdw_pll_active;
+ bool attached;
+ bool hw_lock;
+};
+
+#endif /* CS42L43_CORE_EXT_H */
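
Per the comment in struct cs42l43, pll_lock gates control of the PLL and its clock sources, so any consumer touching CS42L43_PLL_CONTROL should hold it. A hypothetical sketch of that pattern (the helper name and bare src parameter are illustrative):

#include <linux/mfd/cs42l43.h>
#include <linux/mfd/cs42l43-regs.h>

static int cs42l43_example_set_pll_src(struct cs42l43 *cs42l43,
				       unsigned int src)
{
	int ret;

	/* Serialise against other users of the PLL and its sources. */
	mutex_lock(&cs42l43->pll_lock);
	ret = regmap_update_bits(cs42l43->regmap, CS42L43_PLL_CONTROL,
				 CS42L43_PLL_REFCLK_SRC_MASK,
				 src << CS42L43_PLL_REFCLK_SRC_SHIFT);
	mutex_unlock(&cs42l43->pll_lock);

	return ret;
}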
diff --git a/include/linux/mfd/dbx500-prcmu.h b/include/linux/mfd/dbx500-prcmu.h
index e7a7e70fdb38..dd0fc891b228 100644
--- a/include/linux/mfd/dbx500-prcmu.h
+++ b/include/linux/mfd/dbx500-prcmu.h
@@ -556,16 +556,6 @@ static inline void prcmu_clear(unsigned int reg, u32 bits)
#define PRCMU_QOS_ARM_OPP 3
#define PRCMU_QOS_DEFAULT_VALUE -1
-static inline unsigned long prcmu_qos_get_cpufreq_opp_delay(void)
-{
- return 0;
-}
-
-static inline int prcmu_qos_requirement(int prcmu_qos_class)
-{
- return 0;
-}
-
static inline int prcmu_qos_add_requirement(int prcmu_qos_class,
char *name, s32 value)
{
@@ -582,15 +572,4 @@ static inline void prcmu_qos_remove_requirement(int prcmu_qos_class, char *name)
{
}
-static inline int prcmu_qos_add_notifier(int prcmu_qos_class,
- struct notifier_block *notifier)
-{
- return 0;
-}
-static inline int prcmu_qos_remove_notifier(int prcmu_qos_class,
- struct notifier_block *notifier)
-{
- return 0;
-}
-
#endif /* __MACH_PRCMU_H */
diff --git a/include/linux/mfd/hi655x-pmic.h b/include/linux/mfd/hi655x-pmic.h
index 6a012784dd1b..194556851ccf 100644
--- a/include/linux/mfd/hi655x-pmic.h
+++ b/include/linux/mfd/hi655x-pmic.h
@@ -52,7 +52,6 @@
#define OTMP_D1R_INT_MASK BIT(OTMP_D1R_INT)
struct hi655x_pmic {
- struct resource *res;
struct device *dev;
struct regmap *regmap;
struct gpio_desc *gpio;
diff --git a/include/linux/mfd/intel-m10-bmc.h b/include/linux/mfd/intel-m10-bmc.h
index 1812ebfa11a8..ee66c9751003 100644
--- a/include/linux/mfd/intel-m10-bmc.h
+++ b/include/linux/mfd/intel-m10-bmc.h
@@ -11,6 +11,7 @@
#include <linux/bits.h>
#include <linux/dev_printk.h>
#include <linux/regmap.h>
+#include <linux/rwsem.h>
#define M10BMC_N3000_LEGACY_BUILD_VER 0x300468
#define M10BMC_N3000_SYS_BASE 0x300800
@@ -39,6 +40,11 @@
#define M10BMC_N3000_VER_PCB_INFO_MSK GENMASK(31, 24)
#define M10BMC_N3000_VER_LEGACY_INVALID 0xffffffff
+/* Telemetry registers */
+#define M10BMC_N3000_TELEM_START 0x100
+#define M10BMC_N3000_TELEM_END 0x250
+#define M10BMC_D5005_TELEM_END 0x300
+
/* Secure update doorbell register, in system register region */
#define M10BMC_N3000_DOORBELL 0x400
@@ -205,11 +211,15 @@ struct m10bmc_csr_map {
* struct intel_m10bmc_platform_info - Intel MAX 10 BMC platform specific information
* @cells: MFD cells
* @n_cells: MFD cells ARRAY_SIZE()
+ * @handshake_sys_reg_ranges: array of register ranges for fw handshake regs
+ * @handshake_sys_reg_nranges: number of register ranges for fw handshake regs
* @csr_map: the mappings for register definition of MAX10 BMC
*/
struct intel_m10bmc_platform_info {
struct mfd_cell *cells;
int n_cells;
+ const struct regmap_range *handshake_sys_reg_ranges;
+ unsigned int handshake_sys_reg_nranges;
const struct m10bmc_csr_map *csr_map;
};
@@ -232,18 +242,30 @@ struct intel_m10bmc_flash_bulk_ops {
void (*unlock_write)(struct intel_m10bmc *m10bmc);
};
+enum m10bmc_fw_state {
+ M10BMC_FW_STATE_NORMAL,
+ M10BMC_FW_STATE_SEC_UPDATE_PREPARE,
+ M10BMC_FW_STATE_SEC_UPDATE_WRITE,
+ M10BMC_FW_STATE_SEC_UPDATE_PROGRAM,
+};
+
/**
* struct intel_m10bmc - Intel MAX 10 BMC parent driver data structure
* @dev: this device
* @regmap: the regmap used to access registers by m10bmc itself
* @info: the platform information for MAX10 BMC
* @flash_bulk_ops: optional device specific operations for flash R/W
+ * @bmcfw_lock: read/write semaphore to BMC firmware running state
+ * @bmcfw_state: BMC firmware running state. Available only when
+ * handshake_sys_reg_nranges > 0.
*/
struct intel_m10bmc {
struct device *dev;
struct regmap *regmap;
const struct intel_m10bmc_platform_info *info;
const struct intel_m10bmc_flash_bulk_ops *flash_bulk_ops;
+ struct rw_semaphore bmcfw_lock; /* Protects bmcfw_state */
+ enum m10bmc_fw_state bmcfw_state;
};
/*
@@ -251,6 +273,7 @@ struct intel_m10bmc {
*
* m10bmc_raw_read - read m10bmc register per addr
* m10bmc_sys_read - read m10bmc system register per offset
+ * m10bmc_sys_update_bits - update m10bmc system register per offset
*/
static inline int
m10bmc_raw_read(struct intel_m10bmc *m10bmc, unsigned int addr,
@@ -266,21 +289,15 @@ m10bmc_raw_read(struct intel_m10bmc *m10bmc, unsigned int addr,
return ret;
}
+int m10bmc_sys_read(struct intel_m10bmc *m10bmc, unsigned int offset, unsigned int *val);
+int m10bmc_sys_update_bits(struct intel_m10bmc *m10bmc, unsigned int offset,
+ unsigned int msk, unsigned int val);
+
/*
- * The base of the system registers could be configured by HW developers, and
- * in HW SPEC, the base is not added to the addresses of the system registers.
- *
- * This function helps to simplify the accessing of the system registers. And if
- * the base is reconfigured in HW, SW developers could simply change the
- * csr_map's base accordingly.
+ * Track the state of the firmware, as it is not available for register
+ * handshakes during secure updates on some MAX 10 cards.
*/
-static inline int m10bmc_sys_read(struct intel_m10bmc *m10bmc, unsigned int offset,
- unsigned int *val)
-{
- const struct m10bmc_csr_map *csr_map = m10bmc->info->csr_map;
-
- return m10bmc_raw_read(m10bmc, csr_map->base + offset, val);
-}
+void m10bmc_fw_state_set(struct intel_m10bmc *m10bmc, enum m10bmc_fw_state new_state);
/*
* MAX10 BMC Core support
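
The new out-of-line accessors pair with m10bmc_fw_state_set(): sub-drivers flag the firmware as busy around a secure update so access to handshake registers can be gated. A hedged sketch of the flow (the doorbell mask and value here are placeholders, not the real update protocol):

#include <linux/mfd/intel-m10-bmc.h>

static int example_sec_update_start(struct intel_m10bmc *m10bmc)
{
	int ret;

	/* Tell the core that handshake registers may be unavailable. */
	m10bmc_fw_state_set(m10bmc, M10BMC_FW_STATE_SEC_UPDATE_PREPARE);

	ret = m10bmc_sys_update_bits(m10bmc, M10BMC_N3000_DOORBELL,
				     0x1, 0x1);	/* placeholder bits */
	if (ret)
		m10bmc_fw_state_set(m10bmc, M10BMC_FW_STATE_NORMAL);

	return ret;
}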
diff --git a/include/linux/mfd/lpc_ich.h b/include/linux/mfd/lpc_ich.h
index ea4a4b1b246a..1fbda1f8967d 100644
--- a/include/linux/mfd/lpc_ich.h
+++ b/include/linux/mfd/lpc_ich.h
@@ -15,7 +15,7 @@
#define ICH_RES_GPE0 1
/* GPIO compatibility */
-enum {
+enum lpc_gpio_versions {
ICH_I3100_GPIO,
ICH_V5_GPIO,
ICH_V6_GPIO,
@@ -26,11 +26,14 @@ enum {
AVOTON_GPIO,
};
+struct lpc_ich_gpio_info;
+
struct lpc_ich_info {
char name[32];
unsigned int iTCO_version;
- unsigned int gpio_version;
+ enum lpc_gpio_versions gpio_version;
enum intel_spi_type spi_type;
+ const struct lpc_ich_gpio_info *gpio_info;
u8 use_gpio;
};
diff --git a/include/linux/mfd/max597x.h b/include/linux/mfd/max5970.h
index a850b2e02e6a..762a7d40c843 100644
--- a/include/linux/mfd/max597x.h
+++ b/include/linux/mfd/max5970.h
@@ -7,25 +7,25 @@
* Author: Patrick Rudolph <patrick.rudolph@9elements.com>
*/
-#ifndef _MFD_MAX597X_H
-#define _MFD_MAX597X_H
+#ifndef _MFD_MAX5970_H
+#define _MFD_MAX5970_H
#include <linux/regmap.h>
#define MAX5970_NUM_SWITCHES 2
#define MAX5978_NUM_SWITCHES 1
-#define MAX597X_NUM_LEDS 4
+#define MAX5970_NUM_LEDS 4
-struct max597x_data {
+struct max5970_data {
int num_switches;
u32 irng[MAX5970_NUM_SWITCHES];
u32 mon_rng[MAX5970_NUM_SWITCHES];
u32 shunt_micro_ohms[MAX5970_NUM_SWITCHES];
};
-enum max597x_chip_type {
- MAX597x_TYPE_MAX5978 = 1,
- MAX597x_TYPE_MAX5970,
+enum max5970_chip_type {
+ TYPE_MAX5978 = 1,
+ TYPE_MAX5970,
};
#define MAX5970_REG_CURRENT_L(ch) (0x01 + (ch) * 4)
@@ -93,4 +93,4 @@ enum max597x_chip_type {
#define MAX_REGISTERS 0x49
#define ADC_MASK 0x3FF
-#endif /* _MFD_MAX597X_H */
+#endif /* _MFD_MAX5970_H */
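
The per-channel register macros in this header are plain stride arithmetic; e.g. MAX5970_REG_CURRENT_L(ch) expands to 0x01 + 4 * ch. An illustrative table for the two-switch MAX5970 (assuming only the macros above):

#include <linux/mfd/max5970.h>

static const unsigned int max5970_cur_l_regs[MAX5970_NUM_SWITCHES] = {
	MAX5970_REG_CURRENT_L(0),	/* 0x01 */
	MAX5970_REG_CURRENT_L(1),	/* 0x05 */
};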
diff --git a/include/linux/mfd/max77541.h b/include/linux/mfd/max77541.h
new file mode 100644
index 000000000000..fe5c0a3dc637
--- /dev/null
+++ b/include/linux/mfd/max77541.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef __MFD_MAX77541_H
+#define __MFD_MAX77541_H
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+/* REGISTERS */
+#define MAX77541_REG_INT_SRC 0x00
+#define MAX77541_REG_INT_SRC_M 0x01
+
+#define MAX77541_BIT_INT_SRC_TOPSYS BIT(0)
+#define MAX77541_BIT_INT_SRC_BUCK BIT(1)
+
+#define MAX77541_REG_TOPSYS_INT 0x02
+#define MAX77541_REG_TOPSYS_INT_M 0x03
+
+#define MAX77541_BIT_TOPSYS_INT_TJ_120C BIT(0)
+#define MAX77541_BIT_TOPSYS_INT_TJ_140C BIT(1)
+#define MAX77541_BIT_TOPSYS_INT_TSHDN BIT(2)
+#define MAX77541_BIT_TOPSYS_INT_UVLO BIT(3)
+#define MAX77541_BIT_TOPSYS_INT_ALT_SWO BIT(4)
+#define MAX77541_BIT_TOPSYS_INT_EXT_FREQ_DET BIT(5)
+
+/* REGULATORS */
+#define MAX77541_REG_BUCK_INT 0x20
+#define MAX77541_REG_BUCK_INT_M 0x21
+
+#define MAX77541_BIT_BUCK_INT_M1_POK_FLT BIT(0)
+#define MAX77541_BIT_BUCK_INT_M2_POK_FLT BIT(1)
+#define MAX77541_BIT_BUCK_INT_M1_SCFLT BIT(4)
+#define MAX77541_BIT_BUCK_INT_M2_SCFLT BIT(5)
+
+#define MAX77541_REG_EN_CTRL 0x0B
+
+#define MAX77541_BIT_M1_EN BIT(0)
+#define MAX77541_BIT_M2_EN BIT(1)
+
+#define MAX77541_REG_M1_VOUT 0x23
+#define MAX77541_REG_M2_VOUT 0x33
+
+#define MAX77541_BITS_MX_VOUT GENMASK(7, 0)
+
+#define MAX77541_REG_M1_CFG1 0x25
+#define MAX77541_REG_M2_CFG1 0x35
+
+#define MAX77541_BITS_MX_CFG1_RNG GENMASK(7, 6)
+
+/* ADC */
+#define MAX77541_REG_ADC_INT 0x70
+#define MAX77541_REG_ADC_INT_M 0x71
+
+#define MAX77541_BIT_ADC_INT_CH1_I BIT(0)
+#define MAX77541_BIT_ADC_INT_CH2_I BIT(1)
+#define MAX77541_BIT_ADC_INT_CH3_I BIT(2)
+#define MAX77541_BIT_ADC_INT_CH6_I BIT(5)
+
+#define MAX77541_REG_ADC_DATA_CH1 0x72
+#define MAX77541_REG_ADC_DATA_CH2 0x73
+#define MAX77541_REG_ADC_DATA_CH3 0x74
+#define MAX77541_REG_ADC_DATA_CH6 0x77
+
+/* INTERRUPT MASKS */
+#define MAX77541_REG_INT_SRC_MASK 0x00
+#define MAX77541_REG_TOPSYS_INT_MASK 0x00
+#define MAX77541_REG_BUCK_INT_MASK 0x00
+
+#define MAX77541_MAX_REGULATORS 2
+
+enum max7754x_ids {
+ MAX77540 = 1,
+ MAX77541,
+};
+
+struct regmap;
+struct regmap_irq_chip_data;
+struct i2c_client;
+
+struct max77541 {
+ struct i2c_client *i2c;
+ struct regmap *regmap;
+ enum max7754x_ids id;
+
+ struct regmap_irq_chip_data *irq_data;
+ struct regmap_irq_chip_data *irq_buck;
+ struct regmap_irq_chip_data *irq_topsys;
+ struct regmap_irq_chip_data *irq_adc;
+};
+
+#endif /* __MFD_MAX77541_H */
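For orientation, a hedged sketch of a cell driver reading one of the ADC result registers through the shared regmap; the conversion to physical units is device-specific and not shown:

#include <linux/mfd/max77541.h>
#include <linux/regmap.h>

/* Return the raw result for ADC channel 1 (illustrative helper). */
static int ex_max77541_adc_ch1(struct max77541 *max77541, unsigned int *raw)
{
	return regmap_read(max77541->regmap, MAX77541_REG_ADC_DATA_CH1, raw);
}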
diff --git a/include/linux/mfd/max77686-private.h b/include/linux/mfd/max77686-private.h
index 3acceeedbaba..ea635d12a741 100644
--- a/include/linux/mfd/max77686-private.h
+++ b/include/linux/mfd/max77686-private.h
@@ -441,8 +441,4 @@ enum max77686_types {
TYPE_MAX77802,
};
-extern int max77686_irq_init(struct max77686_dev *max77686);
-extern void max77686_irq_exit(struct max77686_dev *max77686);
-extern int max77686_irq_resume(struct max77686_dev *max77686);
-
#endif /* __LINUX_MFD_MAX77686_PRIV_H */
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index 311f7d3d2323..54444ff2a5de 100644
--- a/include/linux/mfd/max77693-private.h
+++ b/include/linux/mfd/max77693-private.h
@@ -405,7 +405,7 @@ enum max77693_haptic_reg {
MAX77693_HAPTIC_REG_END,
};
-/* max77693-pmic LSCNFG configuraton register */
+/* max77693-pmic LSCNFG configuration register */
#define MAX77693_PMIC_LOW_SYS_MASK 0x80
#define MAX77693_PMIC_LOW_SYS_SHIFT 7
diff --git a/include/linux/mfd/max77843-private.h b/include/linux/mfd/max77843-private.h
index 0bc7454c4dbe..2fb4db67f110 100644
--- a/include/linux/mfd/max77843-private.h
+++ b/include/linux/mfd/max77843-private.h
@@ -198,7 +198,7 @@ enum max77843_irq_muic {
#define MAX77843_MCONFIG_MEN_MASK BIT(MCONFIG_MEN_SHIFT)
#define MAX77843_MCONFIG_PDIV_MASK (0x3 << MCONFIG_PDIV_SHIFT)
-/* Max77843 charger insterrupts */
+/* Max77843 charger interrupts */
#define MAX77843_CHG_BYP_I BIT(0)
#define MAX77843_CHG_BATP_I BIT(2)
#define MAX77843_CHG_BAT_I BIT(3)
diff --git a/include/linux/mfd/mt6358/registers.h b/include/linux/mfd/mt6358/registers.h
index 3d33517f178c..d83e87298ac4 100644
--- a/include/linux/mfd/mt6358/registers.h
+++ b/include/linux/mfd/mt6358/registers.h
@@ -262,6 +262,12 @@
#define MT6358_LDO_VBIF28_CON3 0x1db0
#define MT6358_VCAMA1_ANA_CON0 0x1e08
#define MT6358_VCAMA2_ANA_CON0 0x1e0c
+#define MT6358_VFE28_ANA_CON0 0x1e10
+#define MT6358_VCN28_ANA_CON0 0x1e14
+#define MT6358_VBIF28_ANA_CON0 0x1e18
+#define MT6358_VAUD28_ANA_CON0 0x1e1c
+#define MT6358_VAUX18_ANA_CON0 0x1e20
+#define MT6358_VXO22_ANA_CON0 0x1e24
#define MT6358_VCN33_ANA_CON0 0x1e28
#define MT6358_VSIM1_ANA_CON0 0x1e2c
#define MT6358_VSIM2_ANA_CON0 0x1e30
@@ -288,4 +294,21 @@
#define MT6358_AUD_TOP_INT_CON0 0x2228
#define MT6358_AUD_TOP_INT_STATUS0 0x2234
+/*
+ * MT6366 has no VCAM*, but has other regulators in its place. The names
+ * keep the MT6358 prefix for ease of use in the regulator driver.
+ */
+#define MT6358_LDO_VSRAM_CON5 0x1bf8
+#define MT6358_LDO_VM18_CON0 MT6358_LDO_VCAMA1_CON0
+#define MT6358_LDO_VM18_CON1 MT6358_LDO_VCAMA1_CON1
+#define MT6358_LDO_VM18_CON2 MT6358_LDO_VCAMA1_CON2
+#define MT6358_LDO_VMDDR_CON0 MT6358_LDO_VCAMA2_CON0
+#define MT6358_LDO_VMDDR_CON1 MT6358_LDO_VCAMA2_CON1
+#define MT6358_LDO_VMDDR_CON2 MT6358_LDO_VCAMA2_CON2
+#define MT6358_LDO_VSRAM_CORE_CON0 MT6358_LDO_VCAMD_CON0
+#define MT6358_LDO_VSRAM_CORE_DBG0 0x1cb6
+#define MT6358_LDO_VSRAM_CORE_DBG1 0x1cb8
+#define MT6358_VM18_ANA_CON0 MT6358_VCAMA1_ANA_CON0
+#define MT6358_VMDDR_ANA_CON0 MT6358_VCAMD_ANA_CON0
+
#endif /* __MFD_MT6358_REGISTERS_H__ */
diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h
index 9af1f3105f80..78e167a92483 100644
--- a/include/linux/mfd/rk808.h
+++ b/include/linux/mfd/rk808.h
@@ -289,6 +289,414 @@ enum rk805_reg {
#define RK805_INT_ALARM_EN (1 << 3)
#define RK805_INT_TIMER_EN (1 << 2)
+/* RK806 */
+#define RK806_POWER_EN0 0x0
+#define RK806_POWER_EN1 0x1
+#define RK806_POWER_EN2 0x2
+#define RK806_POWER_EN3 0x3
+#define RK806_POWER_EN4 0x4
+#define RK806_POWER_EN5 0x5
+#define RK806_POWER_SLP_EN0 0x6
+#define RK806_POWER_SLP_EN1 0x7
+#define RK806_POWER_SLP_EN2 0x8
+#define RK806_POWER_DISCHRG_EN0 0x9
+#define RK806_POWER_DISCHRG_EN1 0xA
+#define RK806_POWER_DISCHRG_EN2 0xB
+#define RK806_BUCK_FB_CONFIG 0xC
+#define RK806_SLP_LP_CONFIG 0xD
+#define RK806_POWER_FPWM_EN0 0xE
+#define RK806_POWER_FPWM_EN1 0xF
+#define RK806_BUCK1_CONFIG 0x10
+#define RK806_BUCK2_CONFIG 0x11
+#define RK806_BUCK3_CONFIG 0x12
+#define RK806_BUCK4_CONFIG 0x13
+#define RK806_BUCK5_CONFIG 0x14
+#define RK806_BUCK6_CONFIG 0x15
+#define RK806_BUCK7_CONFIG 0x16
+#define RK806_BUCK8_CONFIG 0x17
+#define RK806_BUCK9_CONFIG 0x18
+#define RK806_BUCK10_CONFIG 0x19
+#define RK806_BUCK1_ON_VSEL 0x1A
+#define RK806_BUCK2_ON_VSEL 0x1B
+#define RK806_BUCK3_ON_VSEL 0x1C
+#define RK806_BUCK4_ON_VSEL 0x1D
+#define RK806_BUCK5_ON_VSEL 0x1E
+#define RK806_BUCK6_ON_VSEL 0x1F
+#define RK806_BUCK7_ON_VSEL 0x20
+#define RK806_BUCK8_ON_VSEL 0x21
+#define RK806_BUCK9_ON_VSEL 0x22
+#define RK806_BUCK10_ON_VSEL 0x23
+#define RK806_BUCK1_SLP_VSEL 0x24
+#define RK806_BUCK2_SLP_VSEL 0x25
+#define RK806_BUCK3_SLP_VSEL 0x26
+#define RK806_BUCK4_SLP_VSEL 0x27
+#define RK806_BUCK5_SLP_VSEL 0x28
+#define RK806_BUCK6_SLP_VSEL 0x29
+#define RK806_BUCK7_SLP_VSEL 0x2A
+#define RK806_BUCK8_SLP_VSEL 0x2B
+#define RK806_BUCK9_SLP_VSEL 0x2D
+#define RK806_BUCK10_SLP_VSEL 0x2E
+#define RK806_BUCK_DEBUG1 0x30
+#define RK806_BUCK_DEBUG2 0x31
+#define RK806_BUCK_DEBUG3 0x32
+#define RK806_BUCK_DEBUG4 0x33
+#define RK806_BUCK_DEBUG5 0x34
+#define RK806_BUCK_DEBUG6 0x35
+#define RK806_BUCK_DEBUG7 0x36
+#define RK806_BUCK_DEBUG8 0x37
+#define RK806_BUCK_DEBUG9 0x38
+#define RK806_BUCK_DEBUG10 0x39
+#define RK806_BUCK_DEBUG11 0x3A
+#define RK806_BUCK_DEBUG12 0x3B
+#define RK806_BUCK_DEBUG13 0x3C
+#define RK806_BUCK_DEBUG14 0x3D
+#define RK806_BUCK_DEBUG15 0x3E
+#define RK806_BUCK_DEBUG16 0x3F
+#define RK806_BUCK_DEBUG17 0x40
+#define RK806_BUCK_DEBUG18 0x41
+#define RK806_NLDO_IMAX 0x42
+#define RK806_NLDO1_ON_VSEL 0x43
+#define RK806_NLDO2_ON_VSEL 0x44
+#define RK806_NLDO3_ON_VSEL 0x45
+#define RK806_NLDO4_ON_VSEL 0x46
+#define RK806_NLDO5_ON_VSEL 0x47
+#define RK806_NLDO1_SLP_VSEL 0x48
+#define RK806_NLDO2_SLP_VSEL 0x49
+#define RK806_NLDO3_SLP_VSEL 0x4A
+#define RK806_NLDO4_SLP_VSEL 0x4B
+#define RK806_NLDO5_SLP_VSEL 0x4C
+#define RK806_PLDO_IMAX 0x4D
+#define RK806_PLDO1_ON_VSEL 0x4E
+#define RK806_PLDO2_ON_VSEL 0x4F
+#define RK806_PLDO3_ON_VSEL 0x50
+#define RK806_PLDO4_ON_VSEL 0x51
+#define RK806_PLDO5_ON_VSEL 0x52
+#define RK806_PLDO6_ON_VSEL 0x53
+#define RK806_PLDO1_SLP_VSEL 0x54
+#define RK806_PLDO2_SLP_VSEL 0x55
+#define RK806_PLDO3_SLP_VSEL 0x56
+#define RK806_PLDO4_SLP_VSEL 0x57
+#define RK806_PLDO5_SLP_VSEL 0x58
+#define RK806_PLDO6_SLP_VSEL 0x59
+#define RK806_CHIP_NAME 0x5A
+#define RK806_CHIP_VER 0x5B
+#define RK806_OTP_VER 0x5C
+#define RK806_SYS_STS 0x5D
+#define RK806_SYS_CFG0 0x5E
+#define RK806_SYS_CFG1 0x5F
+#define RK806_SYS_OPTION 0x61
+#define RK806_SLEEP_CONFIG0 0x62
+#define RK806_SLEEP_CONFIG1 0x63
+#define RK806_SLEEP_CTR_SEL0 0x64
+#define RK806_SLEEP_CTR_SEL1 0x65
+#define RK806_SLEEP_CTR_SEL2 0x66
+#define RK806_SLEEP_CTR_SEL3 0x67
+#define RK806_SLEEP_CTR_SEL4 0x68
+#define RK806_SLEEP_CTR_SEL5 0x69
+#define RK806_DVS_CTRL_SEL0 0x6A
+#define RK806_DVS_CTRL_SEL1 0x6B
+#define RK806_DVS_CTRL_SEL2 0x6C
+#define RK806_DVS_CTRL_SEL3 0x6D
+#define RK806_DVS_CTRL_SEL4 0x6E
+#define RK806_DVS_CTRL_SEL5 0x6F
+#define RK806_DVS_START_CTRL 0x70
+#define RK806_SLEEP_GPIO 0x71
+#define RK806_SYS_CFG3 0x72
+#define RK806_ON_SOURCE 0x74
+#define RK806_OFF_SOURCE 0x75
+#define RK806_PWRON_KEY 0x76
+#define RK806_INT_STS0 0x77
+#define RK806_INT_MSK0 0x78
+#define RK806_INT_STS1 0x79
+#define RK806_INT_MSK1 0x7A
+#define RK806_GPIO_INT_CONFIG 0x7B
+#define RK806_DATA_REG0 0x7C
+#define RK806_DATA_REG1 0x7D
+#define RK806_DATA_REG2 0x7E
+#define RK806_DATA_REG3 0x7F
+#define RK806_DATA_REG4 0x80
+#define RK806_DATA_REG5 0x81
+#define RK806_DATA_REG6 0x82
+#define RK806_DATA_REG7 0x83
+#define RK806_DATA_REG8 0x84
+#define RK806_DATA_REG9 0x85
+#define RK806_DATA_REG10 0x86
+#define RK806_DATA_REG11 0x87
+#define RK806_DATA_REG12 0x88
+#define RK806_DATA_REG13 0x89
+#define RK806_DATA_REG14 0x8A
+#define RK806_DATA_REG15 0x8B
+#define RK806_TM_REG 0x8C
+#define RK806_OTP_EN_REG 0x8D
+#define RK806_FUNC_OTP_EN_REG 0x8E
+#define RK806_TEST_REG1 0x8F
+#define RK806_TEST_REG2 0x90
+#define RK806_TEST_REG3 0x91
+#define RK806_TEST_REG4 0x92
+#define RK806_TEST_REG5 0x93
+#define RK806_BUCK_VSEL_OTP_REG0 0x94
+#define RK806_BUCK_VSEL_OTP_REG1 0x95
+#define RK806_BUCK_VSEL_OTP_REG2 0x96
+#define RK806_BUCK_VSEL_OTP_REG3 0x97
+#define RK806_BUCK_VSEL_OTP_REG4 0x98
+#define RK806_BUCK_VSEL_OTP_REG5 0x99
+#define RK806_BUCK_VSEL_OTP_REG6 0x9A
+#define RK806_BUCK_VSEL_OTP_REG7 0x9B
+#define RK806_BUCK_VSEL_OTP_REG8 0x9C
+#define RK806_BUCK_VSEL_OTP_REG9 0x9D
+#define RK806_NLDO1_VSEL_OTP_REG0 0x9E
+#define RK806_NLDO1_VSEL_OTP_REG1 0x9F
+#define RK806_NLDO1_VSEL_OTP_REG2 0xA0
+#define RK806_NLDO1_VSEL_OTP_REG3 0xA1
+#define RK806_NLDO1_VSEL_OTP_REG4 0xA2
+#define RK806_PLDO_VSEL_OTP_REG0 0xA3
+#define RK806_PLDO_VSEL_OTP_REG1 0xA4
+#define RK806_PLDO_VSEL_OTP_REG2 0xA5
+#define RK806_PLDO_VSEL_OTP_REG3 0xA6
+#define RK806_PLDO_VSEL_OTP_REG4 0xA7
+#define RK806_PLDO_VSEL_OTP_REG5 0xA8
+#define RK806_BUCK_EN_OTP_REG1 0xA9
+#define RK806_NLDO_EN_OTP_REG1 0xAA
+#define RK806_PLDO_EN_OTP_REG1 0xAB
+#define RK806_BUCK_FB_RES_OTP_REG1 0xAC
+#define RK806_OTP_RESEV_REG0 0xAD
+#define RK806_OTP_RESEV_REG1 0xAE
+#define RK806_OTP_RESEV_REG2 0xAF
+#define RK806_OTP_RESEV_REG3 0xB0
+#define RK806_OTP_RESEV_REG4 0xB1
+#define RK806_BUCK_SEQ_REG0 0xB2
+#define RK806_BUCK_SEQ_REG1 0xB3
+#define RK806_BUCK_SEQ_REG2 0xB4
+#define RK806_BUCK_SEQ_REG3 0xB5
+#define RK806_BUCK_SEQ_REG4 0xB6
+#define RK806_BUCK_SEQ_REG5 0xB7
+#define RK806_BUCK_SEQ_REG6 0xB8
+#define RK806_BUCK_SEQ_REG7 0xB9
+#define RK806_BUCK_SEQ_REG8 0xBA
+#define RK806_BUCK_SEQ_REG9 0xBB
+#define RK806_BUCK_SEQ_REG10 0xBC
+#define RK806_BUCK_SEQ_REG11 0xBD
+#define RK806_BUCK_SEQ_REG12 0xBE
+#define RK806_BUCK_SEQ_REG13 0xBF
+#define RK806_BUCK_SEQ_REG14 0xC0
+#define RK806_BUCK_SEQ_REG15 0xC1
+#define RK806_BUCK_SEQ_REG16 0xC2
+#define RK806_BUCK_SEQ_REG17 0xC3
+#define RK806_HK_TRIM_REG1 0xC4
+#define RK806_HK_TRIM_REG2 0xC5
+#define RK806_BUCK_REF_TRIM_REG1 0xC6
+#define RK806_BUCK_REF_TRIM_REG2 0xC7
+#define RK806_BUCK_REF_TRIM_REG3 0xC8
+#define RK806_BUCK_REF_TRIM_REG4 0xC9
+#define RK806_BUCK_REF_TRIM_REG5 0xCA
+#define RK806_BUCK_OSC_TRIM_REG1 0xCB
+#define RK806_BUCK_OSC_TRIM_REG2 0xCC
+#define RK806_BUCK_OSC_TRIM_REG3 0xCD
+#define RK806_BUCK_OSC_TRIM_REG4 0xCE
+#define RK806_BUCK_OSC_TRIM_REG5 0xCF
+#define RK806_BUCK_TRIM_ZCDIOS_REG1 0xD0
+#define RK806_BUCK_TRIM_ZCDIOS_REG2 0xD1
+#define RK806_NLDO_TRIM_REG1 0xD2
+#define RK806_NLDO_TRIM_REG2 0xD3
+#define RK806_NLDO_TRIM_REG3 0xD4
+#define RK806_PLDO_TRIM_REG1 0xD5
+#define RK806_PLDO_TRIM_REG2 0xD6
+#define RK806_PLDO_TRIM_REG3 0xD7
+#define RK806_TRIM_ICOMP_REG1 0xD8
+#define RK806_TRIM_ICOMP_REG2 0xD9
+#define RK806_EFUSE_CONTROL_REGH 0xDA
+#define RK806_FUSE_PROG_REG 0xDB
+#define RK806_MAIN_FSM_STS_REG 0xDD
+#define RK806_FSM_REG 0xDE
+#define RK806_TOP_RESEV_OFFR 0xEC
+#define RK806_TOP_RESEV_POR 0xED
+#define RK806_BUCK_VRSN_REG1 0xEE
+#define RK806_BUCK_VRSN_REG2 0xEF
+#define RK806_NLDO_RLOAD_SEL_REG1 0xF0
+#define RK806_PLDO_RLOAD_SEL_REG1 0xF1
+#define RK806_PLDO_RLOAD_SEL_REG2 0xF2
+#define RK806_BUCK_CMIN_MX_REG1 0xF3
+#define RK806_BUCK_CMIN_MX_REG2 0xF4
+#define RK806_BUCK_FREQ_SET_REG1 0xF5
+#define RK806_BUCK_FREQ_SET_REG2 0xF6
+#define RK806_BUCK_RS_MEABS_REG1 0xF7
+#define RK806_BUCK_RS_MEABS_REG2 0xF8
+#define RK806_BUCK_RS_ZDLEB_REG1 0xF9
+#define RK806_BUCK_RS_ZDLEB_REG2 0xFA
+#define RK806_BUCK_RSERVE_REG1 0xFB
+#define RK806_BUCK_RSERVE_REG2 0xFC
+#define RK806_BUCK_RSERVE_REG3 0xFD
+#define RK806_BUCK_RSERVE_REG4 0xFE
+#define RK806_BUCK_RSERVE_REG5 0xFF
+
+/* INT_STS Register field definitions */
+#define RK806_INT_STS_PWRON_FALL BIT(0)
+#define RK806_INT_STS_PWRON_RISE BIT(1)
+#define RK806_INT_STS_PWRON BIT(2)
+#define RK806_INT_STS_PWRON_LP BIT(3)
+#define RK806_INT_STS_HOTDIE BIT(4)
+#define RK806_INT_STS_VDC_RISE BIT(5)
+#define RK806_INT_STS_VDC_FALL BIT(6)
+#define RK806_INT_STS_VB_LO BIT(7)
+#define RK806_INT_STS_REV0 BIT(0)
+#define RK806_INT_STS_REV1 BIT(1)
+#define RK806_INT_STS_REV2 BIT(2)
+#define RK806_INT_STS_CRC_ERROR BIT(3)
+#define RK806_INT_STS_SLP3_GPIO BIT(4)
+#define RK806_INT_STS_SLP2_GPIO BIT(5)
+#define RK806_INT_STS_SLP1_GPIO BIT(6)
+#define RK806_INT_STS_WDT BIT(7)
+
+/* SPI command */
+#define RK806_CMD_READ 0
+#define RK806_CMD_WRITE BIT(7)
+#define RK806_CMD_CRC_EN BIT(6)
+#define RK806_CMD_CRC_DIS 0
+#define RK806_CMD_LEN_MSK 0x0f
+#define RK806_REG_H 0x00
+
+#define VERSION_AB 0x01
+
+enum rk806_reg_id {
+ RK806_ID_DCDC1 = 0,
+ RK806_ID_DCDC2,
+ RK806_ID_DCDC3,
+ RK806_ID_DCDC4,
+ RK806_ID_DCDC5,
+ RK806_ID_DCDC6,
+ RK806_ID_DCDC7,
+ RK806_ID_DCDC8,
+ RK806_ID_DCDC9,
+ RK806_ID_DCDC10,
+
+ RK806_ID_NLDO1,
+ RK806_ID_NLDO2,
+ RK806_ID_NLDO3,
+ RK806_ID_NLDO4,
+ RK806_ID_NLDO5,
+
+ RK806_ID_PLDO1,
+ RK806_ID_PLDO2,
+ RK806_ID_PLDO3,
+ RK806_ID_PLDO4,
+ RK806_ID_PLDO5,
+ RK806_ID_PLDO6,
+ RK806_ID_END,
+};
+
+/* Define the RK806 IRQ numbers */
+enum rk806_irqs {
+ /* INT_STS0 registers */
+ RK806_IRQ_PWRON_FALL,
+ RK806_IRQ_PWRON_RISE,
+ RK806_IRQ_PWRON,
+ RK806_IRQ_PWRON_LP,
+ RK806_IRQ_HOTDIE,
+ RK806_IRQ_VDC_RISE,
+ RK806_IRQ_VDC_FALL,
+ RK806_IRQ_VB_LO,
+
+ /* INT_STS1 registers */
+ RK806_IRQ_REV0,
+ RK806_IRQ_REV1,
+ RK806_IRQ_REV2,
+ RK806_IRQ_CRC_ERROR,
+ RK806_IRQ_SLP3_GPIO,
+ RK806_IRQ_SLP2_GPIO,
+ RK806_IRQ_SLP1_GPIO,
+ RK806_IRQ_WDT,
+};
+
+/* VCC1 Low Voltage Threshold */
+enum rk806_lv_sel {
+ VB_LO_SEL_2800,
+ VB_LO_SEL_2900,
+ VB_LO_SEL_3000,
+ VB_LO_SEL_3100,
+ VB_LO_SEL_3200,
+ VB_LO_SEL_3300,
+ VB_LO_SEL_3400,
+ VB_LO_SEL_3500,
+};
+
+/* System Shutdown Voltage Select */
+enum rk806_uv_sel {
+ VB_UV_SEL_2700,
+ VB_UV_SEL_2800,
+ VB_UV_SEL_2900,
+ VB_UV_SEL_3000,
+ VB_UV_SEL_3100,
+ VB_UV_SEL_3200,
+ VB_UV_SEL_3300,
+ VB_UV_SEL_3400,
+};
+
+/* Pin Function */
+enum rk806_pwrctrl_fun {
+ PWRCTRL_NULL_FUN,
+ PWRCTRL_SLP_FUN,
+ PWRCTRL_POWOFF_FUN,
+ PWRCTRL_RST_FUN,
+ PWRCTRL_DVS_FUN,
+ PWRCTRL_GPIO_FUN,
+};
+
+/* Pin Polarity */
+enum rk806_pin_level {
+ POL_LOW,
+ POL_HIGH,
+};
+
+enum rk806_vsel_ctr_sel {
+ CTR_BY_NO_EFFECT,
+ CTR_BY_PWRCTRL1,
+ CTR_BY_PWRCTRL2,
+ CTR_BY_PWRCTRL3,
+};
+
+enum rk806_dvs_ctr_sel {
+ CTR_SEL_NO_EFFECT,
+ CTR_SEL_DVS_START1,
+ CTR_SEL_DVS_START2,
+ CTR_SEL_DVS_START3,
+};
+
+enum rk806_pin_dr_sel {
+ RK806_PIN_INPUT,
+ RK806_PIN_OUTPUT,
+};
+
+#define RK806_INT_POL_MSK BIT(1)
+#define RK806_INT_POL_H BIT(1)
+#define RK806_INT_POL_L 0
+
+#define RK806_SLAVE_RESTART_FUN_MSK BIT(1)
+#define RK806_SLAVE_RESTART_FUN_EN BIT(1)
+#define RK806_SLAVE_RESTART_FUN_OFF 0
+
+#define RK806_SYS_ENB2_2M_MSK BIT(1)
+#define RK806_SYS_ENB2_2M_EN BIT(1)
+#define RK806_SYS_ENB2_2M_OFF 0
+
+enum rk806_int_fun {
+ RK806_INT_ONLY,
+ RK806_INT_ADN_WKUP,
+};
+
+enum rk806_dvs_mode {
+ RK806_DVS_NOT_SUPPORT,
+ RK806_DVS_START1,
+ RK806_DVS_START2,
+ RK806_DVS_START3,
+ RK806_DVS_PWRCTRL1,
+ RK806_DVS_PWRCTRL2,
+ RK806_DVS_PWRCTRL3,
+ RK806_DVS_START_PWRCTR1,
+ RK806_DVS_START_PWRCTR2,
+ RK806_DVS_START_PWRCTR3,
+ RK806_DVS_END,
+};
+
/* RK808 IRQ Definitions */
#define RK808_IRQ_VOUT_LO 0
#define RK808_IRQ_VB_LO 1
@@ -780,6 +1188,7 @@ enum {
enum {
RK805_ID = 0x8050,
+ RK806_ID = 0x8060,
RK808_ID = 0x0000,
RK809_ID = 0x8090,
RK817_ID = 0x8170,
@@ -787,11 +1196,17 @@ enum {
};
struct rk808 {
- struct i2c_client *i2c;
+ struct device *dev;
struct regmap_irq_chip_data *irq_data;
struct regmap *regmap;
long variant;
const struct regmap_config *regmap_cfg;
const struct regmap_irq_chip *regmap_irq_chip;
};
+
+void rk8xx_shutdown(struct device *dev);
+int rk8xx_probe(struct device *dev, int variant, unsigned int irq, struct regmap *regmap);
+int rk8xx_suspend(struct device *dev);
+int rk8xx_resume(struct device *dev);
+
#endif /* __LINUX_REGULATOR_RK808_H */
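Replacing the stored i2c_client with a plain struct device is what lets bus-specific shims (I2C or SPI) share one core. A minimal sketch of an I2C shim calling the newly exported rk8xx_probe(); the regmap_config here is a stand-in, the real ones live in the core driver:

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/mfd/rk808.h>
#include <linux/regmap.h>

static const struct regmap_config ex_rk8xx_cfg = {	/* stand-in config */
	.reg_bits = 8,
	.val_bits = 8,
};

static int ex_rk8xx_i2c_probe(struct i2c_client *client)
{
	struct regmap *regmap;

	regmap = devm_regmap_init_i2c(client, &ex_rk8xx_cfg);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	/* variant selects one of the *_ID enum values, e.g. RK808_ID. */
	return rk8xx_probe(&client->dev, RK808_ID, client->irq, regmap);
}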
diff --git a/include/linux/mfd/rt5033-private.h b/include/linux/mfd/rt5033-private.h
index 6bb432f6a96c..0221f806d139 100644
--- a/include/linux/mfd/rt5033-private.h
+++ b/include/linux/mfd/rt5033-private.h
@@ -55,21 +55,28 @@ enum rt5033_reg {
};
/* RT5033 Charger state register */
-#define RT5033_CHG_STAT_MASK 0x20
+#define RT5033_CHG_STAT_TYPE_MASK 0x60
+#define RT5033_CHG_STAT_TYPE_PRE 0x20
+#define RT5033_CHG_STAT_TYPE_FAST 0x60
+#define RT5033_CHG_STAT_MASK 0x30
#define RT5033_CHG_STAT_DISCHARGING 0x00
#define RT5033_CHG_STAT_FULL 0x10
#define RT5033_CHG_STAT_CHARGING 0x20
#define RT5033_CHG_STAT_NOT_CHARGING 0x30
-#define RT5033_CHG_STAT_TYPE_MASK 0x60
-#define RT5033_CHG_STAT_TYPE_PRE 0x20
-#define RT5033_CHG_STAT_TYPE_FAST 0x60
/* RT5033 CHGCTRL1 register */
#define RT5033_CHGCTRL1_IAICR_MASK 0xe0
+#define RT5033_CHGCTRL1_TE_EN_MASK 0x08
+#define RT5033_CHGCTRL1_HZ_MASK 0x02
#define RT5033_CHGCTRL1_MODE_MASK 0x01
/* RT5033 CHGCTRL2 register */
#define RT5033_CHGCTRL2_CV_MASK 0xfc
+#define RT5033_CHGCTRL2_CV_SHIFT 0x02
+
+/* RT5033 DEVICE_ID register */
+#define RT5033_VENDOR_ID_MASK 0xf0
+#define RT5033_CHIP_REV_MASK 0x0f
/* RT5033 CHGCTRL3 register */
#define RT5033_CHGCTRL3_CFO_EN_MASK 0x40
@@ -77,18 +84,18 @@ enum rt5033_reg {
#define RT5033_CHGCTRL3_TIMER_EN_MASK 0x01
/* RT5033 CHGCTRL4 register */
-#define RT5033_CHGCTRL4_EOC_MASK 0x07
+#define RT5033_CHGCTRL4_MIVR_MASK 0xe0
#define RT5033_CHGCTRL4_IPREC_MASK 0x18
+#define RT5033_CHGCTRL4_IPREC_SHIFT 0x03
+#define RT5033_CHGCTRL4_EOC_MASK 0x07
/* RT5033 CHGCTRL5 register */
-#define RT5033_CHGCTRL5_VPREC_MASK 0x0f
#define RT5033_CHGCTRL5_ICHG_MASK 0xf0
#define RT5033_CHGCTRL5_ICHG_SHIFT 0x04
-#define RT5033_CHG_MAX_CURRENT 0x0d
+#define RT5033_CHGCTRL5_VPREC_MASK 0x0f
/* RT5033 RT CTRL1 register */
#define RT5033_RT_CTRL1_UUG_MASK 0x02
-#define RT5033_RT_HZ_MASK 0x01
/* RT5033 control register */
#define RT5033_CTRL_FCCM_BUCK_MASK BIT(0)
@@ -115,28 +122,37 @@ enum rt5033_reg {
* register), AICR mode limits the input current. For example, the AICR 100
* mode limits the input current to 100 mA.
*/
+#define RT5033_AICR_DISABLE 0x00
#define RT5033_AICR_100_MODE 0x20
#define RT5033_AICR_500_MODE 0x40
#define RT5033_AICR_700_MODE 0x60
#define RT5033_AICR_900_MODE 0x80
+#define RT5033_AICR_1000_MODE 0xa0
#define RT5033_AICR_1500_MODE 0xc0
#define RT5033_AICR_2000_MODE 0xe0
-#define RT5033_AICR_MODE_MASK 0xe0
-/* RT5033 use internal timer need to set time */
-#define RT5033_FAST_CHARGE_TIMER4 0x00
-#define RT5033_FAST_CHARGE_TIMER6 0x01
-#define RT5033_FAST_CHARGE_TIMER8 0x02
-#define RT5033_FAST_CHARGE_TIMER9 0x03
-#define RT5033_FAST_CHARGE_TIMER12 0x04
-#define RT5033_FAST_CHARGE_TIMER14 0x05
-#define RT5033_FAST_CHARGE_TIMER16 0x06
+/* RT5033 charger minimum input voltage regulation */
+#define RT5033_CHARGER_MIVR_DISABLE 0x00
+#define RT5033_CHARGER_MIVR_4200MV 0x20
+#define RT5033_CHARGER_MIVR_4300MV 0x40
+#define RT5033_CHARGER_MIVR_4400MV 0x60
+#define RT5033_CHARGER_MIVR_4500MV 0x80
+#define RT5033_CHARGER_MIVR_4600MV 0xa0
+#define RT5033_CHARGER_MIVR_4700MV 0xc0
+#define RT5033_CHARGER_MIVR_4800MV 0xe0
+/* RT5033 internal fast-charge timer settings */
+#define RT5033_FAST_CHARGE_TIMER4 0x00 /* 4 hrs */
+#define RT5033_FAST_CHARGE_TIMER6 0x08 /* 6 hrs */
+#define RT5033_FAST_CHARGE_TIMER8 0x10 /* 8 hrs */
+#define RT5033_FAST_CHARGE_TIMER10 0x18 /* 10 hrs */
+#define RT5033_FAST_CHARGE_TIMER12 0x20 /* 12 hrs */
+#define RT5033_FAST_CHARGE_TIMER14 0x28 /* 14 hrs */
+#define RT5033_FAST_CHARGE_TIMER16 0x30 /* 16 hrs */
+
+#define RT5033_INT_TIMER_DISABLE 0x00
#define RT5033_INT_TIMER_ENABLE 0x01
-/* RT5033 charger termination enable mask */
-#define RT5033_TE_ENABLE_MASK 0x08
-
/*
* RT5033 charger opa mode. RT5033 has two opa modes for OTG: charger mode
* and boost mode.
@@ -145,25 +161,30 @@ enum rt5033_reg {
#define RT5033_BOOST_MODE 0x01
/* RT5033 charger termination enable */
+#define RT5033_TE_DISABLE 0x00
#define RT5033_TE_ENABLE 0x08
/* RT5033 charger CFO enable */
+#define RT5033_CFO_DISABLE 0x00
#define RT5033_CFO_ENABLE 0x40
/* RT5033 charger constant charge voltage (as in CHGCTRL2 register), uV */
#define RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MIN 3650000U
#define RT5033_CHARGER_CONST_VOLTAGE_STEP_NUM 25000U
#define RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MAX 4400000U
+#define RT5033_CV_MAX_VOLTAGE 0x1e
/* RT5033 charger pre-charge current limits (as in CHGCTRL4 register), uA */
#define RT5033_CHARGER_PRE_CURRENT_LIMIT_MIN 350000U
#define RT5033_CHARGER_PRE_CURRENT_STEP_NUM 100000U
#define RT5033_CHARGER_PRE_CURRENT_LIMIT_MAX 650000U
+#define RT5033_CHG_MAX_PRE_CURRENT 0x03
/* RT5033 charger fast-charge current (as in CHGCTRL5 register), uA */
#define RT5033_CHARGER_FAST_CURRENT_MIN 700000U
#define RT5033_CHARGER_FAST_CURRENT_STEP_NUM 100000U
#define RT5033_CHARGER_FAST_CURRENT_MAX 2000000U
+#define RT5033_CHG_MAX_CURRENT 0x0d
/*
* RT5033 charger const-charge end of charger current (
@@ -187,11 +208,12 @@ enum rt5033_reg {
* RT5033 charger UUG. It enables MOS auto control by H/W charger
* circuit.
*/
+#define RT5033_CHARGER_UUG_DISABLE 0x00
#define RT5033_CHARGER_UUG_ENABLE 0x02
/* RT5033 charger high impedance mode */
#define RT5033_CHARGER_HZ_DISABLE 0x00
-#define RT5033_CHARGER_HZ_ENABLE 0x01
+#define RT5033_CHARGER_HZ_ENABLE 0x02
/* RT5033 regulator BUCK output voltage uV */
#define RT5033_REGULATOR_BUCK_VOLTAGE_MIN 1000000U
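With the CV shift now spelled out, a charger driver can derive the CHGCTRL2 field purely from the microvolt limits above. A worked sketch (the helper name is made up):

#include <linux/mfd/rt5033-private.h>

/*
 * Convert a constant-charge voltage in uV to the CHGCTRL2 CV field.
 * E.g. 4200000 uV -> (4200000 - 3650000) / 25000 = 22 -> 22 << 2 = 0x58.
 */
static unsigned int ex_cv_to_reg(unsigned int uvolt)
{
	unsigned int sel = (uvolt - RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MIN) /
			   RT5033_CHARGER_CONST_VOLTAGE_STEP_NUM;

	if (sel > RT5033_CV_MAX_VOLTAGE)
		sel = RT5033_CV_MAX_VOLTAGE;
	return (sel << RT5033_CHGCTRL2_CV_SHIFT) & RT5033_CHGCTRL2_CV_MASK;
}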
diff --git a/include/linux/mfd/rt5033.h b/include/linux/mfd/rt5033.h
index 8f306ac15a27..bb3d18945d21 100644
--- a/include/linux/mfd/rt5033.h
+++ b/include/linux/mfd/rt5033.h
@@ -12,7 +12,6 @@
#include <linux/regulator/consumer.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
-#include <linux/power_supply.h>
/* RT5033 regulator IDs */
enum rt5033_regulators {
@@ -32,27 +31,4 @@ struct rt5033_dev {
bool wakeup;
};
-struct rt5033_battery {
- struct i2c_client *client;
- struct rt5033_dev *rt5033;
- struct regmap *regmap;
- struct power_supply *psy;
-};
-
-/* RT5033 charger platform data */
-struct rt5033_charger_data {
- unsigned int pre_uamp;
- unsigned int pre_uvolt;
- unsigned int const_uvolt;
- unsigned int eoc_uamp;
- unsigned int fast_uamp;
-};
-
-struct rt5033_charger {
- struct device *dev;
- struct rt5033_dev *rt5033;
- struct power_supply psy;
- struct rt5033_charger_data *chg;
-};
-
#endif /* __RT5033_H__ */
diff --git a/include/linux/mfd/rz-mtu3.h b/include/linux/mfd/rz-mtu3.h
index c5173bc06270..8421d49500bf 100644
--- a/include/linux/mfd/rz-mtu3.h
+++ b/include/linux/mfd/rz-mtu3.h
@@ -151,7 +151,6 @@ struct rz_mtu3 {
void *priv_data;
};
-#if IS_ENABLED(CONFIG_RZ_MTU3)
static inline bool rz_mtu3_request_channel(struct rz_mtu3_channel *ch)
{
mutex_lock(&ch->lock);
@@ -188,70 +187,5 @@ void rz_mtu3_32bit_ch_write(struct rz_mtu3_channel *ch, u16 off, u32 val);
void rz_mtu3_shared_reg_write(struct rz_mtu3_channel *ch, u16 off, u16 val);
void rz_mtu3_shared_reg_update_bit(struct rz_mtu3_channel *ch, u16 off,
u16 pos, u8 val);
-#else
-static inline bool rz_mtu3_request_channel(struct rz_mtu3_channel *ch)
-{
- return false;
-}
-
-static inline void rz_mtu3_release_channel(struct rz_mtu3_channel *ch)
-{
-}
-
-static inline bool rz_mtu3_is_enabled(struct rz_mtu3_channel *ch)
-{
- return false;
-}
-
-static inline void rz_mtu3_disable(struct rz_mtu3_channel *ch)
-{
-}
-
-static inline int rz_mtu3_enable(struct rz_mtu3_channel *ch)
-{
- return 0;
-}
-
-static inline u8 rz_mtu3_8bit_ch_read(struct rz_mtu3_channel *ch, u16 off)
-{
- return 0;
-}
-
-static inline u16 rz_mtu3_16bit_ch_read(struct rz_mtu3_channel *ch, u16 off)
-{
- return 0;
-}
-
-static inline u32 rz_mtu3_32bit_ch_read(struct rz_mtu3_channel *ch, u16 off)
-{
- return 0;
-}
-
-static inline u16 rz_mtu3_shared_reg_read(struct rz_mtu3_channel *ch, u16 off)
-{
- return 0;
-}
-
-static inline void rz_mtu3_8bit_ch_write(struct rz_mtu3_channel *ch, u16 off, u8 val)
-{
-}
-
-static inline void rz_mtu3_16bit_ch_write(struct rz_mtu3_channel *ch, u16 off, u16 val)
-{
-}
-
-static inline void rz_mtu3_32bit_ch_write(struct rz_mtu3_channel *ch, u16 off, u32 val)
-{
-}
-
-static inline void rz_mtu3_shared_reg_write(struct rz_mtu3_channel *ch, u16 off, u16 val)
-{
-}
-
-static inline void rz_mtu3_shared_reg_update_bit(struct rz_mtu3_channel *ch,
- u16 off, u16 pos, u8 val)
-{
-}
-#endif
#endif /* __MFD_RZ_MTU3_H__ */
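Without the !CONFIG_RZ_MTU3 stubs, consumers must now depend on RZ_MTU3 in Kconfig; the call sequence itself is unchanged. A sketch, with an illustrative register offset:

#include <linux/errno.h>
#include <linux/mfd/rz-mtu3.h>

static int ex_use_channel(struct rz_mtu3_channel *ch)
{
	int ret;

	if (!rz_mtu3_request_channel(ch))
		return -EBUSY;

	ret = rz_mtu3_enable(ch);
	if (ret) {
		rz_mtu3_release_channel(ch);
		return ret;
	}

	/* 0x00 is an illustrative offset, not a documented register. */
	rz_mtu3_8bit_ch_write(ch, 0x00, 0x01);

	rz_mtu3_disable(ch);
	rz_mtu3_release_channel(ch);
	return 0;
}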
diff --git a/include/linux/mfd/si476x-platform.h b/include/linux/mfd/si476x-platform.h
index 18363b773d07..cb99e16ca947 100644
--- a/include/linux/mfd/si476x-platform.h
+++ b/include/linux/mfd/si476x-platform.h
@@ -10,7 +10,7 @@
#ifndef __SI476X_PLATFORM_H__
#define __SI476X_PLATFORM_H__
-/* It is possible to select one of the four adresses using pins A0
+/* It is possible to select one of the four addresses using pins A0
* and A1 on SI476x */
#define SI476X_I2C_ADDR_1 0x60
#define SI476X_I2C_ADDR_2 0x61
diff --git a/include/linux/mfd/stm32-timers.h b/include/linux/mfd/stm32-timers.h
index 1b94325febb3..ca35af30745f 100644
--- a/include/linux/mfd/stm32-timers.h
+++ b/include/linux/mfd/stm32-timers.h
@@ -102,6 +102,15 @@ enum stm32_timers_dmas {
STM32_TIMERS_MAX_DMAS,
};
+/* STM32 Timers may have either a single global interrupt or 4 interrupt lines */
+enum stm32_timers_irqs {
+ STM32_TIMERS_IRQ_GLOBAL_BRK, /* global or brk IRQ */
+ STM32_TIMERS_IRQ_UP,
+ STM32_TIMERS_IRQ_TRG_COM,
+ STM32_TIMERS_IRQ_CC,
+ STM32_TIMERS_MAX_IRQS,
+};
+
/**
* struct stm32_timers_dma - STM32 timer DMA handling.
* @completion: end of DMA transfer completion
@@ -123,6 +132,8 @@ struct stm32_timers {
struct regmap *regmap;
u32 max_arr;
struct stm32_timers_dma dma; /* Only to be used by the parent */
+ unsigned int nr_irqs;
+ int irq[STM32_TIMERS_MAX_IRQS];
};
#if IS_REACHABLE(CONFIG_MFD_STM32_TIMERS)
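The new nr_irqs field lets a child driver tell the two wirings apart without extra DT parsing; a minimal sketch:

#include <linux/mfd/stm32-timers.h>

/* Pick the update IRQ when 4 lines exist, else the single global line. */
static int ex_get_update_irq(struct stm32_timers *ddata)
{
	if (ddata->nr_irqs == STM32_TIMERS_MAX_IRQS)
		return ddata->irq[STM32_TIMERS_IRQ_UP];
	return ddata->irq[STM32_TIMERS_IRQ_GLOBAL_BRK];
}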
diff --git a/include/linux/mfd/stpmic1.h b/include/linux/mfd/stpmic1.h
index fa3f99f7e9a1..dc00bac24f5a 100644
--- a/include/linux/mfd/stpmic1.h
+++ b/include/linux/mfd/stpmic1.h
@@ -15,7 +15,7 @@
#define RREQ_STATE_SR 0x5
#define VERSION_SR 0x6
-#define SWOFF_PWRCTRL_CR 0x10
+#define MAIN_CR 0x10
#define PADS_PULL_CR 0x11
#define BUCKS_PD_CR 0x12
#define LDO14_PD_CR 0x13
@@ -148,14 +148,14 @@
#define LDO_BYPASS_MASK BIT(7)
/* Main PMIC Control Register
- * SWOFF_PWRCTRL_CR
+ * MAIN_CR
* Address : 0x10
*/
-#define ICC_EVENT_ENABLED BIT(4)
+#define OCP_OFF_DBG BIT(4)
#define PWRCTRL_POLARITY_HIGH BIT(3)
-#define PWRCTRL_PIN_VALID BIT(2)
-#define RESTART_REQUEST_ENABLED BIT(1)
-#define SOFTWARE_SWITCH_OFF_ENABLED BIT(0)
+#define PWRCTRL_ENABLE BIT(2)
+#define RESTART_REQUEST_ENABLE BIT(1)
+#define SOFTWARE_SWITCH_OFF BIT(0)
/* Main PMIC PADS Control Register
* PADS_PULL_CR
diff --git a/include/linux/mfd/tps65010.h b/include/linux/mfd/tps65010.h
index a1fb9bc5311d..5edf1aef1118 100644
--- a/include/linux/mfd/tps65010.h
+++ b/include/linux/mfd/tps65010.h
@@ -28,6 +28,8 @@
#ifndef __LINUX_I2C_TPS65010_H
#define __LINUX_I2C_TPS65010_H
+struct gpio_chip;
+
/*
* ----------------------------------------------------------------------------
* Registers, all 8 bits
@@ -176,12 +178,10 @@ struct i2c_client;
/**
* struct tps65010_board - packages GPIO and LED lines
- * @base: the GPIO number to assign to GPIO-1
* @outmask: bit (N-1) is set to allow GPIO-N to be used as an
* (open drain) output
* @setup: optional callback issued once the GPIOs are valid
* @teardown: optional callback issued before the GPIOs are invalidated
- * @context: optional parameter passed to setup() and teardown()
*
* Board data may be used to package the GPIO (and LED) lines for use
* by the generic GPIO and LED frameworks. The first four GPIOs
@@ -193,12 +193,9 @@ struct i2c_client;
* devices in their initial states using these GPIOs.
*/
struct tps65010_board {
- int base;
unsigned outmask;
-
- int (*setup)(struct i2c_client *client, void *context);
- int (*teardown)(struct i2c_client *client, void *context);
- void *context;
+ int (*setup)(struct i2c_client *client, struct gpio_chip *gc);
+ void (*teardown)(struct i2c_client *client, struct gpio_chip *gc);
};
#endif /* __LINUX_I2C_TPS65010_H */
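Board code now receives the gpio_chip itself rather than an opaque context pointer. A sketch of the new callback shape (the GPIO chosen and its level are illustrative):

#include <linux/bits.h>
#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/mfd/tps65010.h>

/* Illustrative setup hook: drive GPIO-1 (offset 0) high once usable. */
static int ex_board_setup(struct i2c_client *client, struct gpio_chip *gc)
{
	return gc->direction_output(gc, 0, 1);
}

static const struct tps65010_board ex_board = {
	.outmask = BIT(0),	/* allow GPIO-1 as an (open drain) output */
	.setup = ex_board_setup,
};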
diff --git a/include/linux/mfd/tps65086.h b/include/linux/mfd/tps65086.h
index 16f87cccc003..9185b5cd8371 100644
--- a/include/linux/mfd/tps65086.h
+++ b/include/linux/mfd/tps65086.h
@@ -13,8 +13,9 @@
#include <linux/regmap.h>
/* List of registers for TPS65086 */
-#define TPS65086_DEVICEID 0x01
-#define TPS65086_IRQ 0x02
+#define TPS65086_DEVICEID1 0x00
+#define TPS65086_DEVICEID2 0x01
+#define TPS65086_IRQ 0x02
#define TPS65086_IRQ_MASK 0x03
#define TPS65086_PMICSTAT 0x04
#define TPS65086_SHUTDNSRC 0x05
@@ -75,10 +76,16 @@
#define TPS65086_IRQ_SHUTDN_MASK BIT(3)
#define TPS65086_IRQ_FAULT_MASK BIT(7)
-/* DEVICEID Register field definitions */
-#define TPS65086_DEVICEID_PART_MASK GENMASK(3, 0)
-#define TPS65086_DEVICEID_OTP_MASK GENMASK(5, 4)
-#define TPS65086_DEVICEID_REV_MASK GENMASK(7, 6)
+/* DEVICEID1 Register field definitions */
+#define TPS6508640_ID 0x00
+#define TPS65086401_ID 0x01
+#define TPS6508641_ID 0x10
+#define TPS65086470_ID 0x70
+
+/* DEVICEID2 Register field definitions */
+#define TPS65086_DEVICEID2_PART_MASK GENMASK(3, 0)
+#define TPS65086_DEVICEID2_OTP_MASK GENMASK(5, 4)
+#define TPS65086_DEVICEID2_REV_MASK GENMASK(7, 6)
/* VID Masks */
#define BUCK_VID_MASK GENMASK(7, 1)
@@ -92,6 +99,8 @@ enum tps65086_irqs {
TPS65086_IRQ_FAULT,
};
+struct tps65086_regulator_config;
+
/**
* struct tps65086 - state holder for the tps65086 driver
*
@@ -100,6 +109,8 @@ enum tps65086_irqs {
struct tps65086 {
struct device *dev;
struct regmap *regmap;
+ unsigned int chip_id;
+ const struct tps65086_regulator_config *reg_config;
/* IRQ Data */
int irq;
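With the part number split out into DEVICEID1, probe code can pick the matching regulator config before registering anything; a hedged sketch:

#include <linux/errno.h>
#include <linux/mfd/tps65086.h>
#include <linux/regmap.h>

static int ex_tps65086_identify(struct tps65086 *tps)
{
	unsigned int id;
	int ret;

	ret = regmap_read(tps->regmap, TPS65086_DEVICEID1, &id);
	if (ret)
		return ret;

	switch (id) {
	case TPS6508640_ID:
	case TPS65086401_ID:
	case TPS6508641_ID:
	case TPS65086470_ID:
		tps->chip_id = id;
		return 0;
	default:
		return -ENODEV;
	}
}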
diff --git a/include/linux/mfd/tps65910.h b/include/linux/mfd/tps65910.h
index 701925db75b3..f67ef0a4e041 100644
--- a/include/linux/mfd/tps65910.h
+++ b/include/linux/mfd/tps65910.h
@@ -749,7 +749,7 @@
#define VDDCTRL_ST_SHIFT 0
-/*Register VDDCTRL_OP (0x28) bit definitios */
+/*Register VDDCTRL_OP (0x28) bit definitions */
#define VDDCTRL_OP_CMD_MASK 0x80
#define VDDCTRL_OP_CMD_SHIFT 7
#define VDDCTRL_OP_SEL_MASK 0x7F
diff --git a/include/linux/mfd/tps6594.h b/include/linux/mfd/tps6594.h
new file mode 100644
index 000000000000..3f7c5e23cd4c
--- /dev/null
+++ b/include/linux/mfd/tps6594.h
@@ -0,0 +1,1020 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Functions to access TPS6594 Power Management IC
+ *
+ * Copyright (C) 2023 BayLibre Incorporated - https://www.baylibre.com/
+ */
+
+#ifndef __LINUX_MFD_TPS6594_H
+#define __LINUX_MFD_TPS6594_H
+
+#include <linux/device.h>
+#include <linux/regmap.h>
+
+struct regmap_irq_chip_data;
+
+/* Chip id list */
+enum pmic_id {
+ TPS6594,
+ TPS6593,
+ LP8764,
+};
+
+/* Macro to get page index from register address */
+#define TPS6594_REG_TO_PAGE(reg) ((reg) >> 8)
+
+/* Registers for page 0 of TPS6594 */
+#define TPS6594_REG_DEV_REV 0x01
+
+#define TPS6594_REG_NVM_CODE_1 0x02
+#define TPS6594_REG_NVM_CODE_2 0x03
+
+#define TPS6594_REG_BUCKX_CTRL(buck_inst) (0x04 + ((buck_inst) << 1))
+#define TPS6594_REG_BUCKX_CONF(buck_inst) (0x05 + ((buck_inst) << 1))
+#define TPS6594_REG_BUCKX_VOUT_1(buck_inst) (0x0e + ((buck_inst) << 1))
+#define TPS6594_REG_BUCKX_VOUT_2(buck_inst) (0x0f + ((buck_inst) << 1))
+#define TPS6594_REG_BUCKX_PG_WINDOW(buck_inst) (0x18 + (buck_inst))
+
+#define TPS6594_REG_LDOX_CTRL(ldo_inst) (0x1d + (ldo_inst))
+#define TPS6594_REG_LDORTC_CTRL 0x22
+#define TPS6594_REG_LDOX_VOUT(ldo_inst) (0x23 + (ldo_inst))
+#define TPS6594_REG_LDOX_PG_WINDOW(ldo_inst) (0x27 + (ldo_inst))
+
+#define TPS6594_REG_VCCA_VMON_CTRL 0x2b
+#define TPS6594_REG_VCCA_PG_WINDOW 0x2c
+#define TPS6594_REG_VMON1_PG_WINDOW 0x2d
+#define TPS6594_REG_VMON1_PG_LEVEL 0x2e
+#define TPS6594_REG_VMON2_PG_WINDOW 0x2f
+#define TPS6594_REG_VMON2_PG_LEVEL 0x30
+
+#define TPS6594_REG_GPIOX_CONF(gpio_inst) (0x31 + (gpio_inst))
+#define TPS6594_REG_NPWRON_CONF 0x3c
+#define TPS6594_REG_GPIO_OUT_1 0x3d
+#define TPS6594_REG_GPIO_OUT_2 0x3e
+#define TPS6594_REG_GPIO_IN_1 0x3f
+#define TPS6594_REG_GPIO_IN_2 0x40
+#define TPS6594_REG_GPIOX_OUT(gpio_inst) (TPS6594_REG_GPIO_OUT_1 + (gpio_inst) / 8)
+#define TPS6594_REG_GPIOX_IN(gpio_inst) (TPS6594_REG_GPIO_IN_1 + (gpio_inst) / 8)
+
+
+#define TPS6594_REG_RAIL_SEL_1 0x41
+#define TPS6594_REG_RAIL_SEL_2 0x42
+#define TPS6594_REG_RAIL_SEL_3 0x43
+
+#define TPS6594_REG_FSM_TRIG_SEL_1 0x44
+#define TPS6594_REG_FSM_TRIG_SEL_2 0x45
+#define TPS6594_REG_FSM_TRIG_MASK_1 0x46
+#define TPS6594_REG_FSM_TRIG_MASK_2 0x47
+#define TPS6594_REG_FSM_TRIG_MASK_3 0x48
+
+#define TPS6594_REG_MASK_BUCK1_2 0x49
+#define TPS6594_REG_MASK_BUCK3_4 0x4a
+#define TPS6594_REG_MASK_BUCK5 0x4b
+#define TPS6594_REG_MASK_LDO1_2 0x4c
+#define TPS6594_REG_MASK_LDO3_4 0x4d
+#define TPS6594_REG_MASK_VMON 0x4e
+#define TPS6594_REG_MASK_GPIO1_8_FALL 0x4f
+#define TPS6594_REG_MASK_GPIO1_8_RISE 0x50
+#define TPS6594_REG_MASK_GPIO9_11 0x51
+#define TPS6594_REG_MASK_STARTUP 0x52
+#define TPS6594_REG_MASK_MISC 0x53
+#define TPS6594_REG_MASK_MODERATE_ERR 0x54
+#define TPS6594_REG_MASK_FSM_ERR 0x56
+#define TPS6594_REG_MASK_COMM_ERR 0x57
+#define TPS6594_REG_MASK_READBACK_ERR 0x58
+#define TPS6594_REG_MASK_ESM 0x59
+
+#define TPS6594_REG_INT_TOP 0x5a
+#define TPS6594_REG_INT_BUCK 0x5b
+#define TPS6594_REG_INT_BUCK1_2 0x5c
+#define TPS6594_REG_INT_BUCK3_4 0x5d
+#define TPS6594_REG_INT_BUCK5 0x5e
+#define TPS6594_REG_INT_LDO_VMON 0x5f
+#define TPS6594_REG_INT_LDO1_2 0x60
+#define TPS6594_REG_INT_LDO3_4 0x61
+#define TPS6594_REG_INT_VMON 0x62
+#define TPS6594_REG_INT_GPIO 0x63
+#define TPS6594_REG_INT_GPIO1_8 0x64
+#define TPS6594_REG_INT_STARTUP 0x65
+#define TPS6594_REG_INT_MISC 0x66
+#define TPS6594_REG_INT_MODERATE_ERR 0x67
+#define TPS6594_REG_INT_SEVERE_ERR 0x68
+#define TPS6594_REG_INT_FSM_ERR 0x69
+#define TPS6594_REG_INT_COMM_ERR 0x6a
+#define TPS6594_REG_INT_READBACK_ERR 0x6b
+#define TPS6594_REG_INT_ESM 0x6c
+
+#define TPS6594_REG_STAT_BUCK1_2 0x6d
+#define TPS6594_REG_STAT_BUCK3_4 0x6e
+#define TPS6594_REG_STAT_BUCK5 0x6f
+#define TPS6594_REG_STAT_LDO1_2 0x70
+#define TPS6594_REG_STAT_LDO3_4 0x71
+#define TPS6594_REG_STAT_VMON 0x72
+#define TPS6594_REG_STAT_STARTUP 0x73
+#define TPS6594_REG_STAT_MISC 0x74
+#define TPS6594_REG_STAT_MODERATE_ERR 0x75
+#define TPS6594_REG_STAT_SEVERE_ERR 0x76
+#define TPS6594_REG_STAT_READBACK_ERR 0x77
+
+#define TPS6594_REG_PGOOD_SEL_1 0x78
+#define TPS6594_REG_PGOOD_SEL_2 0x79
+#define TPS6594_REG_PGOOD_SEL_3 0x7a
+#define TPS6594_REG_PGOOD_SEL_4 0x7b
+
+#define TPS6594_REG_PLL_CTRL 0x7c
+
+#define TPS6594_REG_CONFIG_1 0x7d
+#define TPS6594_REG_CONFIG_2 0x7e
+
+#define TPS6594_REG_ENABLE_DRV_REG 0x80
+
+#define TPS6594_REG_MISC_CTRL 0x81
+
+#define TPS6594_REG_ENABLE_DRV_STAT 0x82
+
+#define TPS6594_REG_RECOV_CNT_REG_1 0x83
+#define TPS6594_REG_RECOV_CNT_REG_2 0x84
+
+#define TPS6594_REG_FSM_I2C_TRIGGERS 0x85
+#define TPS6594_REG_FSM_NSLEEP_TRIGGERS 0x86
+
+#define TPS6594_REG_BUCK_RESET_REG 0x87
+
+#define TPS6594_REG_SPREAD_SPECTRUM_1 0x88
+
+#define TPS6594_REG_FREQ_SEL 0x8a
+
+#define TPS6594_REG_FSM_STEP_SIZE 0x8b
+
+#define TPS6594_REG_LDO_RV_TIMEOUT_REG_1 0x8c
+#define TPS6594_REG_LDO_RV_TIMEOUT_REG_2 0x8d
+
+#define TPS6594_REG_USER_SPARE_REGS 0x8e
+
+#define TPS6594_REG_ESM_MCU_START_REG 0x8f
+#define TPS6594_REG_ESM_MCU_DELAY1_REG 0x90
+#define TPS6594_REG_ESM_MCU_DELAY2_REG 0x91
+#define TPS6594_REG_ESM_MCU_MODE_CFG 0x92
+#define TPS6594_REG_ESM_MCU_HMAX_REG 0x93
+#define TPS6594_REG_ESM_MCU_HMIN_REG 0x94
+#define TPS6594_REG_ESM_MCU_LMAX_REG 0x95
+#define TPS6594_REG_ESM_MCU_LMIN_REG 0x96
+#define TPS6594_REG_ESM_MCU_ERR_CNT_REG 0x97
+#define TPS6594_REG_ESM_SOC_START_REG 0x98
+#define TPS6594_REG_ESM_SOC_DELAY1_REG 0x99
+#define TPS6594_REG_ESM_SOC_DELAY2_REG 0x9a
+#define TPS6594_REG_ESM_SOC_MODE_CFG 0x9b
+#define TPS6594_REG_ESM_SOC_HMAX_REG 0x9c
+#define TPS6594_REG_ESM_SOC_HMIN_REG 0x9d
+#define TPS6594_REG_ESM_SOC_LMAX_REG 0x9e
+#define TPS6594_REG_ESM_SOC_LMIN_REG 0x9f
+#define TPS6594_REG_ESM_SOC_ERR_CNT_REG 0xa0
+
+#define TPS6594_REG_REGISTER_LOCK 0xa1
+
+#define TPS6594_REG_MANUFACTURING_VER 0xa6
+
+#define TPS6594_REG_CUSTOMER_NVM_ID_REG 0xa7
+
+#define TPS6594_REG_VMON_CONF_REG 0xa8
+
+#define TPS6594_REG_SOFT_REBOOT_REG 0xab
+
+#define TPS6594_REG_RTC_SECONDS 0xb5
+#define TPS6594_REG_RTC_MINUTES 0xb6
+#define TPS6594_REG_RTC_HOURS 0xb7
+#define TPS6594_REG_RTC_DAYS 0xb8
+#define TPS6594_REG_RTC_MONTHS 0xb9
+#define TPS6594_REG_RTC_YEARS 0xba
+#define TPS6594_REG_RTC_WEEKS 0xbb
+
+#define TPS6594_REG_ALARM_SECONDS 0xbc
+#define TPS6594_REG_ALARM_MINUTES 0xbd
+#define TPS6594_REG_ALARM_HOURS 0xbe
+#define TPS6594_REG_ALARM_DAYS 0xbf
+#define TPS6594_REG_ALARM_MONTHS 0xc0
+#define TPS6594_REG_ALARM_YEARS 0xc1
+
+#define TPS6594_REG_RTC_CTRL_1 0xc2
+#define TPS6594_REG_RTC_CTRL_2 0xc3
+#define TPS6594_REG_RTC_STATUS 0xc4
+#define TPS6594_REG_RTC_INTERRUPTS 0xc5
+#define TPS6594_REG_RTC_COMP_LSB 0xc6
+#define TPS6594_REG_RTC_COMP_MSB 0xc7
+#define TPS6594_REG_RTC_RESET_STATUS 0xc8
+
+#define TPS6594_REG_SCRATCH_PAD_REG_1 0xc9
+#define TPS6594_REG_SCRATCH_PAD_REG_2 0xca
+#define TPS6594_REG_SCRATCH_PAD_REG_3 0xcb
+#define TPS6594_REG_SCRATCH_PAD_REG_4 0xcc
+
+#define TPS6594_REG_PFSM_DELAY_REG_1 0xcd
+#define TPS6594_REG_PFSM_DELAY_REG_2 0xce
+#define TPS6594_REG_PFSM_DELAY_REG_3 0xcf
+#define TPS6594_REG_PFSM_DELAY_REG_4 0xd0
+
+/* Registers for page 1 of TPS6594 */
+#define TPS6594_REG_SERIAL_IF_CONFIG 0x11a
+#define TPS6594_REG_I2C1_ID 0x122
+#define TPS6594_REG_I2C2_ID 0x123
+
+/* Registers for page 4 of TPS6594 */
+#define TPS6594_REG_WD_ANSWER_REG 0x401
+#define TPS6594_REG_WD_QUESTION_ANSW_CNT 0x402
+#define TPS6594_REG_WD_WIN1_CFG 0x403
+#define TPS6594_REG_WD_WIN2_CFG 0x404
+#define TPS6594_REG_WD_LONGWIN_CFG 0x405
+#define TPS6594_REG_WD_MODE_REG 0x406
+#define TPS6594_REG_WD_QA_CFG 0x407
+#define TPS6594_REG_WD_ERR_STATUS 0x408
+#define TPS6594_REG_WD_THR_CFG 0x409
+#define TPS6594_REG_DWD_FAIL_CNT_REG 0x40a
+
+/* BUCKX_CTRL register field definition */
+#define TPS6594_BIT_BUCK_EN BIT(0)
+#define TPS6594_BIT_BUCK_FPWM BIT(1)
+#define TPS6594_BIT_BUCK_FPWM_MP BIT(2)
+#define TPS6594_BIT_BUCK_VSEL BIT(3)
+#define TPS6594_BIT_BUCK_VMON_EN BIT(4)
+#define TPS6594_BIT_BUCK_PLDN BIT(5)
+#define TPS6594_BIT_BUCK_RV_SEL BIT(7)
+
+/* BUCKX_CONF register field definition */
+#define TPS6594_MASK_BUCK_SLEW_RATE GENMASK(2, 0)
+#define TPS6594_MASK_BUCK_ILIM GENMASK(5, 3)
+
+/* BUCKX_PG_WINDOW register field definition */
+#define TPS6594_MASK_BUCK_OV_THR GENMASK(2, 0)
+#define TPS6594_MASK_BUCK_UV_THR GENMASK(5, 3)
+
+/* BUCKX VSET */
+#define TPS6594_MASK_BUCKS_VSET GENMASK(7, 0)
+
+/* LDOX_CTRL register field definition */
+#define TPS6594_BIT_LDO_EN BIT(0)
+#define TPS6594_BIT_LDO_SLOW_RAMP BIT(1)
+#define TPS6594_BIT_LDO_VMON_EN BIT(4)
+#define TPS6594_MASK_LDO_PLDN GENMASK(6, 5)
+#define TPS6594_BIT_LDO_RV_SEL BIT(7)
+
+/* LDORTC_CTRL register field definition */
+#define TPS6594_BIT_LDORTC_DIS BIT(0)
+
+/* LDOX_VOUT register field definition */
+#define TPS6594_MASK_LDO123_VSET GENMASK(6, 1)
+#define TPS6594_MASK_LDO4_VSET GENMASK(6, 0)
+#define TPS6594_BIT_LDO_BYPASS BIT(7)
+
+/* LDOX_PG_WINDOW register field definition */
+#define TPS6594_MASK_LDO_OV_THR GENMASK(2, 0)
+#define TPS6594_MASK_LDO_UV_THR GENMASK(5, 3)
+
+/* VCCA_VMON_CTRL register field definition */
+#define TPS6594_BIT_VMON_EN BIT(0)
+#define TPS6594_BIT_VMON1_EN BIT(1)
+#define TPS6594_BIT_VMON1_RV_SEL BIT(2)
+#define TPS6594_BIT_VMON2_EN BIT(3)
+#define TPS6594_BIT_VMON2_RV_SEL BIT(4)
+#define TPS6594_BIT_VMON_DEGLITCH_SEL BIT(5)
+
+/* VCCA_PG_WINDOW register field definition */
+#define TPS6594_MASK_VCCA_OV_THR GENMASK(2, 0)
+#define TPS6594_MASK_VCCA_UV_THR GENMASK(5, 3)
+#define TPS6594_BIT_VCCA_PG_SET BIT(6)
+
+/* VMONX_PG_WINDOW register field definition */
+#define TPS6594_MASK_VMONX_OV_THR GENMASK(2, 0)
+#define TPS6594_MASK_VMONX_UV_THR GENMASK(5, 3)
+#define TPS6594_BIT_VMONX_RANGE BIT(6)
+
+/* GPIOX_CONF register field definition */
+#define TPS6594_BIT_GPIO_DIR BIT(0)
+#define TPS6594_BIT_GPIO_OD BIT(1)
+#define TPS6594_BIT_GPIO_PU_SEL BIT(2)
+#define TPS6594_BIT_GPIO_PU_PD_EN BIT(3)
+#define TPS6594_BIT_GPIO_DEGLITCH_EN BIT(4)
+#define TPS6594_MASK_GPIO_SEL GENMASK(7, 5)
+
+/* NPWRON_CONF register field definition */
+#define TPS6594_BIT_NRSTOUT_OD BIT(0)
+#define TPS6594_BIT_ENABLE_PU_SEL BIT(2)
+#define TPS6594_BIT_ENABLE_PU_PD_EN BIT(3)
+#define TPS6594_BIT_ENABLE_DEGLITCH_EN BIT(4)
+#define TPS6594_BIT_ENABLE_POL BIT(5)
+#define TPS6594_MASK_NPWRON_SEL GENMASK(7, 6)
+
+/* GPIO_OUT_X register field definition */
+#define TPS6594_BIT_GPIOX_OUT(gpio_inst) BIT((gpio_inst) % 8)
+
+/* GPIO_IN_X register field definition */
+#define TPS6594_BIT_GPIOX_IN(gpio_inst) BIT((gpio_inst) % 8)
+#define TPS6594_BIT_NPWRON_IN BIT(3)
+
+/* RAIL_SEL_1 register field definition */
+#define TPS6594_MASK_BUCK1_GRP_SEL GENMASK(1, 0)
+#define TPS6594_MASK_BUCK2_GRP_SEL GENMASK(3, 2)
+#define TPS6594_MASK_BUCK3_GRP_SEL GENMASK(5, 4)
+#define TPS6594_MASK_BUCK4_GRP_SEL GENMASK(7, 6)
+
+/* RAIL_SEL_2 register field definition */
+#define TPS6594_MASK_BUCK5_GRP_SEL GENMASK(1, 0)
+#define TPS6594_MASK_LDO1_GRP_SEL GENMASK(3, 2)
+#define TPS6594_MASK_LDO2_GRP_SEL GENMASK(5, 4)
+#define TPS6594_MASK_LDO3_GRP_SEL GENMASK(7, 6)
+
+/* RAIL_SEL_3 register field definition */
+#define TPS6594_MASK_LDO4_GRP_SEL GENMASK(1, 0)
+#define TPS6594_MASK_VCCA_GRP_SEL GENMASK(3, 2)
+#define TPS6594_MASK_VMON1_GRP_SEL GENMASK(5, 4)
+#define TPS6594_MASK_VMON2_GRP_SEL GENMASK(7, 6)
+
+/* FSM_TRIG_SEL_1 register field definition */
+#define TPS6594_MASK_MCU_RAIL_TRIG GENMASK(1, 0)
+#define TPS6594_MASK_SOC_RAIL_TRIG GENMASK(3, 2)
+#define TPS6594_MASK_OTHER_RAIL_TRIG GENMASK(5, 4)
+#define TPS6594_MASK_SEVERE_ERR_TRIG GENMASK(7, 6)
+
+/* FSM_TRIG_SEL_2 register field definition */
+#define TPS6594_MASK_MODERATE_ERR_TRIG GENMASK(1, 0)
+
+/* FSM_TRIG_MASK_X register field definition */
+#define TPS6594_BIT_GPIOX_FSM_MASK(gpio_inst) BIT(((gpio_inst) << 1) % 8)
+#define TPS6594_BIT_GPIOX_FSM_MASK_POL(gpio_inst) BIT(((gpio_inst) << 1) % 8 + 1)
+
+/* MASK_BUCKX register field definition */
+#define TPS6594_BIT_BUCKX_OV_MASK(buck_inst) BIT(((buck_inst) << 2) % 8)
+#define TPS6594_BIT_BUCKX_UV_MASK(buck_inst) BIT(((buck_inst) << 2) % 8 + 1)
+#define TPS6594_BIT_BUCKX_ILIM_MASK(buck_inst) BIT(((buck_inst) << 2) % 8 + 3)
+
+/* MASK_LDOX register field definition */
+#define TPS6594_BIT_LDOX_OV_MASK(ldo_inst) BIT(((ldo_inst) << 2) % 8)
+#define TPS6594_BIT_LDOX_UV_MASK(ldo_inst) BIT(((ldo_inst) << 2) % 8 + 1)
+#define TPS6594_BIT_LDOX_ILIM_MASK(ldo_inst) BIT(((ldo_inst) << 2) % 8 + 3)
+
+/* MASK_VMON register field definition */
+#define TPS6594_BIT_VCCA_OV_MASK BIT(0)
+#define TPS6594_BIT_VCCA_UV_MASK BIT(1)
+#define TPS6594_BIT_VMON1_OV_MASK BIT(2)
+#define TPS6594_BIT_VMON1_UV_MASK BIT(3)
+#define TPS6594_BIT_VMON2_OV_MASK BIT(5)
+#define TPS6594_BIT_VMON2_UV_MASK BIT(6)
+
+/* MASK_GPIOX register field definition */
+#define TPS6594_BIT_GPIOX_FALL_MASK(gpio_inst) BIT((gpio_inst) < 8 ? \
+ (gpio_inst) : (gpio_inst) % 8)
+#define TPS6594_BIT_GPIOX_RISE_MASK(gpio_inst) BIT((gpio_inst) < 8 ? \
+ (gpio_inst) : (gpio_inst) % 8 + 3)
+
+/* MASK_STARTUP register field definition */
+#define TPS6594_BIT_NPWRON_START_MASK BIT(0)
+#define TPS6594_BIT_ENABLE_MASK BIT(1)
+#define TPS6594_BIT_FSD_MASK BIT(4)
+#define TPS6594_BIT_SOFT_REBOOT_MASK BIT(5)
+
+/* MASK_MISC register field definition */
+#define TPS6594_BIT_BIST_PASS_MASK BIT(0)
+#define TPS6594_BIT_EXT_CLK_MASK BIT(1)
+#define TPS6594_BIT_TWARN_MASK BIT(3)
+
+/* MASK_MODERATE_ERR register field definition */
+#define TPS6594_BIT_BIST_FAIL_MASK BIT(1)
+#define TPS6594_BIT_REG_CRC_ERR_MASK BIT(2)
+#define TPS6594_BIT_SPMI_ERR_MASK BIT(4)
+#define TPS6594_BIT_NPWRON_LONG_MASK BIT(5)
+#define TPS6594_BIT_NINT_READBACK_MASK BIT(6)
+#define TPS6594_BIT_NRSTOUT_READBACK_MASK BIT(7)
+
+/* MASK_FSM_ERR register field definition */
+#define TPS6594_BIT_IMM_SHUTDOWN_MASK BIT(0)
+#define TPS6594_BIT_ORD_SHUTDOWN_MASK BIT(1)
+#define TPS6594_BIT_MCU_PWR_ERR_MASK BIT(2)
+#define TPS6594_BIT_SOC_PWR_ERR_MASK BIT(3)
+
+/* MASK_COMM_ERR register field definition */
+#define TPS6594_BIT_COMM_FRM_ERR_MASK BIT(0)
+#define TPS6594_BIT_COMM_CRC_ERR_MASK BIT(1)
+#define TPS6594_BIT_COMM_ADR_ERR_MASK BIT(3)
+#define TPS6594_BIT_I2C2_CRC_ERR_MASK BIT(5)
+#define TPS6594_BIT_I2C2_ADR_ERR_MASK BIT(7)
+
+/* MASK_READBACK_ERR register field definition */
+#define TPS6594_BIT_EN_DRV_READBACK_MASK BIT(0)
+#define TPS6594_BIT_NRSTOUT_SOC_READBACK_MASK BIT(3)
+
+/* MASK_ESM register field definition */
+#define TPS6594_BIT_ESM_SOC_PIN_MASK BIT(0)
+#define TPS6594_BIT_ESM_SOC_FAIL_MASK BIT(1)
+#define TPS6594_BIT_ESM_SOC_RST_MASK BIT(2)
+#define TPS6594_BIT_ESM_MCU_PIN_MASK BIT(3)
+#define TPS6594_BIT_ESM_MCU_FAIL_MASK BIT(4)
+#define TPS6594_BIT_ESM_MCU_RST_MASK BIT(5)
+
+/* INT_TOP register field definition */
+#define TPS6594_BIT_BUCK_INT BIT(0)
+#define TPS6594_BIT_LDO_VMON_INT BIT(1)
+#define TPS6594_BIT_GPIO_INT BIT(2)
+#define TPS6594_BIT_STARTUP_INT BIT(3)
+#define TPS6594_BIT_MISC_INT BIT(4)
+#define TPS6594_BIT_MODERATE_ERR_INT BIT(5)
+#define TPS6594_BIT_SEVERE_ERR_INT BIT(6)
+#define TPS6594_BIT_FSM_ERR_INT BIT(7)
+
+/* INT_BUCK register field definition */
+#define TPS6594_BIT_BUCK1_2_INT BIT(0)
+#define TPS6594_BIT_BUCK3_4_INT BIT(1)
+#define TPS6594_BIT_BUCK5_INT BIT(2)
+
+/* INT_BUCKX register field definition */
+#define TPS6594_BIT_BUCKX_OV_INT(buck_inst) BIT(((buck_inst) << 2) % 8)
+#define TPS6594_BIT_BUCKX_UV_INT(buck_inst) BIT(((buck_inst) << 2) % 8 + 1)
+#define TPS6594_BIT_BUCKX_SC_INT(buck_inst) BIT(((buck_inst) << 2) % 8 + 2)
+#define TPS6594_BIT_BUCKX_ILIM_INT(buck_inst) BIT(((buck_inst) << 2) % 8 + 3)
+
+/* INT_LDO_VMON register field definition */
+#define TPS6594_BIT_LDO1_2_INT BIT(0)
+#define TPS6594_BIT_LDO3_4_INT BIT(1)
+#define TPS6594_BIT_VCCA_INT BIT(4)
+
+/* INT_LDOX register field definition */
+#define TPS6594_BIT_LDOX_OV_INT(ldo_inst) BIT(((ldo_inst) << 2) % 8)
+#define TPS6594_BIT_LDOX_UV_INT(ldo_inst) BIT(((ldo_inst) << 2) % 8 + 1)
+#define TPS6594_BIT_LDOX_SC_INT(ldo_inst) BIT(((ldo_inst) << 2) % 8 + 2)
+#define TPS6594_BIT_LDOX_ILIM_INT(ldo_inst) BIT(((ldo_inst) << 2) % 8 + 3)
+
+/* INT_VMON register field definition */
+#define TPS6594_BIT_VCCA_OV_INT BIT(0)
+#define TPS6594_BIT_VCCA_UV_INT BIT(1)
+#define TPS6594_BIT_VMON1_OV_INT BIT(2)
+#define TPS6594_BIT_VMON1_UV_INT BIT(3)
+#define TPS6594_BIT_VMON1_RV_INT BIT(4)
+#define TPS6594_BIT_VMON2_OV_INT BIT(5)
+#define TPS6594_BIT_VMON2_UV_INT BIT(6)
+#define TPS6594_BIT_VMON2_RV_INT BIT(7)
+
+/* INT_GPIO register field definition */
+#define TPS6594_BIT_GPIO9_INT BIT(0)
+#define TPS6594_BIT_GPIO10_INT BIT(1)
+#define TPS6594_BIT_GPIO11_INT BIT(2)
+#define TPS6594_BIT_GPIO1_8_INT BIT(3)
+
+/* INT_GPIOX register field definition */
+#define TPS6594_BIT_GPIOX_INT(gpio_inst) BIT(gpio_inst)
+
+/* INT_STARTUP register field definition */
+#define TPS6594_BIT_NPWRON_START_INT BIT(0)
+#define TPS6594_BIT_ENABLE_INT BIT(1)
+#define TPS6594_BIT_RTC_INT BIT(2)
+#define TPS6594_BIT_FSD_INT BIT(4)
+#define TPS6594_BIT_SOFT_REBOOT_INT BIT(5)
+
+/* INT_MISC register field definition */
+#define TPS6594_BIT_BIST_PASS_INT BIT(0)
+#define TPS6594_BIT_EXT_CLK_INT BIT(1)
+#define TPS6594_BIT_TWARN_INT BIT(3)
+
+/* INT_MODERATE_ERR register field definition */
+#define TPS6594_BIT_TSD_ORD_INT BIT(0)
+#define TPS6594_BIT_BIST_FAIL_INT BIT(1)
+#define TPS6594_BIT_REG_CRC_ERR_INT BIT(2)
+#define TPS6594_BIT_RECOV_CNT_INT BIT(3)
+#define TPS6594_BIT_SPMI_ERR_INT BIT(4)
+#define TPS6594_BIT_NPWRON_LONG_INT BIT(5)
+#define TPS6594_BIT_NINT_READBACK_INT BIT(6)
+#define TPS6594_BIT_NRSTOUT_READBACK_INT BIT(7)
+
+/* INT_SEVERE_ERR register field definition */
+#define TPS6594_BIT_TSD_IMM_INT BIT(0)
+#define TPS6594_BIT_VCCA_OVP_INT BIT(1)
+#define TPS6594_BIT_PFSM_ERR_INT BIT(2)
+
+/* INT_FSM_ERR register field definition */
+#define TPS6594_BIT_IMM_SHUTDOWN_INT BIT(0)
+#define TPS6594_BIT_ORD_SHUTDOWN_INT BIT(1)
+#define TPS6594_BIT_MCU_PWR_ERR_INT BIT(2)
+#define TPS6594_BIT_SOC_PWR_ERR_INT BIT(3)
+#define TPS6594_BIT_COMM_ERR_INT BIT(4)
+#define TPS6594_BIT_READBACK_ERR_INT BIT(5)
+#define TPS6594_BIT_ESM_INT BIT(6)
+#define TPS6594_BIT_WD_INT BIT(7)
+
+/* INT_COMM_ERR register field definition */
+#define TPS6594_BIT_COMM_FRM_ERR_INT BIT(0)
+#define TPS6594_BIT_COMM_CRC_ERR_INT BIT(1)
+#define TPS6594_BIT_COMM_ADR_ERR_INT BIT(3)
+#define TPS6594_BIT_I2C2_CRC_ERR_INT BIT(5)
+#define TPS6594_BIT_I2C2_ADR_ERR_INT BIT(7)
+
+/* INT_READBACK_ERR register field definition */
+#define TPS6594_BIT_EN_DRV_READBACK_INT BIT(0)
+#define TPS6594_BIT_NRSTOUT_SOC_READBACK_INT BIT(3)
+
+/* INT_ESM register field definition */
+#define TPS6594_BIT_ESM_SOC_PIN_INT BIT(0)
+#define TPS6594_BIT_ESM_SOC_FAIL_INT BIT(1)
+#define TPS6594_BIT_ESM_SOC_RST_INT BIT(2)
+#define TPS6594_BIT_ESM_MCU_PIN_INT BIT(3)
+#define TPS6594_BIT_ESM_MCU_FAIL_INT BIT(4)
+#define TPS6594_BIT_ESM_MCU_RST_INT BIT(5)
+
+/* STAT_BUCKX register field definition */
+#define TPS6594_BIT_BUCKX_OV_STAT(buck_inst) BIT(((buck_inst) << 2) % 8)
+#define TPS6594_BIT_BUCKX_UV_STAT(buck_inst) BIT(((buck_inst) << 2) % 8 + 1)
+#define TPS6594_BIT_BUCKX_ILIM_STAT(buck_inst) BIT(((buck_inst) << 2) % 8 + 3)
+
+/* STAT_LDOX register field definition */
+#define TPS6594_BIT_LDOX_OV_STAT(ldo_inst) BIT(((ldo_inst) << 2) % 8)
+#define TPS6594_BIT_LDOX_UV_STAT(ldo_inst) BIT(((ldo_inst) << 2) % 8 + 1)
+#define TPS6594_BIT_LDOX_ILIM_STAT(ldo_inst) BIT(((ldo_inst) << 2) % 8 + 3)
+
+/* STAT_VMON register field definition */
+#define TPS6594_BIT_VCCA_OV_STAT BIT(0)
+#define TPS6594_BIT_VCCA_UV_STAT BIT(1)
+#define TPS6594_BIT_VMON1_OV_STAT BIT(2)
+#define TPS6594_BIT_VMON1_UV_STAT BIT(3)
+#define TPS6594_BIT_VMON2_OV_STAT BIT(5)
+#define TPS6594_BIT_VMON2_UV_STAT BIT(6)
+
+/* STAT_STARTUP register field definition */
+#define TPS6594_BIT_ENABLE_STAT BIT(1)
+
+/* STAT_MISC register field definition */
+#define TPS6594_BIT_EXT_CLK_STAT BIT(1)
+#define TPS6594_BIT_TWARN_STAT BIT(3)
+
+/* STAT_MODERATE_ERR register field definition */
+#define TPS6594_BIT_TSD_ORD_STAT BIT(0)
+
+/* STAT_SEVERE_ERR register field definition */
+#define TPS6594_BIT_TSD_IMM_STAT BIT(0)
+#define TPS6594_BIT_VCCA_OVP_STAT BIT(1)
+
+/* STAT_READBACK_ERR register field definition */
+#define TPS6594_BIT_EN_DRV_READBACK_STAT BIT(0)
+#define TPS6594_BIT_NINT_READBACK_STAT BIT(1)
+#define TPS6594_BIT_NRSTOUT_READBACK_STAT BIT(2)
+#define TPS6594_BIT_NRSTOUT_SOC_READBACK_STAT BIT(3)
+
+/* PGOOD_SEL_1 register field definition */
+#define TPS6594_MASK_PGOOD_SEL_BUCK1 GENMASK(1, 0)
+#define TPS6594_MASK_PGOOD_SEL_BUCK2 GENMASK(3, 2)
+#define TPS6594_MASK_PGOOD_SEL_BUCK3 GENMASK(5, 4)
+#define TPS6594_MASK_PGOOD_SEL_BUCK4 GENMASK(7, 6)
+
+/* PGOOD_SEL_2 register field definition */
+#define TPS6594_MASK_PGOOD_SEL_BUCK5 GENMASK(1, 0)
+
+/* PGOOD_SEL_3 register field definition */
+#define TPS6594_MASK_PGOOD_SEL_LDO1 GENMASK(1, 0)
+#define TPS6594_MASK_PGOOD_SEL_LDO2 GENMASK(3, 2)
+#define TPS6594_MASK_PGOOD_SEL_LDO3 GENMASK(5, 4)
+#define TPS6594_MASK_PGOOD_SEL_LDO4 GENMASK(7, 6)
+
+/* PGOOD_SEL_4 register field definition */
+#define TPS6594_BIT_PGOOD_SEL_VCCA BIT(0)
+#define TPS6594_BIT_PGOOD_SEL_VMON1 BIT(1)
+#define TPS6594_BIT_PGOOD_SEL_VMON2 BIT(2)
+#define TPS6594_BIT_PGOOD_SEL_TDIE_WARN BIT(3)
+#define TPS6594_BIT_PGOOD_SEL_NRSTOUT BIT(4)
+#define TPS6594_BIT_PGOOD_SEL_NRSTOUT_SOC BIT(5)
+#define TPS6594_BIT_PGOOD_POL BIT(6)
+#define TPS6594_BIT_PGOOD_WINDOW BIT(7)
+
+/* PLL_CTRL register field definition */
+#define TPS6594_MASK_EXT_CLK_FREQ GENMASK(1, 0)
+
+/* CONFIG_1 register field definition */
+#define TPS6594_BIT_TWARN_LEVEL BIT(0)
+#define TPS6594_BIT_TSD_ORD_LEVEL BIT(1)
+#define TPS6594_BIT_I2C1_HS BIT(3)
+#define TPS6594_BIT_I2C2_HS BIT(4)
+#define TPS6594_BIT_EN_ILIM_FSM_CTRL BIT(5)
+#define TPS6594_BIT_NSLEEP1_MASK BIT(6)
+#define TPS6594_BIT_NSLEEP2_MASK BIT(7)
+
+/* CONFIG_2 register field definition */
+#define TPS6594_BIT_BB_CHARGER_EN BIT(0)
+#define TPS6594_BIT_BB_ICHR BIT(1)
+#define TPS6594_MASK_BB_VEOC GENMASK(3, 2)
+#define TPS6594_BB_EOC_RDY BIT(7)
+
+/* ENABLE_DRV_REG register field definition */
+#define TPS6594_BIT_ENABLE_DRV BIT(0)
+
+/* MISC_CTRL register field definition */
+#define TPS6594_BIT_NRSTOUT BIT(0)
+#define TPS6594_BIT_NRSTOUT_SOC BIT(1)
+#define TPS6594_BIT_LPM_EN BIT(2)
+#define TPS6594_BIT_CLKMON_EN BIT(3)
+#define TPS6594_BIT_AMUXOUT_EN BIT(4)
+#define TPS6594_BIT_SEL_EXT_CLK BIT(5)
+#define TPS6594_MASK_SYNCCLKOUT_FREQ_SEL GENMASK(7, 6)
+
+/* ENABLE_DRV_STAT register field definition */
+#define TPS6594_BIT_EN_DRV_IN BIT(0)
+#define TPS6594_BIT_NRSTOUT_IN BIT(1)
+#define TPS6594_BIT_NRSTOUT_SOC_IN BIT(2)
+#define TPS6594_BIT_FORCE_EN_DRV_LOW BIT(3)
+#define TPS6594_BIT_SPMI_LPM_EN BIT(4)
+
+/* RECOV_CNT_REG_1 register field definition */
+#define TPS6594_MASK_RECOV_CNT GENMASK(3, 0)
+
+/* RECOV_CNT_REG_2 register field definition */
+#define TPS6594_MASK_RECOV_CNT_THR GENMASK(3, 0)
+#define TPS6594_BIT_RECOV_CNT_CLR BIT(4)
+
+/* FSM_I2C_TRIGGERS register field definition */
+#define TPS6594_BIT_TRIGGER_I2C(bit) BIT(bit)
+
+/* FSM_NSLEEP_TRIGGERS register field definition */
+#define TPS6594_BIT_NSLEEP1B BIT(0)
+#define TPS6594_BIT_NSLEEP2B BIT(1)
+
+/* BUCK_RESET_REG register field definition */
+#define TPS6594_BIT_BUCKX_RESET(buck_inst) BIT(buck_inst)
+
+/* SPREAD_SPECTRUM_1 register field definition */
+#define TPS6594_MASK_SS_DEPTH GENMASK(1, 0)
+#define TPS6594_BIT_SS_EN BIT(2)
+
+/* FREQ_SEL register field definition */
+#define TPS6594_BIT_BUCKX_FREQ_SEL(buck_inst) BIT(buck_inst)
+
+/* FSM_STEP_SIZE register field definition */
+#define TPS6594_MASK_PFSM_DELAY_STEP GENMASK(4, 0)
+
+/* LDO_RV_TIMEOUT_REG_1 register field definition */
+#define TPS6594_MASK_LDO1_RV_TIMEOUT GENMASK(3, 0)
+#define TPS6594_MASK_LDO2_RV_TIMEOUT GENMASK(7, 4)
+
+/* LDO_RV_TIMEOUT_REG_2 register field definition */
+#define TPS6594_MASK_LDO3_RV_TIMEOUT GENMASK(3, 0)
+#define TPS6594_MASK_LDO4_RV_TIMEOUT GENMASK(7, 4)
+
+/* USER_SPARE_REGS register field definition */
+#define TPS6594_BIT_USER_SPARE(bit) BIT(bit)
+
+/* ESM_MCU_START_REG register field definition */
+#define TPS6594_BIT_ESM_MCU_START BIT(0)
+
+/* ESM_MCU_MODE_CFG register field definition */
+#define TPS6594_MASK_ESM_MCU_ERR_CNT_TH GENMASK(3, 0)
+#define TPS6594_BIT_ESM_MCU_ENDRV BIT(5)
+#define TPS6594_BIT_ESM_MCU_EN BIT(6)
+#define TPS6594_BIT_ESM_MCU_MODE BIT(7)
+
+/* ESM_MCU_ERR_CNT_REG register field definition */
+#define TPS6594_MASK_ESM_MCU_ERR_CNT GENMASK(4, 0)
+
+/* ESM_SOC_START_REG register field definition */
+#define TPS6594_BIT_ESM_SOC_START BIT(0)
+
+/* ESM_SOC_MODE_CFG register field definition */
+#define TPS6594_MASK_ESM_SOC_ERR_CNT_TH GENMASK(3, 0)
+#define TPS6594_BIT_ESM_SOC_ENDRV BIT(5)
+#define TPS6594_BIT_ESM_SOC_EN BIT(6)
+#define TPS6594_BIT_ESM_SOC_MODE BIT(7)
+
+/* ESM_SOC_ERR_CNT_REG register field definition */
+#define TPS6594_MASK_ESM_SOC_ERR_CNT GENMASK(4, 0)
+
+/* REGISTER_LOCK register field definition */
+#define TPS6594_BIT_REGISTER_LOCK_STATUS BIT(0)
+
+/* VMON_CONF register field definition */
+#define TPS6594_MASK_VMON1_SLEW_RATE GENMASK(2, 0)
+#define TPS6594_MASK_VMON2_SLEW_RATE GENMASK(5, 3)
+
+/* SOFT_REBOOT_REG register field definition */
+#define TPS6594_BIT_SOFT_REBOOT BIT(0)
+
+/* RTC_SECONDS & ALARM_SECONDS register field definition */
+#define TPS6594_MASK_SECOND_0 GENMASK(3, 0)
+#define TPS6594_MASK_SECOND_1 GENMASK(6, 4)
+
+/* RTC_MINUTES & ALARM_MINUTES register field definition */
+#define TPS6594_MASK_MINUTE_0 GENMASK(3, 0)
+#define TPS6594_MASK_MINUTE_1 GENMASK(6, 4)
+
+/* RTC_HOURS & ALARM_HOURS register field definition */
+#define TPS6594_MASK_HOUR_0 GENMASK(3, 0)
+#define TPS6594_MASK_HOUR_1 GENMASK(5, 4)
+#define TPS6594_BIT_PM_NAM BIT(7)
+
+/* RTC_DAYS & ALARM_DAYS register field definition */
+#define TPS6594_MASK_DAY_0 GENMASK(3, 0)
+#define TPS6594_MASK_DAY_1 GENMASK(5, 4)
+
+/* RTC_MONTHS & ALARM_MONTHS register field definition */
+#define TPS6594_MASK_MONTH_0 GENMASK(3, 0)
+#define TPS6594_BIT_MONTH_1 BIT(4)
+
+/* RTC_YEARS & ALARM_YEARS register field definition */
+#define TPS6594_MASK_YEAR_0 GENMASK(3, 0)
+#define TPS6594_MASK_YEAR_1 GENMASK(7, 4)
+
+/* RTC_WEEKS register field definition */
+#define TPS6594_MASK_WEEK GENMASK(2, 0)
+
+/* RTC_CTRL_1 register field definition */
+#define TPS6594_BIT_STOP_RTC BIT(0)
+#define TPS6594_BIT_ROUND_30S BIT(1)
+#define TPS6594_BIT_AUTO_COMP BIT(2)
+#define TPS6594_BIT_MODE_12_24 BIT(3)
+#define TPS6594_BIT_SET_32_COUNTER BIT(5)
+#define TPS6594_BIT_GET_TIME BIT(6)
+#define TPS6594_BIT_RTC_V_OPT BIT(7)
+
+/* RTC_CTRL_2 register field definition */
+#define TPS6594_BIT_XTAL_EN BIT(0)
+#define TPS6594_MASK_XTAL_SEL GENMASK(2, 1)
+#define TPS6594_BIT_LP_STANDBY_SEL BIT(3)
+#define TPS6594_BIT_FAST_BIST BIT(4)
+#define TPS6594_MASK_STARTUP_DEST GENMASK(6, 5)
+#define TPS6594_BIT_FIRST_STARTUP_DONE BIT(7)
+
+/* RTC_STATUS register field definition */
+#define TPS6594_BIT_RUN BIT(1)
+#define TPS6594_BIT_TIMER BIT(5)
+#define TPS6594_BIT_ALARM BIT(6)
+#define TPS6594_BIT_POWER_UP BIT(7)
+
+/* RTC_INTERRUPTS register field definition */
+#define TPS6594_MASK_EVERY GENMASK(1, 0)
+#define TPS6594_BIT_IT_TIMER BIT(2)
+#define TPS6594_BIT_IT_ALARM BIT(3)
+
+/* RTC_RESET_STATUS register field definition */
+#define TPS6594_BIT_RESET_STATUS_RTC BIT(0)
+
+/* SERIAL_IF_CONFIG register field definition */
+#define TPS6594_BIT_I2C_SPI_SEL BIT(0)
+#define TPS6594_BIT_I2C1_SPI_CRC_EN BIT(1)
+#define TPS6594_BIT_I2C2_CRC_EN BIT(2)
+#define TPS6594_MASK_T_CRC GENMASK(7, 3)
+
+/* WD_QUESTION_ANSW_CNT register field definition */
+#define TPS6594_MASK_WD_QUESTION GENMASK(3, 0)
+#define TPS6594_MASK_WD_ANSW_CNT GENMASK(5, 4)
+
+/* WD_MODE_REG register field definition */
+#define TPS6594_BIT_WD_RETURN_LONGWIN BIT(0)
+#define TPS6594_BIT_WD_MODE_SELECT BIT(1)
+#define TPS6594_BIT_WD_PWRHOLD BIT(2)
+
+/* WD_QA_CFG register field definition */
+#define TPS6594_MASK_WD_QUESTION_SEED GENMASK(3, 0)
+#define TPS6594_MASK_WD_QA_LFSR GENMASK(5, 4)
+#define TPS6594_MASK_WD_QA_FDBK GENMASK(7, 6)
+
+/* WD_ERR_STATUS register field definition */
+#define TPS6594_BIT_WD_LONGWIN_TIMEOUT_INT BIT(0)
+#define TPS6594_BIT_WD_TIMEOUT BIT(1)
+#define TPS6594_BIT_WD_TRIG_EARLY BIT(2)
+#define TPS6594_BIT_WD_ANSW_EARLY BIT(3)
+#define TPS6594_BIT_WD_SEQ_ERR BIT(4)
+#define TPS6594_BIT_WD_ANSW_ERR BIT(5)
+#define TPS6594_BIT_WD_FAIL_INT BIT(6)
+#define TPS6594_BIT_WD_RST_INT BIT(7)
+
+/* WD_THR_CFG register field definition */
+#define TPS6594_MASK_WD_RST_TH GENMASK(2, 0)
+#define TPS6594_MASK_WD_FAIL_TH GENMASK(5, 3)
+#define TPS6594_BIT_WD_EN BIT(6)
+#define TPS6594_BIT_WD_RST_EN BIT(7)
+
+/* WD_FAIL_CNT_REG register field definition */
+#define TPS6594_MASK_WD_FAIL_CNT GENMASK(3, 0)
+#define TPS6594_BIT_WD_FIRST_OK BIT(5)
+#define TPS6594_BIT_WD_BAD_EVENT BIT(6)
+
+/* CRC8 polynomial for I2C & SPI protocols */
+#define TPS6594_CRC8_POLYNOMIAL 0x07
+
+/* IRQs */
+enum tps6594_irqs {
+ /* INT_BUCK1_2 register */
+ TPS6594_IRQ_BUCK1_OV,
+ TPS6594_IRQ_BUCK1_UV,
+ TPS6594_IRQ_BUCK1_SC,
+ TPS6594_IRQ_BUCK1_ILIM,
+ TPS6594_IRQ_BUCK2_OV,
+ TPS6594_IRQ_BUCK2_UV,
+ TPS6594_IRQ_BUCK2_SC,
+ TPS6594_IRQ_BUCK2_ILIM,
+ /* INT_BUCK3_4 register */
+ TPS6594_IRQ_BUCK3_OV,
+ TPS6594_IRQ_BUCK3_UV,
+ TPS6594_IRQ_BUCK3_SC,
+ TPS6594_IRQ_BUCK3_ILIM,
+ TPS6594_IRQ_BUCK4_OV,
+ TPS6594_IRQ_BUCK4_UV,
+ TPS6594_IRQ_BUCK4_SC,
+ TPS6594_IRQ_BUCK4_ILIM,
+ /* INT_BUCK5 register */
+ TPS6594_IRQ_BUCK5_OV,
+ TPS6594_IRQ_BUCK5_UV,
+ TPS6594_IRQ_BUCK5_SC,
+ TPS6594_IRQ_BUCK5_ILIM,
+ /* INT_LDO1_2 register */
+ TPS6594_IRQ_LDO1_OV,
+ TPS6594_IRQ_LDO1_UV,
+ TPS6594_IRQ_LDO1_SC,
+ TPS6594_IRQ_LDO1_ILIM,
+ TPS6594_IRQ_LDO2_OV,
+ TPS6594_IRQ_LDO2_UV,
+ TPS6594_IRQ_LDO2_SC,
+ TPS6594_IRQ_LDO2_ILIM,
+ /* INT_LDO3_4 register */
+ TPS6594_IRQ_LDO3_OV,
+ TPS6594_IRQ_LDO3_UV,
+ TPS6594_IRQ_LDO3_SC,
+ TPS6594_IRQ_LDO3_ILIM,
+ TPS6594_IRQ_LDO4_OV,
+ TPS6594_IRQ_LDO4_UV,
+ TPS6594_IRQ_LDO4_SC,
+ TPS6594_IRQ_LDO4_ILIM,
+ /* INT_VMON register */
+ TPS6594_IRQ_VCCA_OV,
+ TPS6594_IRQ_VCCA_UV,
+ TPS6594_IRQ_VMON1_OV,
+ TPS6594_IRQ_VMON1_UV,
+ TPS6594_IRQ_VMON1_RV,
+ TPS6594_IRQ_VMON2_OV,
+ TPS6594_IRQ_VMON2_UV,
+ TPS6594_IRQ_VMON2_RV,
+ /* INT_GPIO register */
+ TPS6594_IRQ_GPIO9,
+ TPS6594_IRQ_GPIO10,
+ TPS6594_IRQ_GPIO11,
+ /* INT_GPIO1_8 register */
+ TPS6594_IRQ_GPIO1,
+ TPS6594_IRQ_GPIO2,
+ TPS6594_IRQ_GPIO3,
+ TPS6594_IRQ_GPIO4,
+ TPS6594_IRQ_GPIO5,
+ TPS6594_IRQ_GPIO6,
+ TPS6594_IRQ_GPIO7,
+ TPS6594_IRQ_GPIO8,
+ /* INT_STARTUP register */
+ TPS6594_IRQ_NPWRON_START,
+ TPS6594_IRQ_ENABLE,
+ TPS6594_IRQ_FSD,
+ TPS6594_IRQ_SOFT_REBOOT,
+ /* INT_MISC register */
+ TPS6594_IRQ_BIST_PASS,
+ TPS6594_IRQ_EXT_CLK,
+ TPS6594_IRQ_TWARN,
+ /* INT_MODERATE_ERR register */
+ TPS6594_IRQ_TSD_ORD,
+ TPS6594_IRQ_BIST_FAIL,
+ TPS6594_IRQ_REG_CRC_ERR,
+ TPS6594_IRQ_RECOV_CNT,
+ TPS6594_IRQ_SPMI_ERR,
+ TPS6594_IRQ_NPWRON_LONG,
+ TPS6594_IRQ_NINT_READBACK,
+ TPS6594_IRQ_NRSTOUT_READBACK,
+ /* INT_SEVERE_ERR register */
+ TPS6594_IRQ_TSD_IMM,
+ TPS6594_IRQ_VCCA_OVP,
+ TPS6594_IRQ_PFSM_ERR,
+ /* INT_FSM_ERR register */
+ TPS6594_IRQ_IMM_SHUTDOWN,
+ TPS6594_IRQ_ORD_SHUTDOWN,
+ TPS6594_IRQ_MCU_PWR_ERR,
+ TPS6594_IRQ_SOC_PWR_ERR,
+ /* INT_COMM_ERR register */
+ TPS6594_IRQ_COMM_FRM_ERR,
+ TPS6594_IRQ_COMM_CRC_ERR,
+ TPS6594_IRQ_COMM_ADR_ERR,
+ TPS6594_IRQ_I2C2_CRC_ERR,
+ TPS6594_IRQ_I2C2_ADR_ERR,
+ /* INT_READBACK_ERR register */
+ TPS6594_IRQ_EN_DRV_READBACK,
+ TPS6594_IRQ_NRSTOUT_SOC_READBACK,
+ /* INT_ESM register */
+ TPS6594_IRQ_ESM_SOC_PIN,
+ TPS6594_IRQ_ESM_SOC_FAIL,
+ TPS6594_IRQ_ESM_SOC_RST,
+ /* RTC_STATUS register */
+ TPS6594_IRQ_TIMER,
+ TPS6594_IRQ_ALARM,
+ TPS6594_IRQ_POWER_UP,
+};
+
+#define TPS6594_IRQ_NAME_BUCK1_OV "buck1_ov"
+#define TPS6594_IRQ_NAME_BUCK1_UV "buck1_uv"
+#define TPS6594_IRQ_NAME_BUCK1_SC "buck1_sc"
+#define TPS6594_IRQ_NAME_BUCK1_ILIM "buck1_ilim"
+#define TPS6594_IRQ_NAME_BUCK2_OV "buck2_ov"
+#define TPS6594_IRQ_NAME_BUCK2_UV "buck2_uv"
+#define TPS6594_IRQ_NAME_BUCK2_SC "buck2_sc"
+#define TPS6594_IRQ_NAME_BUCK2_ILIM "buck2_ilim"
+#define TPS6594_IRQ_NAME_BUCK3_OV "buck3_ov"
+#define TPS6594_IRQ_NAME_BUCK3_UV "buck3_uv"
+#define TPS6594_IRQ_NAME_BUCK3_SC "buck3_sc"
+#define TPS6594_IRQ_NAME_BUCK3_ILIM "buck3_ilim"
+#define TPS6594_IRQ_NAME_BUCK4_OV "buck4_ov"
+#define TPS6594_IRQ_NAME_BUCK4_UV "buck4_uv"
+#define TPS6594_IRQ_NAME_BUCK4_SC "buck4_sc"
+#define TPS6594_IRQ_NAME_BUCK4_ILIM "buck4_ilim"
+#define TPS6594_IRQ_NAME_BUCK5_OV "buck5_ov"
+#define TPS6594_IRQ_NAME_BUCK5_UV "buck5_uv"
+#define TPS6594_IRQ_NAME_BUCK5_SC "buck5_sc"
+#define TPS6594_IRQ_NAME_BUCK5_ILIM "buck5_ilim"
+#define TPS6594_IRQ_NAME_LDO1_OV "ldo1_ov"
+#define TPS6594_IRQ_NAME_LDO1_UV "ldo1_uv"
+#define TPS6594_IRQ_NAME_LDO1_SC "ldo1_sc"
+#define TPS6594_IRQ_NAME_LDO1_ILIM "ldo1_ilim"
+#define TPS6594_IRQ_NAME_LDO2_OV "ldo2_ov"
+#define TPS6594_IRQ_NAME_LDO2_UV "ldo2_uv"
+#define TPS6594_IRQ_NAME_LDO2_SC "ldo2_sc"
+#define TPS6594_IRQ_NAME_LDO2_ILIM "ldo2_ilim"
+#define TPS6594_IRQ_NAME_LDO3_OV "ldo3_ov"
+#define TPS6594_IRQ_NAME_LDO3_UV "ldo3_uv"
+#define TPS6594_IRQ_NAME_LDO3_SC "ldo3_sc"
+#define TPS6594_IRQ_NAME_LDO3_ILIM "ldo3_ilim"
+#define TPS6594_IRQ_NAME_LDO4_OV "ldo4_ov"
+#define TPS6594_IRQ_NAME_LDO4_UV "ldo4_uv"
+#define TPS6594_IRQ_NAME_LDO4_SC "ldo4_sc"
+#define TPS6594_IRQ_NAME_LDO4_ILIM "ldo4_ilim"
+#define TPS6594_IRQ_NAME_VCCA_OV "vcca_ov"
+#define TPS6594_IRQ_NAME_VCCA_UV "vcca_uv"
+#define TPS6594_IRQ_NAME_VMON1_OV "vmon1_ov"
+#define TPS6594_IRQ_NAME_VMON1_UV "vmon1_uv"
+#define TPS6594_IRQ_NAME_VMON1_RV "vmon1_rv"
+#define TPS6594_IRQ_NAME_VMON2_OV "vmon2_ov"
+#define TPS6594_IRQ_NAME_VMON2_UV "vmon2_uv"
+#define TPS6594_IRQ_NAME_VMON2_RV "vmon2_rv"
+#define TPS6594_IRQ_NAME_GPIO9 "gpio9"
+#define TPS6594_IRQ_NAME_GPIO10 "gpio10"
+#define TPS6594_IRQ_NAME_GPIO11 "gpio11"
+#define TPS6594_IRQ_NAME_GPIO1 "gpio1"
+#define TPS6594_IRQ_NAME_GPIO2 "gpio2"
+#define TPS6594_IRQ_NAME_GPIO3 "gpio3"
+#define TPS6594_IRQ_NAME_GPIO4 "gpio4"
+#define TPS6594_IRQ_NAME_GPIO5 "gpio5"
+#define TPS6594_IRQ_NAME_GPIO6 "gpio6"
+#define TPS6594_IRQ_NAME_GPIO7 "gpio7"
+#define TPS6594_IRQ_NAME_GPIO8 "gpio8"
+#define TPS6594_IRQ_NAME_NPWRON_START "npwron_start"
+#define TPS6594_IRQ_NAME_ENABLE "enable"
+#define TPS6594_IRQ_NAME_FSD "fsd"
+#define TPS6594_IRQ_NAME_SOFT_REBOOT "soft_reboot"
+#define TPS6594_IRQ_NAME_BIST_PASS "bist_pass"
+#define TPS6594_IRQ_NAME_EXT_CLK "ext_clk"
+#define TPS6594_IRQ_NAME_TWARN "twarn"
+#define TPS6594_IRQ_NAME_TSD_ORD "tsd_ord"
+#define TPS6594_IRQ_NAME_BIST_FAIL "bist_fail"
+#define TPS6594_IRQ_NAME_REG_CRC_ERR "reg_crc_err"
+#define TPS6594_IRQ_NAME_RECOV_CNT "recov_cnt"
+#define TPS6594_IRQ_NAME_SPMI_ERR "spmi_err"
+#define TPS6594_IRQ_NAME_NPWRON_LONG "npwron_long"
+#define TPS6594_IRQ_NAME_NINT_READBACK "nint_readback"
+#define TPS6594_IRQ_NAME_NRSTOUT_READBACK "nrstout_readback"
+#define TPS6594_IRQ_NAME_TSD_IMM "tsd_imm"
+#define TPS6594_IRQ_NAME_VCCA_OVP "vcca_ovp"
+#define TPS6594_IRQ_NAME_PFSM_ERR "pfsm_err"
+#define TPS6594_IRQ_NAME_IMM_SHUTDOWN "imm_shutdown"
+#define TPS6594_IRQ_NAME_ORD_SHUTDOWN "ord_shutdown"
+#define TPS6594_IRQ_NAME_MCU_PWR_ERR "mcu_pwr_err"
+#define TPS6594_IRQ_NAME_SOC_PWR_ERR "soc_pwr_err"
+#define TPS6594_IRQ_NAME_COMM_FRM_ERR "comm_frm_err"
+#define TPS6594_IRQ_NAME_COMM_CRC_ERR "comm_crc_err"
+#define TPS6594_IRQ_NAME_COMM_ADR_ERR "comm_adr_err"
+#define TPS6594_IRQ_NAME_EN_DRV_READBACK "en_drv_readback"
+#define TPS6594_IRQ_NAME_NRSTOUT_SOC_READBACK "nrstout_soc_readback"
+#define TPS6594_IRQ_NAME_ESM_SOC_PIN "esm_soc_pin"
+#define TPS6594_IRQ_NAME_ESM_SOC_FAIL "esm_soc_fail"
+#define TPS6594_IRQ_NAME_ESM_SOC_RST "esm_soc_rst"
+#define TPS6594_IRQ_NAME_TIMER "timer"
+#define TPS6594_IRQ_NAME_ALARM "alarm"
+#define TPS6594_IRQ_NAME_POWERUP "powerup"
+
+/**
+ * struct tps6594 - device private data structure
+ *
+ * @dev: MFD parent device
+ * @chip_id: chip ID
+ * @reg: I2C slave address or SPI chip select number
+ * @use_crc: if true, use CRC for I2C and SPI interface protocols
+ * @regmap: regmap for accessing the device registers
+ * @irq: irq generated by the device
+ * @irq_data: regmap irq data used for the irq chip
+ */
+struct tps6594 {
+ struct device *dev;
+ unsigned long chip_id;
+ unsigned short reg;
+ bool use_crc;
+ struct regmap *regmap;
+ int irq;
+ struct regmap_irq_chip_data *irq_data;
+};
+
+bool tps6594_is_volatile_reg(struct device *dev, unsigned int reg);
+int tps6594_device_init(struct tps6594 *tps, bool enable_crc);
+
+#endif /* __LINUX_MFD_TPS6594_H */
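The TPS6594_CRC8_POLYNOMIAL value defined above plugs straight into the kernel's generic CRC8 helpers. A minimal sketch, assuming MSB-first bit ordering and that the CRC is computed over the serialized frame bytes:

	#include <linux/crc8.h>

	DECLARE_CRC8_TABLE(tps6594_crc_table);

	static void tps6594_crc_init(void)
	{
		crc8_populate_msb(tps6594_crc_table, TPS6594_CRC8_POLYNOMIAL);
	}

	/* CRC over the serialized frame bytes (address, register, data) */
	static u8 tps6594_frame_crc(const u8 *buf, size_t len)
	{
		return crc8(tps6594_crc_table, buf, len, CRC8_INIT_VALUE);
	}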
diff --git a/include/linux/mfd/twl.h b/include/linux/mfd/twl.h
index 6e3d99b7a0ee..c062d91a67d9 100644
--- a/include/linux/mfd/twl.h
+++ b/include/linux/mfd/twl.h
@@ -593,9 +593,6 @@ struct twl4030_gpio_platform_data {
*/
u32 pullups;
u32 pulldowns;
-
- int (*setup)(struct device *dev,
- unsigned gpio, unsigned ngpio);
};
struct twl4030_madc_platform_data {
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
index f6de4b6ecfc7..d0f9b522f328 100644
--- a/include/linux/mhi.h
+++ b/include/linux/mhi.h
@@ -266,6 +266,7 @@ struct mhi_event_config {
* struct mhi_controller_config - Root MHI controller configuration
* @max_channels: Maximum number of channels supported
* @timeout_ms: Timeout value for operations. 0 means use default
+ * @ready_timeout_ms: Timeout value for waiting for the device to be ready (optional)
* @buf_len: Size of automatically allocated buffers. 0 means use default
* @num_channels: Number of channels defined in @ch_cfg
* @ch_cfg: Array of defined channels
@@ -277,6 +278,7 @@ struct mhi_event_config {
struct mhi_controller_config {
u32 max_channels;
u32 timeout_ms;
+ u32 ready_timeout_ms;
u32 buf_len;
u32 num_channels;
const struct mhi_channel_config *ch_cfg;
@@ -299,6 +301,10 @@ struct mhi_controller_config {
* @iova_start: IOMMU starting address for data (required)
* @iova_stop: IOMMU stop address for data (required)
* @fw_image: Firmware image name for normal booting (optional)
+ * @fw_data: Firmware image data content for normal booting, used only
+ * if fw_image is NULL and fbc_download is true (optional)
+ * @fw_sz: Firmware image data size for normal booting, used only if fw_image
+ * is NULL and fbc_download is true (optional)
* @edl_image: Firmware image name for emergency download mode (optional)
* @rddm_size: RAM dump size that host should allocate for debugging purpose
* @sbl_size: SBL image size downloaded through BHIe (optional)
@@ -326,6 +332,7 @@ struct mhi_controller_config {
* @pm_mutex: Mutex for suspend/resume operation
* @pm_lock: Lock for protecting MHI power management state
* @timeout_ms: Timeout in ms for state transitions
+ * @ready_timeout_ms: Timeout in ms for waiting for the device to be ready (optional)
* @pm_state: MHI power management state
* @db_access: DB access states
* @ee: MHI device execution environment
@@ -384,6 +391,8 @@ struct mhi_controller {
dma_addr_t iova_start;
dma_addr_t iova_stop;
const char *fw_image;
+ const u8 *fw_data;
+ size_t fw_sz;
const char *edl_image;
size_t rddm_size;
size_t sbl_size;
@@ -413,6 +422,7 @@ struct mhi_controller {
struct mutex pm_mutex;
rwlock_t pm_lock;
u32 timeout_ms;
+ u32 ready_timeout_ms;
u32 pm_state;
u32 db_access;
enum mhi_ee_type ee;
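The new ready_timeout_ms knob slots into the existing configuration struct. A hedged sketch of how a controller driver might use it; my_channels[] and my_events[] stand in for the driver's real channel/event tables:

	static struct mhi_channel_config my_channels[2];	/* real definitions elided */
	static struct mhi_event_config my_events[1];		/* real definitions elided */

	static const struct mhi_controller_config my_mhi_config = {
		.max_channels	  = 128,
		.timeout_ms	  = 8000,	/* per-operation timeout */
		.ready_timeout_ms = 50000,	/* slow device: allow 50 s to reach READY */
		.num_channels	  = ARRAY_SIZE(my_channels),
		.ch_cfg		  = my_channels,
		.num_events	  = ARRAY_SIZE(my_events),
		.event_cfg	  = my_events,
	};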
diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h
index f198a8ac7ee7..11bf3212f782 100644
--- a/include/linux/mhi_ep.h
+++ b/include/linux/mhi_ep.h
@@ -50,6 +50,27 @@ struct mhi_ep_db_info {
};
/**
+ * struct mhi_ep_buf_info - MHI Endpoint transfer buffer info
+ * @mhi_dev: MHI device associated with this buffer
+ * @dev_addr: Address of the buffer in the endpoint
+ * @host_addr: Address of the buffer in the host
+ * @size: Size of the buffer
+ * @code: Transfer completion code
+ * @cb: Callback to be executed by controller drivers after transfer completion (async)
+ * @cb_buf: Opaque buffer to be passed to the callback
+ */
+struct mhi_ep_buf_info {
+ struct mhi_ep_device *mhi_dev;
+ void *dev_addr;
+ u64 host_addr;
+ size_t size;
+ int code;
+
+ void (*cb)(struct mhi_ep_buf_info *buf_info);
+ void *cb_buf;
+};
+
+/**
* struct mhi_ep_cntrl - MHI Endpoint controller structure
* @cntrl_dev: Pointer to the struct device of physical bus acting as the MHI
* Endpoint controller
@@ -82,8 +103,10 @@ struct mhi_ep_db_info {
* @raise_irq: CB function for raising IRQ to the host
* @alloc_map: CB function for allocating memory in endpoint for storing host context and mapping it
* @unmap_free: CB function to unmap and free the allocated memory in endpoint for storing host context
- * @read_from_host: CB function for reading from host memory from endpoint
- * @write_to_host: CB function for writing to host memory from endpoint
+ * @read_sync: CB function for reading from host memory synchronously
+ * @write_sync: CB function for writing to host memory synchronously
+ * @read_async: CB function for reading from host memory asynchronously
+ * @write_async: CB function for writing to host memory asynchronously
* @mhi_state: MHI Endpoint state
* @max_chan: Maximum channels supported by the endpoint controller
* @mru: MRU (Maximum Receive Unit) value of the endpoint controller
@@ -128,14 +151,19 @@ struct mhi_ep_cntrl {
struct work_struct reset_work;
struct work_struct cmd_ring_work;
struct work_struct ch_ring_work;
+ struct kmem_cache *ring_item_cache;
+ struct kmem_cache *ev_ring_el_cache;
+ struct kmem_cache *tre_buf_cache;
void (*raise_irq)(struct mhi_ep_cntrl *mhi_cntrl, u32 vector);
int (*alloc_map)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t *phys_ptr,
void __iomem **virt, size_t size);
void (*unmap_free)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t phys,
void __iomem *virt, size_t size);
- int (*read_from_host)(struct mhi_ep_cntrl *mhi_cntrl, u64 from, void *to, size_t size);
- int (*write_to_host)(struct mhi_ep_cntrl *mhi_cntrl, void *from, u64 to, size_t size);
+ int (*read_sync)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
+ int (*write_sync)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
+ int (*read_async)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
+ int (*write_async)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
enum mhi_state mhi_state;
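With the split into sync and async callbacks, a controller driver can complete host transfers from its DMA engine and report back through mhi_ep_buf_info::cb. A sketch under stated assumptions: my_dma_submit() is a hypothetical helper that copies size bytes and invokes done(data) on completion.

	/* Hypothetical: copy size bytes to host memory, call done(data) when finished */
	int my_dma_submit(void *src, u64 dst, size_t size,
			  void (*done)(void *data), void *data);

	static void my_write_done(void *data)
	{
		struct mhi_ep_buf_info *buf_info = data;

		buf_info->code = 0;		/* transfer completion code */
		if (buf_info->cb)
			buf_info->cb(buf_info);	/* hand the buffer back to the stack */
	}

	static int my_write_async(struct mhi_ep_cntrl *mhi_cntrl,
				  struct mhi_ep_buf_info *buf_info)
	{
		return my_dma_submit(buf_info->dev_addr, buf_info->host_addr,
				     buf_info->size, my_write_done, buf_info);
	}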
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 8bef1ab62bba..591bf5b5e8dc 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -41,9 +41,10 @@
#define PHY_ID_KSZ9477 0x00221631
/* struct phy_device dev_flags definitions */
-#define MICREL_PHY_50MHZ_CLK 0x00000001
-#define MICREL_PHY_FXEN 0x00000002
-#define MICREL_KSZ8_P1_ERRATA 0x00000003
+#define MICREL_PHY_50MHZ_CLK BIT(0)
+#define MICREL_PHY_FXEN BIT(1)
+#define MICREL_KSZ8_P1_ERRATA BIT(2)
+#define MICREL_NO_EEE BIT(3)
#define MICREL_KSZ9021_EXTREG_CTRL 0xB
#define MICREL_KSZ9021_EXTREG_DATA_WRITE 0xC
@@ -63,6 +64,10 @@
#define KSZ886X_BMCR_DISABLE_TRANSMIT BIT(1)
#define KSZ886X_BMCR_DISABLE_LED BIT(0)
+/* PHY Special Control/Status Register (Reg 31) */
#define KSZ886X_CTRL_MDIX_STAT BIT(4)
+#define KSZ886X_CTRL_FORCE_LINK BIT(3)
+#define KSZ886X_CTRL_PWRSAVE BIT(2)
+#define KSZ886X_CTRL_REMOTE_LOOPBACK BIT(1)
#endif /* _MICREL_PHY_H */
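Note the quiet bug fix in the dev_flags hunk above: as a plain value, MICREL_KSZ8_P1_ERRATA was 0x3, which aliased MICREL_PHY_50MHZ_CLK | MICREL_PHY_FXEN whenever dev_flags was tested bitwise. With BIT() each flag owns a distinct bit, so the usual test is unambiguous:

	if (phydev->dev_flags & MICREL_KSZ8_P1_ERRATA) {
		/* apply the port-1 erratum workaround */
	}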
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 6241a1596a75..2ce13e8a309b 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -7,8 +7,8 @@
#include <linux/migrate_mode.h>
#include <linux/hugetlb.h>
-typedef struct page *new_page_t(struct page *page, unsigned long private);
-typedef void free_page_t(struct page *page, unsigned long private);
+typedef struct folio *new_folio_t(struct folio *folio, unsigned long private);
+typedef void free_folio_t(struct folio *folio, unsigned long private);
struct migration_target_control;
@@ -67,16 +67,16 @@ int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
struct folio *src, enum migrate_mode mode, int extra_count);
int migrate_folio(struct address_space *mapping, struct folio *dst,
struct folio *src, enum migrate_mode mode);
-int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
+int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free,
unsigned long private, enum migrate_mode mode, int reason,
unsigned int *ret_succeeded);
-struct page *alloc_migration_target(struct page *page, unsigned long private);
+struct folio *alloc_migration_target(struct folio *src, unsigned long private);
bool isolate_movable_page(struct page *page, isolate_mode_t mode);
int migrate_huge_page_move_mapping(struct address_space *mapping,
struct folio *dst, struct folio *src);
-void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep,
- spinlock_t *ptl);
+void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl)
+ __releases(ptl);
void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
void folio_migrate_copy(struct folio *newfolio, struct folio *folio);
int folio_migrate_mapping(struct address_space *mapping,
@@ -85,11 +85,11 @@ int folio_migrate_mapping(struct address_space *mapping,
#else
static inline void putback_movable_pages(struct list_head *l) {}
-static inline int migrate_pages(struct list_head *l, new_page_t new,
- free_page_t free, unsigned long private, enum migrate_mode mode,
- int reason, unsigned int *ret_succeeded)
+static inline int migrate_pages(struct list_head *l, new_folio_t new,
+ free_folio_t free, unsigned long private,
+ enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
{ return -ENOSYS; }
-static inline struct page *alloc_migration_target(struct page *page,
+static inline struct folio *alloc_migration_target(struct folio *src,
unsigned long private)
{ return NULL; }
static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode)
@@ -142,10 +142,10 @@ const struct movable_operations *page_movable_ops(struct page *page)
}
#ifdef CONFIG_NUMA_BALANCING
-int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
+int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
int node);
#else
-static inline int migrate_misplaced_page(struct page *page,
+static inline int migrate_misplaced_folio(struct folio *folio,
struct vm_area_struct *vma, int node)
{
return -EAGAIN; /* can't migrate now */
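Under the folio-based API, allocation callbacks take and return folios. A minimal sketch of a new_folio_t implementation, assuming the caller only needs an order-matched destination (real callers typically go through alloc_migration_target() for NUMA-aware placement):

	#include <linux/migrate.h>

	static struct folio *my_new_folio(struct folio *src, unsigned long private)
	{
		return folio_alloc(GFP_HIGHUSER_MOVABLE, folio_order(src));
	}

	/* ... */
	ret = migrate_pages(&folio_list, my_new_folio, NULL, 0,
			    MIGRATE_SYNC, MR_MEMORY_HOTPLUG, NULL);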
diff --git a/include/linux/mii_timestamper.h b/include/linux/mii_timestamper.h
index fa940bbaf8ae..26b04f73f214 100644
--- a/include/linux/mii_timestamper.h
+++ b/include/linux/mii_timestamper.h
@@ -9,6 +9,7 @@
#include <linux/device.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
+#include <linux/net_tstamp.h>
struct phy_device;
@@ -51,7 +52,8 @@ struct mii_timestamper {
struct sk_buff *skb, int type);
int (*hwtstamp)(struct mii_timestamper *mii_ts,
- struct ifreq *ifreq);
+ struct kernel_hwtstamp_config *kernel_config,
+ struct netlink_ext_ack *extack);
void (*link_state)(struct mii_timestamper *mii_ts,
struct phy_device *phydev);
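The hwtstamp callback now receives a parsed kernel_hwtstamp_config plus an extack instead of a raw ifreq. A hedged sketch of the new shape; my_ts_hwtstamp and its acceptance policy are placeholders:

	static int my_ts_hwtstamp(struct mii_timestamper *mii_ts,
				  struct kernel_hwtstamp_config *cfg,
				  struct netlink_ext_ack *extack)
	{
		if (cfg->tx_type != HWTSTAMP_TX_OFF &&
		    cfg->tx_type != HWTSTAMP_TX_ON) {
			NL_SET_ERR_MSG(extack, "unsupported Tx timestamping mode");
			return -ERANGE;
		}
		/* program the PHY timestamping engine from cfg->rx_filter here */
		return 0;
	}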
diff --git a/include/linux/minmax.h b/include/linux/minmax.h
index 396df1121bff..2ec559284a9f 100644
--- a/include/linux/minmax.h
+++ b/include/linux/minmax.h
@@ -2,59 +2,77 @@
#ifndef _LINUX_MINMAX_H
#define _LINUX_MINMAX_H
+#include <linux/build_bug.h>
+#include <linux/compiler.h>
#include <linux/const.h>
+#include <linux/types.h>
/*
* min()/max()/clamp() macros must accomplish three things:
*
- * - avoid multiple evaluations of the arguments (so side-effects like
+ * - Avoid multiple evaluations of the arguments (so side-effects like
* "x++" happen only once) when non-constant.
- * - perform strict type-checking (to generate warnings instead of
- * nasty runtime surprises). See the "unnecessary" pointer comparison
- * in __typecheck().
- * - retain result as a constant expressions when called with only
+ * - Retain result as a constant expression when called with only
* constant expressions (to avoid tripping VLA warnings in stack
* allocation usage).
+ * - Perform signed vs unsigned type-checking (to generate compile
+ * errors instead of nasty runtime surprises).
+ * - Unsigned char/short are always promoted to signed int and can be
+ * compared against signed or unsigned arguments.
+ * - Unsigned arguments can be compared against non-negative signed constants.
+ * - Comparison of a signed argument against an unsigned constant fails
+ * even if the constant is below __INT_MAX__ and could be cast to int.
*/
#define __typecheck(x, y) \
(!!(sizeof((typeof(x) *)1 == (typeof(y) *)1)))
-#define __no_side_effects(x, y) \
- (__is_constexpr(x) && __is_constexpr(y))
+/* is_signed_type() isn't a constexpr for pointer types */
+#define __is_signed(x) \
+ __builtin_choose_expr(__is_constexpr(is_signed_type(typeof(x))), \
+ is_signed_type(typeof(x)), 0)
-#define __safe_cmp(x, y) \
- (__typecheck(x, y) && __no_side_effects(x, y))
+/* True for a non-negative signed int constant */
+#define __is_noneg_int(x) \
+ (__builtin_choose_expr(__is_constexpr(x) && __is_signed(x), x, -1) >= 0)
-#define __cmp(x, y, op) ((x) op (y) ? (x) : (y))
+#define __types_ok(x, y) \
+ (__is_signed(x) == __is_signed(y) || \
+ __is_signed((x) + 0) == __is_signed((y) + 0) || \
+ __is_noneg_int(x) || __is_noneg_int(y))
-#define __cmp_once(x, y, unique_x, unique_y, op) ({ \
- typeof(x) unique_x = (x); \
- typeof(y) unique_y = (y); \
- __cmp(unique_x, unique_y, op); })
+#define __cmp_op_min <
+#define __cmp_op_max >
-#define __careful_cmp(x, y, op) \
- __builtin_choose_expr(__safe_cmp(x, y), \
- __cmp(x, y, op), \
- __cmp_once(x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y), op))
+#define __cmp(op, x, y) ((x) __cmp_op_##op (y) ? (x) : (y))
+
+#define __cmp_once(op, x, y, unique_x, unique_y) ({ \
+ typeof(x) unique_x = (x); \
+ typeof(y) unique_y = (y); \
+ static_assert(__types_ok(x, y), \
+ #op "(" #x ", " #y ") signedness error, fix types or consider u" #op "() before " #op "_t()"); \
+ __cmp(op, unique_x, unique_y); })
+
+#define __careful_cmp(op, x, y) \
+ __builtin_choose_expr(__is_constexpr((x) - (y)), \
+ __cmp(op, x, y), \
+ __cmp_once(op, x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y)))
#define __clamp(val, lo, hi) \
((val) >= (hi) ? (hi) : ((val) <= (lo) ? (lo) : (val)))
-#define __clamp_once(val, lo, hi, unique_val, unique_lo, unique_hi) ({ \
- typeof(val) unique_val = (val); \
- typeof(lo) unique_lo = (lo); \
- typeof(hi) unique_hi = (hi); \
- __clamp(unique_val, unique_lo, unique_hi); })
-
-#define __clamp_input_check(lo, hi) \
- (BUILD_BUG_ON_ZERO(__builtin_choose_expr( \
- __is_constexpr((lo) > (hi)), (lo) > (hi), false)))
+#define __clamp_once(val, lo, hi, unique_val, unique_lo, unique_hi) ({ \
+ typeof(val) unique_val = (val); \
+ typeof(lo) unique_lo = (lo); \
+ typeof(hi) unique_hi = (hi); \
+ static_assert(__builtin_choose_expr(__is_constexpr((lo) > (hi)), \
+ (lo) <= (hi), true), \
+ "clamp() low limit " #lo " greater than high limit " #hi); \
+ static_assert(__types_ok(val, lo), "clamp() 'lo' signedness error"); \
+ static_assert(__types_ok(val, hi), "clamp() 'hi' signedness error"); \
+ __clamp(unique_val, unique_lo, unique_hi); })
#define __careful_clamp(val, lo, hi) ({ \
- __clamp_input_check(lo, hi) + \
- __builtin_choose_expr(__typecheck(val, lo) && __typecheck(val, hi) && \
- __typecheck(hi, lo) && __is_constexpr(val) && \
- __is_constexpr(lo) && __is_constexpr(hi), \
+ __builtin_choose_expr(__is_constexpr((val) - (lo) + (hi)), \
__clamp(val, lo, hi), \
__clamp_once(val, lo, hi, __UNIQUE_ID(__val), \
__UNIQUE_ID(__lo), __UNIQUE_ID(__hi))); })
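The practical upshot of the rules documented above: mixed-signedness min()/max() calls now trip a static_assert with a readable message, while umin()/umax() (added in the next hunk) provide the escape hatch for values known to be non-negative. Illustrative cases per those rules:

	unsigned int u = 10;
	int s = -1;

	min(u, 2);	/* OK: unsigned argument vs non-negative signed constant */
	/* min(u, s);	   compile error: signedness mismatch between arguments */
	/* min(s, 2u);	   compile error: signed argument vs unsigned constant */
	umin(u, 100);	/* OK: both operands widened as unsigned */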
@@ -64,14 +82,31 @@
* @x: first value
* @y: second value
*/
-#define min(x, y) __careful_cmp(x, y, <)
+#define min(x, y) __careful_cmp(min, x, y)
/**
* max - return maximum of two values of the same or compatible types
* @x: first value
* @y: second value
*/
-#define max(x, y) __careful_cmp(x, y, >)
+#define max(x, y) __careful_cmp(max, x, y)
+
+/**
+ * umin - return minimum of two non-negative values
+ * Signed types are zero extended to match a larger unsigned type.
+ * @x: first value
+ * @y: second value
+ */
+#define umin(x, y) \
+ __careful_cmp(min, (x) + 0u + 0ul + 0ull, (y) + 0u + 0ul + 0ull)
+
+/**
+ * umax - return maximum of two non-negative values
+ * @x: first value
+ * @y: second value
+ */
+#define umax(x, y) \
+ __careful_cmp(max, (x) + 0u + 0ul + 0ull, (y) + 0u + 0ul + 0ull)
/**
* min3 - return minimum of three values
@@ -123,7 +158,7 @@
* @x: first value
* @y: second value
*/
-#define min_t(type, x, y) __careful_cmp((type)(x), (type)(y), <)
+#define min_t(type, x, y) __careful_cmp(min, (type)(x), (type)(y))
/**
* max_t - return maximum of two values, using the specified type
@@ -131,7 +166,50 @@
* @x: first value
* @y: second value
*/
-#define max_t(type, x, y) __careful_cmp((type)(x), (type)(y), >)
+#define max_t(type, x, y) __careful_cmp(max, (type)(x), (type)(y))
+
+/*
+ * Do not check the array parameter using __must_be_array().
+ * In the following legit use-case where the "array" passed is a simple pointer,
+ * __must_be_array() will return a failure.
+ * --- 8< ---
+ * int *buff
+ * ...
+ * min = min_array(buff, nb_items);
+ * --- 8< ---
+ *
+ * The first typeof(&(array)[0]) is needed in order to support arrays of both
+ * 'int *buff' and 'int buff[N]' types.
+ *
+ * The array can be an array of const items.
+ * typeof() keeps the const qualifier. Use __unqual_scalar_typeof() in order
+ * to discard the const qualifier for the __element variable.
+ */
+#define __minmax_array(op, array, len) ({ \
+ typeof(&(array)[0]) __array = (array); \
+ typeof(len) __len = (len); \
+ __unqual_scalar_typeof(__array[0]) __element = __array[--__len];\
+ while (__len--) \
+ __element = op(__element, __array[__len]); \
+ __element; })
+
+/**
+ * min_array - return minimum of values present in an array
+ * @array: array
+ * @len: array length
+ *
+ * Note that @len must not be zero (empty array).
+ */
+#define min_array(array, len) __minmax_array(min, array, len)
+
+/**
+ * max_array - return maximum of values present in an array
+ * @array: array
+ * @len: array length
+ *
+ * Note that @len must not be zero (empty array).
+ */
+#define max_array(array, len) __minmax_array(max, array, len)
/**
* clamp_t - return a value clamped to a given range using a given type
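Since __minmax_array() deliberately skips __must_be_array(), both real arrays and plain pointers work, exactly as the comment in the hunk above describes. A quick usage sketch:

	int vals[] = { 3, 9, 1, 4 };
	int *pbuf = vals;

	int lowest  = min_array(vals, ARRAY_SIZE(vals));	/* 1 */
	int highest = max_array(pbuf, 4);			/* 9; pointer form */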
@@ -158,6 +236,32 @@
*/
#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)
+static inline bool in_range64(u64 val, u64 start, u64 len)
+{
+ return (val - start) < len;
+}
+
+static inline bool in_range32(u32 val, u32 start, u32 len)
+{
+ return (val - start) < len;
+}
+
+/**
+ * in_range - Determine if a value lies within a range.
+ * @val: Value to test.
+ * @start: First value in range.
+ * @len: Number of values in range.
+ *
+ * This is more efficient than "if (start <= val && val < (start + len))".
+ * It also gives a different answer if @start + @len overflows the size of
+ * the type by a sufficient amount to encompass @val. Decide for yourself
+ * which behaviour you want, or prove that start + len never overflow.
+ * Do not blindly replace one form with the other.
+ */
+#define in_range(val, start, len) \
+ ((sizeof(start) | sizeof(len) | sizeof(val)) <= sizeof(u32) ? \
+ in_range32(val, start, len) : in_range64(val, start, len))
+
/**
* swap - swap values of @a and @b
* @a: first value
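The overflow caveat in the in_range() kerneldoc is worth a concrete case. With 32-bit operands the subtraction wraps, so a range that crosses the top of the type still matches:

	in_range(5, 2, 10);		/* true: 2 <= 5 < 12 */
	in_range(0, UINT_MAX, 2);	/* true: the range wraps past 0 */
	/* the naive (start <= val && val < start + len) form says false here */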
diff --git a/include/linux/misc_cgroup.h b/include/linux/misc_cgroup.h
index c238207d1615..e799b1f8d05b 100644
--- a/include/linux/misc_cgroup.h
+++ b/include/linux/misc_cgroup.h
@@ -31,17 +31,18 @@ struct misc_cg;
* struct misc_res: Per cgroup per misc type resource
* @max: Maximum limit on the resource.
* @usage: Current usage of the resource.
- * @failed: True if charged failed for the resource in a cgroup.
+ * @events: Number of times the resource limit was exceeded.
*/
struct misc_res {
- unsigned long max;
- atomic_long_t usage;
- atomic_long_t events;
+ u64 max;
+ atomic64_t usage;
+ atomic64_t events;
};
/**
* struct misc_cg - Miscellaneous controller's cgroup structure.
* @css: cgroup subsys state object.
+ * @events_file: Handle for the misc resources events file.
* @res: Array of misc resources usage in the cgroup.
*/
struct misc_cg {
@@ -53,12 +54,10 @@ struct misc_cg {
struct misc_res res[MISC_CG_RES_TYPES];
};
-unsigned long misc_cg_res_total_usage(enum misc_res_type type);
-int misc_cg_set_capacity(enum misc_res_type type, unsigned long capacity);
-int misc_cg_try_charge(enum misc_res_type type, struct misc_cg *cg,
- unsigned long amount);
-void misc_cg_uncharge(enum misc_res_type type, struct misc_cg *cg,
- unsigned long amount);
+u64 misc_cg_res_total_usage(enum misc_res_type type);
+int misc_cg_set_capacity(enum misc_res_type type, u64 capacity);
+int misc_cg_try_charge(enum misc_res_type type, struct misc_cg *cg, u64 amount);
+void misc_cg_uncharge(enum misc_res_type type, struct misc_cg *cg, u64 amount);
/**
* css_misc() - Get misc cgroup from the css.
@@ -99,27 +98,26 @@ static inline void put_misc_cg(struct misc_cg *cg)
#else /* !CONFIG_CGROUP_MISC */
-static inline unsigned long misc_cg_res_total_usage(enum misc_res_type type)
+static inline u64 misc_cg_res_total_usage(enum misc_res_type type)
{
return 0;
}
-static inline int misc_cg_set_capacity(enum misc_res_type type,
- unsigned long capacity)
+static inline int misc_cg_set_capacity(enum misc_res_type type, u64 capacity)
{
return 0;
}
static inline int misc_cg_try_charge(enum misc_res_type type,
struct misc_cg *cg,
- unsigned long amount)
+ u64 amount)
{
return 0;
}
static inline void misc_cg_uncharge(enum misc_res_type type,
struct misc_cg *cg,
- unsigned long amount)
+ u64 amount)
{
}
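The charging API keeps its shape across the u64 conversion; only the amount type widens. A minimal sketch of the usual charge/uncharge pairing, borrowing the SEV resource type as an example:

	#include <linux/misc_cgroup.h>

	static int reserve_one_unit(void)
	{
		struct misc_cg *cg = get_current_misc_cg();
		int ret;

		ret = misc_cg_try_charge(MISC_CG_RES_SEV, cg, 1);
		if (ret) {
			put_misc_cg(cg);
			return ret;
		}
		/* ... use the resource; on release: ... */
		misc_cg_uncharge(MISC_CG_RES_SEV, cg, 1);
		put_misc_cg(cg);
		return 0;
	}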
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 6646634a0b9d..27f42f713c89 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -33,6 +33,7 @@
#ifndef MLX4_DEVICE_H
#define MLX4_DEVICE_H
+#include <linux/auxiliary_bus.h>
#include <linux/if_ether.h>
#include <linux/pci.h>
#include <linux/completion.h>
@@ -889,6 +890,12 @@ struct mlx4_dev {
u8 uar_page_shift;
};
+struct mlx4_adev {
+ struct auxiliary_device adev;
+ struct mlx4_dev *mdev;
+ int idx;
+};
+
struct mlx4_clock_params {
u64 offset;
u8 bar;
@@ -1087,6 +1094,19 @@ static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
(offset & (PAGE_SIZE - 1));
}
+static inline int mlx4_is_bonded(struct mlx4_dev *dev)
+{
+ return !!(dev->flags & MLX4_FLAG_BONDED);
+}
+
+static inline int mlx4_is_mf_bonded(struct mlx4_dev *dev)
+{
+ return (mlx4_is_bonded(dev) && mlx4_is_mfunc(dev));
+}
+
+int mlx4_queue_bond_work(struct mlx4_dev *dev, int is_bonded, u8 v2p_p1,
+ u8 v2p_p2);
+
int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn);
void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn);
int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn);
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h
index 1834c8fad12e..69825223081f 100644
--- a/include/linux/mlx4/driver.h
+++ b/include/linux/mlx4/driver.h
@@ -34,8 +34,12 @@
#define MLX4_DRIVER_H
#include <net/devlink.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/notifier.h>
#include <linux/mlx4/device.h>
+#define MLX4_ADEV_NAME "mlx4_core"
+
struct mlx4_dev;
#define MLX4_MAC_MASK 0xffffffffffffULL
@@ -54,41 +58,19 @@ enum {
MLX4_INTFF_BONDING = 1 << 0
};
-struct mlx4_interface {
- void * (*add) (struct mlx4_dev *dev);
- void (*remove)(struct mlx4_dev *dev, void *context);
- void (*event) (struct mlx4_dev *dev, void *context,
- enum mlx4_dev_event event, unsigned long param);
- void * (*get_dev)(struct mlx4_dev *dev, void *context, u8 port);
- void (*activate)(struct mlx4_dev *dev, void *context);
- struct list_head list;
+struct mlx4_adrv {
+ struct auxiliary_driver adrv;
enum mlx4_protocol protocol;
int flags;
};
-int mlx4_register_interface(struct mlx4_interface *intf);
-void mlx4_unregister_interface(struct mlx4_interface *intf);
-
-int mlx4_bond(struct mlx4_dev *dev);
-int mlx4_unbond(struct mlx4_dev *dev);
-static inline int mlx4_is_bonded(struct mlx4_dev *dev)
-{
- return !!(dev->flags & MLX4_FLAG_BONDED);
-}
-
-static inline int mlx4_is_mf_bonded(struct mlx4_dev *dev)
-{
- return (mlx4_is_bonded(dev) && mlx4_is_mfunc(dev));
-}
-
-struct mlx4_port_map {
- u8 port1;
- u8 port2;
-};
-
-int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p);
+int mlx4_register_auxiliary_driver(struct mlx4_adrv *madrv);
+void mlx4_unregister_auxiliary_driver(struct mlx4_adrv *madrv);
-void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port);
+int mlx4_register_event_notifier(struct mlx4_dev *dev,
+ struct notifier_block *nb);
+int mlx4_unregister_event_notifier(struct mlx4_dev *dev,
+ struct notifier_block *nb);
struct devlink_port *mlx4_get_devlink_port(struct mlx4_dev *dev, int port);
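Consumers move from the bespoke mlx4_interface registration to a thin wrapper around the auxiliary bus, with events delivered via a notifier chain instead of an event callback. A hedged sketch; the ".eth" id-table suffix, my_probe/my_remove, and my_event are assumptions for illustration:

	static const struct auxiliary_device_id my_id_table[] = {
		{ .name = MLX4_ADEV_NAME ".eth" },	/* assumed device name */
		{},
	};

	static struct mlx4_adrv my_adrv = {
		.adrv = {
			.name	  = "my_proto",
			.probe	  = my_probe,
			.remove	  = my_remove,
			.id_table = my_id_table,
		},
		.protocol = MLX4_PROT_ETH,
	};

	static struct notifier_block my_nb = { .notifier_call = my_event };

	/* at init: */
	ret = mlx4_register_auxiliary_driver(&my_adrv);
	/* per device, e.g. from probe: */
	ret = mlx4_register_event_notifier(mdev, &my_nb);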
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index c0af74efd3cb..01275c6e8468 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -364,6 +364,11 @@ enum mlx5_event {
enum mlx5_driver_event {
MLX5_DRIVER_EVENT_TYPE_TRAP = 0,
MLX5_DRIVER_EVENT_UPLINK_NETDEV,
+ MLX5_DRIVER_EVENT_MACSEC_SA_ADDED,
+ MLX5_DRIVER_EVENT_MACSEC_SA_DELETED,
+ MLX5_DRIVER_EVENT_SF_PEER_DEVLINK,
+ MLX5_DRIVER_EVENT_AFFILIATION_DONE,
+ MLX5_DRIVER_EVENT_AFFILIATION_REMOVED,
};
enum {
@@ -716,6 +721,7 @@ enum sync_rst_state_type {
MLX5_SYNC_RST_STATE_RESET_REQUEST = 0x0,
MLX5_SYNC_RST_STATE_RESET_NOW = 0x1,
MLX5_SYNC_RST_STATE_RESET_ABORT = 0x2,
+ MLX5_SYNC_RST_STATE_RESET_UNLOAD = 0x3,
};
struct mlx5_eqe_sync_fw_update {
@@ -912,7 +918,7 @@ static inline u8 get_cqe_tls_offload(struct mlx5_cqe64 *cqe)
return (cqe->tls_outer_l3_tunneled >> 3) & 0x3;
}
-static inline bool cqe_has_vlan(struct mlx5_cqe64 *cqe)
+static inline bool cqe_has_vlan(const struct mlx5_cqe64 *cqe)
{
return cqe->l4_l3_hdr_type & 0x1;
}
@@ -1207,9 +1213,7 @@ enum mlx5_cap_type {
MLX5_CAP_FLOW_TABLE,
MLX5_CAP_ESWITCH_FLOW_TABLE,
MLX5_CAP_ESWITCH,
- MLX5_CAP_RESERVED,
- MLX5_CAP_VECTOR_CALC,
- MLX5_CAP_QOS,
+ MLX5_CAP_QOS = 0xc,
MLX5_CAP_DEBUG,
MLX5_CAP_RESERVED_14,
MLX5_CAP_DEV_MEM,
@@ -1219,7 +1223,6 @@ enum mlx5_cap_type {
MLX5_CAP_DEV_EVENT = 0x14,
MLX5_CAP_IPSEC,
MLX5_CAP_CRYPTO = 0x1a,
- MLX5_CAP_DEV_SHAMPO = 0x1d,
MLX5_CAP_MACSEC = 0x1f,
MLX5_CAP_GENERAL_2 = 0x20,
MLX5_CAP_PORT_SELECTION = 0x25,
@@ -1238,7 +1241,6 @@ enum mlx5_pcam_feature_groups {
enum mlx5_mcam_reg_groups {
MLX5_MCAM_REGS_FIRST_128 = 0x0,
- MLX5_MCAM_REGS_0x9080_0x90FF = 0x1,
MLX5_MCAM_REGS_0x9100_0x917F = 0x2,
MLX5_MCAM_REGS_NUM = 0x3,
};
@@ -1278,10 +1280,6 @@ enum mlx5_qcam_feature_groups {
MLX5_GET(per_protocol_networking_offload_caps,\
mdev->caps.hca[MLX5_CAP_ETHERNET_OFFLOADS]->cur, cap)
-#define MLX5_CAP_ETH_MAX(mdev, cap) \
- MLX5_GET(per_protocol_networking_offload_caps,\
- mdev->caps.hca[MLX5_CAP_ETHERNET_OFFLOADS]->max, cap)
-
#define MLX5_CAP_IPOIB_ENHANCED(mdev, cap) \
MLX5_GET(per_protocol_networking_offload_caps,\
mdev->caps.hca[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS]->cur, cap)
@@ -1304,77 +1302,40 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP64_FLOWTABLE(mdev, cap) \
MLX5_GET64(flow_table_nic_cap, (mdev)->caps.hca[MLX5_CAP_FLOW_TABLE]->cur, cap)
-#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
- MLX5_GET(flow_table_nic_cap, mdev->caps.hca[MLX5_CAP_FLOW_TABLE]->max, cap)
-
#define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap)
-#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
- MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)
-
#define MLX5_CAP_FLOWTABLE_NIC_TX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit.cap)
-#define MLX5_CAP_FLOWTABLE_NIC_TX_MAX(mdev, cap) \
- MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit.cap)
-
#define MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_sniffer.cap)
-#define MLX5_CAP_FLOWTABLE_SNIFFER_RX_MAX(mdev, cap) \
- MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_sniffer.cap)
-
#define MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_sniffer.cap)
-#define MLX5_CAP_FLOWTABLE_SNIFFER_TX_MAX(mdev, cap) \
- MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_sniffer.cap)
-
#define MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_rdma.cap)
-#define MLX5_CAP_FLOWTABLE_RDMA_RX_MAX(mdev, cap) \
- MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_rdma.cap)
-
#define MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_rdma.cap)
-#define MLX5_CAP_FLOWTABLE_RDMA_TX_MAX(mdev, cap) \
- MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_rdma.cap)
-
#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
MLX5_GET(flow_table_eswitch_cap, \
mdev->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap)
-#define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \
- MLX5_GET(flow_table_eswitch_cap, \
- mdev->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->max, cap)
-
#define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)
-#define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \
- MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap)
-
#define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \
MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap)
-#define MLX5_CAP_ESW_EGRESS_ACL_MAX(mdev, cap) \
- MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_egress.cap)
-
#define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \
MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap)
-#define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \
- MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap)
-
#define MLX5_CAP_ESW_FT_FIELD_SUPPORT_2(mdev, cap) \
MLX5_CAP_ESW_FLOWTABLE(mdev, ft_field_support_2_esw_fdb.cap)
-#define MLX5_CAP_ESW_FT_FIELD_SUPPORT_2_MAX(mdev, cap) \
- MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, ft_field_support_2_esw_fdb.cap)
-
#define MLX5_CAP_ESW(mdev, cap) \
MLX5_GET(e_switch_cap, \
mdev->caps.hca[MLX5_CAP_ESWITCH]->cur, cap)
@@ -1383,10 +1344,6 @@ enum mlx5_qcam_feature_groups {
MLX5_GET64(flow_table_eswitch_cap, \
(mdev)->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap)
-#define MLX5_CAP_ESW_MAX(mdev, cap) \
- MLX5_GET(e_switch_cap, \
- mdev->caps.hca[MLX5_CAP_ESWITCH]->max, cap)
-
#define MLX5_CAP_PORT_SELECTION(mdev, cap) \
MLX5_GET(port_selection_cap, \
mdev->caps.hca[MLX5_CAP_PORT_SELECTION]->cur, cap)
@@ -1399,26 +1356,15 @@ enum mlx5_qcam_feature_groups {
MLX5_GET(adv_virtualization_cap, \
mdev->caps.hca[MLX5_CAP_ADV_VIRTUALIZATION]->cur, cap)
-#define MLX5_CAP_ADV_VIRTUALIZATION_MAX(mdev, cap) \
- MLX5_GET(adv_virtualization_cap, \
- mdev->caps.hca[MLX5_CAP_ADV_VIRTUALIZATION]->max, cap)
-
#define MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) \
MLX5_CAP_PORT_SELECTION(mdev, flow_table_properties_port_selection.cap)
-#define MLX5_CAP_FLOWTABLE_PORT_SELECTION_MAX(mdev, cap) \
- MLX5_CAP_PORT_SELECTION_MAX(mdev, flow_table_properties_port_selection.cap)
-
#define MLX5_CAP_ODP(mdev, cap)\
MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, cap)
#define MLX5_CAP_ODP_MAX(mdev, cap)\
MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->max, cap)
-#define MLX5_CAP_VECTOR_CALC(mdev, cap) \
- MLX5_GET(vector_calc_cap, \
- mdev->caps.hca[MLX5_CAP_VECTOR_CALC]->cur, cap)
-
#define MLX5_CAP_QOS(mdev, cap)\
MLX5_GET(qos_cap, mdev->caps.hca[MLX5_CAP_QOS]->cur, cap)
@@ -1435,10 +1381,6 @@ enum mlx5_qcam_feature_groups {
MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_FIRST_128], \
mng_access_reg_cap_mask.access_regs.reg)
-#define MLX5_CAP_MCAM_REG1(mdev, reg) \
- MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9080_0x90FF], \
- mng_access_reg_cap_mask.access_regs1.reg)
-
#define MLX5_CAP_MCAM_REG2(mdev, reg) \
MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9100_0x917F], \
mng_access_reg_cap_mask.access_regs2.reg)
@@ -1484,9 +1426,6 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_CRYPTO(mdev, cap)\
MLX5_GET(crypto_cap, (mdev)->caps.hca[MLX5_CAP_CRYPTO]->cur, cap)
-#define MLX5_CAP_DEV_SHAMPO(mdev, cap)\
- MLX5_GET(shampo_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_SHAMPO], cap)
-
#define MLX5_CAP_MACSEC(mdev, cap)\
MLX5_GET(macsec_cap, (mdev)->caps.hca[MLX5_CAP_MACSEC]->cur, cap)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 4b9626cd83e4..41f03b352401 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -134,6 +134,7 @@ enum {
MLX5_REG_PCAM = 0x507f,
MLX5_REG_NODE_DESC = 0x6001,
MLX5_REG_HOST_ENDIANNESS = 0x7004,
+ MLX5_REG_MTCAP = 0x9009,
MLX5_REG_MTMP = 0x900A,
MLX5_REG_MCIA = 0x9014,
MLX5_REG_MFRL = 0x9028,
@@ -149,11 +150,14 @@ enum {
MLX5_REG_MTPPSE = 0x9054,
MLX5_REG_MTUTC = 0x9055,
MLX5_REG_MPEGC = 0x9056,
+ MLX5_REG_MPIR = 0x9059,
MLX5_REG_MCQS = 0x9060,
MLX5_REG_MCQI = 0x9061,
MLX5_REG_MCC = 0x9062,
MLX5_REG_MCDA = 0x9063,
MLX5_REG_MCAM = 0x907f,
+ MLX5_REG_MSECQ = 0x9155,
+ MLX5_REG_MSEES = 0x9156,
MLX5_REG_MIRC = 0x9162,
MLX5_REG_SBCAM = 0xB01F,
MLX5_REG_RESOURCE_DUMP = 0xC000,
@@ -287,18 +291,23 @@ struct mlx5_cmd_stats {
struct mlx5_cmd {
struct mlx5_nb nb;
+	/* members that need to be queried or reinitialized on each reload */
+ struct {
+ u16 cmdif_rev;
+ u8 log_sz;
+ u8 log_stride;
+ int max_reg_cmds;
+ unsigned long bitmask;
+ struct semaphore sem;
+ struct semaphore pages_sem;
+ struct semaphore throttle_sem;
+ } vars;
enum mlx5_cmdif_state state;
void *cmd_alloc_buf;
dma_addr_t alloc_dma;
int alloc_size;
void *cmd_buf;
dma_addr_t dma;
- u16 cmdif_rev;
- u8 log_sz;
- u8 log_stride;
- int max_reg_cmds;
- int events;
- u32 __iomem *vector;
/* protect command queue allocations
*/
@@ -308,12 +317,8 @@ struct mlx5_cmd {
*/
spinlock_t token_lock;
u8 token;
- unsigned long bitmask;
char wq_name[MLX5_CMD_WQ_MAX_NAME];
struct workqueue_struct *wq;
- struct semaphore sem;
- struct semaphore pages_sem;
- struct semaphore throttle_sem;
int mode;
u16 allowed_opcode;
struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
@@ -321,7 +326,7 @@ struct mlx5_cmd {
struct mlx5_cmd_debug dbg;
struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
int checksum_disabled;
- struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
+ struct xarray stats;
};
struct mlx5_cmd_mailbox {
@@ -382,7 +387,6 @@ enum mlx5_res_type {
MLX5_RES_SRQ = 3,
MLX5_RES_XSRQ = 4,
MLX5_RES_XRQ = 5,
- MLX5_RES_DCT = MLX5_EVENT_QUEUE_TYPE_DCT,
};
struct mlx5_core_rsc_common {
@@ -443,15 +447,6 @@ struct mlx5_core_health {
struct delayed_work update_fw_log_ts_work;
};
-struct mlx5_qp_table {
- struct notifier_block nb;
-
- /* protect radix tree
- */
- spinlock_t lock;
- struct radix_tree_root tree;
-};
-
enum {
MLX5_PF_NOTIFY_DISABLE_VF,
MLX5_PF_NOTIFY_ENABLE_VF,
@@ -474,6 +469,7 @@ struct mlx5_core_sriov {
struct mlx5_vf_context *vfs_ctx;
int num_vfs;
u16 max_vfs;
+ u16 max_ec_vfs;
};
struct mlx5_fc_pool {
@@ -510,7 +506,7 @@ struct mlx5_events;
struct mlx5_mpfs;
struct mlx5_eswitch;
struct mlx5_lag;
-struct mlx5_devcom;
+struct mlx5_devcom_dev;
struct mlx5_fw_reset;
struct mlx5_eq_table;
struct mlx5_irq_table;
@@ -580,6 +576,7 @@ enum mlx5_func_type {
MLX5_VF,
MLX5_SF,
MLX5_HOST_PF,
+ MLX5_EC_VF,
MLX5_FUNC_TYPE_NUM,
};
@@ -619,6 +616,7 @@ struct mlx5_priv {
int adev_idx;
int sw_vhca_id;
struct mlx5_events *events;
+ struct mlx5_vhca_events *vhca_events;
struct mlx5_flow_steering *steering;
struct mlx5_mpfs *mpfs;
@@ -626,7 +624,8 @@ struct mlx5_priv {
struct mlx5_core_sriov sriov;
struct mlx5_lag *lag;
u32 flags;
- struct mlx5_devcom *devcom;
+ struct mlx5_devcom_dev *devc;
+ struct mlx5_devcom_comp_dev *hca_devcom_comp;
struct mlx5_fw_reset *fw_reset;
struct mlx5_core_roce roce;
struct mlx5_fc_stats fc_stats;
@@ -680,6 +679,9 @@ struct mlx5e_resources {
struct mlx5_td td;
u32 mkey;
struct mlx5_sq_bfreg bfreg;
+#define MLX5_MAX_NUM_TC 8
+ u32 tisn[MLX5_MAX_PORTS][MLX5_MAX_NUM_TC];
+ bool tisn_valid;
} hw_objs;
struct net_device *uplink_netdev;
struct mutex uplink_netdev_lock;
@@ -690,6 +692,7 @@ enum mlx5_sw_icm_type {
MLX5_SW_ICM_TYPE_STEERING,
MLX5_SW_ICM_TYPE_HEADER_MODIFY,
MLX5_SW_ICM_TYPE_HEADER_MODIFY_PATTERN,
+ MLX5_SW_ICM_TYPE_SW_ENCAP,
};
#define MLX5_MAX_RESERVED_GIDS 8
@@ -733,7 +736,6 @@ struct mlx5_fw_tracer;
struct mlx5_vxlan;
struct mlx5_geneve;
struct mlx5_hv_vhca;
-struct mlx5_thermal;
#define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity))
#define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))
@@ -812,7 +814,15 @@ struct mlx5_core_dev {
struct mlx5_rsc_dump *rsc_dump;
u32 vsc_addr;
struct mlx5_hv_vhca *hv_vhca;
- struct mlx5_thermal *thermal;
+ struct mlx5_hwmon *hwmon;
+ u64 num_block_tc;
+ u64 num_block_ipsec;
+#ifdef CONFIG_MLX5_MACSEC
+ struct mlx5_macsec_fs *macsec_fs;
+ /* MACsec notifier chain to sync MACsec core and IB database */
+ struct blocking_notifier_head macsec_nh;
+#endif
+ u64 num_ipsec_offloads;
};
struct mlx5_db {
@@ -1026,7 +1036,8 @@ bool mlx5_cmd_is_down(struct mlx5_core_dev *dev);
void mlx5_core_uplink_netdev_set(struct mlx5_core_dev *mdev, struct net_device *netdev);
void mlx5_core_uplink_netdev_event_replay(struct mlx5_core_dev *mdev);
-int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
+void mlx5_core_mp_event_replay(struct mlx5_core_dev *dev, u32 event, void *data);
+
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
@@ -1037,10 +1048,6 @@ void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
struct mlx5_frag_buf *buf, int node);
void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
-struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
- gfp_t flags, int npages);
-void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
- struct mlx5_cmd_mailbox *head);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev, u32 *mkey, u32 *in,
int inlen);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, u32 mkey);
@@ -1054,8 +1061,6 @@ void mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_pages_debugfs_cleanup(struct mlx5_core_dev *dev);
-void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
- s32 npages, bool ec_function);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
@@ -1063,7 +1068,7 @@ void mlx5_unregister_debugfs(void);
void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm);
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
-int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn);
+int mlx5_comp_eqn_get(struct mlx5_core_dev *dev, u16 vecidx, int *eqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
@@ -1095,8 +1100,6 @@ int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
__be32 mlx5_core_get_terminate_scatter_list_mkey(struct mlx5_core_dev *dev);
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
-int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
- struct mlx5_odp_caps *odp_caps);
int mlx5_init_rl_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
@@ -1113,9 +1116,8 @@ int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
bool map_wc, bool fast_path);
void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);
-unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev);
-struct cpumask *
-mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector);
+unsigned int mlx5_comp_vectors_max(struct mlx5_core_dev *dev);
+int mlx5_comp_vector_get_cpu(struct mlx5_core_dev *dev, int vector);
unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev);
int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
u8 roce_version, u8 roce_l3_type, const u8 *gid,
@@ -1174,7 +1176,13 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
u64 *values,
int num_counters,
size_t *offsets);
-struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev);
+struct mlx5_core_dev *mlx5_lag_get_next_peer_mdev(struct mlx5_core_dev *dev, int *i);
+
+#define mlx5_lag_for_each_peer_mdev(dev, peer, i) \
+ for (i = 0, peer = mlx5_lag_get_next_peer_mdev(dev, &i); \
+ peer; \
+ peer = mlx5_lag_get_next_peer_mdev(dev, &i))
+
u8 mlx5_lag_get_num_ports(struct mlx5_core_dev *dev);
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
@@ -1193,12 +1201,6 @@ int mlx5_sriov_blocking_notifier_register(struct mlx5_core_dev *mdev,
void mlx5_sriov_blocking_notifier_unregister(struct mlx5_core_dev *mdev,
int vf_id,
struct notifier_block *nb);
-#ifdef CONFIG_MLX5_CORE_IPOIB
-struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
- struct ib_device *ibdev,
- const char *name,
- void (*setup)(struct net_device *));
-#endif /* CONFIG_MLX5_CORE_IPOIB */
int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
struct ib_device *device,
struct rdma_netdev_alloc_params *params);
@@ -1250,6 +1252,11 @@ static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
MLX5_CAP_GEN(dev, lag_master);
}
+static inline u16 mlx5_core_max_ec_vfs(const struct mlx5_core_dev *dev)
+{
+ return dev->priv.sriov.max_ec_vfs;
+}
+
static inline int mlx5_get_gid_table_len(u16 param)
{
if (param > 4) {
@@ -1317,6 +1324,52 @@ static inline bool mlx5_get_roce_state(struct mlx5_core_dev *dev)
return mlx5_is_roce_on(dev);
}
+#ifdef CONFIG_MLX5_MACSEC
+static inline bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev)
+{
+ if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
+ MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD))
+ return false;
+
+ if (!MLX5_CAP_GEN(mdev, log_max_dek))
+ return false;
+
+ if (!MLX5_CAP_MACSEC(mdev, log_max_macsec_offload))
+ return false;
+
+ if (!MLX5_CAP_FLOWTABLE_NIC_RX(mdev, macsec_decrypt) ||
+ !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, reformat_remove_macsec))
+ return false;
+
+ if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, macsec_encrypt) ||
+ !MLX5_CAP_FLOWTABLE_NIC_TX(mdev, reformat_add_macsec))
+ return false;
+
+ if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_encrypt) &&
+ !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_encrypt))
+ return false;
+
+ if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_decrypt) &&
+ !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_decrypt))
+ return false;
+
+ return true;
+}
+
+#define NIC_RDMA_BOTH_DIRS_CAPS (MLX5_FT_NIC_RX_2_NIC_RX_RDMA | MLX5_FT_NIC_TX_RDMA_2_NIC_TX)
+
+static inline bool mlx5_is_macsec_roce_supported(struct mlx5_core_dev *mdev)
+{
+ if (((MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) &
+ NIC_RDMA_BOTH_DIRS_CAPS) != NIC_RDMA_BOTH_DIRS_CAPS) ||
+ !MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, max_modify_header_actions) ||
+ !mlx5e_is_macsec_device(mdev) || !mdev->macsec_fs)
+ return false;
+
+ return true;
+}
+#endif
+
enum {
MLX5_OCTWORD = 16,
};
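mlx5_lag_get_peer_mdev() is replaced by an iterator, so code that must visit every LAG peer no longer assumes exactly one. Typical use of the new macro:

	struct mlx5_core_dev *peer;
	int i;

	mlx5_lag_for_each_peer_mdev(dev, peer, i) {
		/* act on each peer device in the bond */
	}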
diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h
index e2701ed0200e..df73a2ccc9af 100644
--- a/include/linux/mlx5/eswitch.h
+++ b/include/linux/mlx5/eswitch.h
@@ -7,6 +7,7 @@
#define _MLX5_ESWITCH_
#include <linux/mlx5/driver.h>
+#include <linux/mlx5/vport.h>
#include <net/devlink.h>
#define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager)
@@ -144,6 +145,9 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS, \
ESW_TUN_OPTS_OFFSET + 1)
+/* reuse tun_opts for the mapped ipsec obj id when tun_id is 0 (invalid) */
+#define ESW_IPSEC_RX_MAPPED_ID_MASK GENMASK(ESW_TUN_OPTS_BITS - 1, 0)
+
u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev);
u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev);
struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw);
@@ -207,4 +211,11 @@ static inline bool is_mdev_switchdev_mode(struct mlx5_core_dev *dev)
return mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS;
}
+/* The returned number is valid only when the dev is an eswitch manager. */
+static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev)
+{
+ return mlx5_core_is_ecpf_esw_manager(dev) ?
+ MLX5_VPORT_ECPF : MLX5_VPORT_PF;
+}
+
#endif
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 2cb404c7ea13..3fb428ce7d1c 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -67,6 +67,7 @@ enum {
MLX5_FLOW_TABLE_TERMINATION = BIT(2),
MLX5_FLOW_TABLE_UNMANAGED = BIT(3),
MLX5_FLOW_TABLE_OTHER_VPORT = BIT(4),
+ MLX5_FLOW_TABLE_UPLINK_VPORT = BIT(5),
};
#define LEFTOVERS_RULE_NUM 2
@@ -105,15 +106,19 @@ enum mlx5_flow_namespace_type {
MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS,
MLX5_FLOW_NAMESPACE_RDMA_RX_IPSEC,
MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC,
+ MLX5_FLOW_NAMESPACE_RDMA_RX_MACSEC,
+ MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC,
};
enum {
FDB_BYPASS_PATH,
+ FDB_CRYPTO_INGRESS,
FDB_TC_OFFLOAD,
FDB_FT_OFFLOAD,
FDB_TC_MISS,
FDB_BR_OFFLOAD,
FDB_SLOW_PATH,
+ FDB_CRYPTO_EGRESS,
FDB_PER_VPORT,
};
@@ -127,6 +132,7 @@ struct mlx5_flow_handle;
enum {
FLOW_CONTEXT_HAS_TAG = BIT(0),
+ FLOW_CONTEXT_UPLINK_HAIRPIN_EN = BIT(1),
};
struct mlx5_flow_context {
diff --git a/include/linux/mlx5/macsec.h b/include/linux/mlx5/macsec.h
new file mode 100644
index 000000000000..f7ff4c2a95d0
--- /dev/null
+++ b/include/linux/mlx5/macsec.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. */
+
+#ifndef MLX5_MACSEC_H
+#define MLX5_MACSEC_H
+
+#ifdef CONFIG_MLX5_MACSEC
+struct mlx5_macsec_event_data {
+ struct mlx5_macsec_fs *macsec_fs;
+ void *macdev;
+ u32 fs_id;
+ bool is_tx;
+};
+
+int mlx5_macsec_add_roce_rule(void *macdev, const struct sockaddr *addr, u16 gid_idx,
+ struct list_head *tx_rules_list, struct list_head *rx_rules_list,
+ struct mlx5_macsec_fs *macsec_fs);
+
+void mlx5_macsec_del_roce_rule(u16 gid_idx, struct mlx5_macsec_fs *macsec_fs,
+ struct list_head *tx_rules_list, struct list_head *rx_rules_list);
+
+void mlx5_macsec_add_roce_sa_rules(u32 fs_id, const struct sockaddr *addr, u16 gid_idx,
+ struct list_head *tx_rules_list,
+ struct list_head *rx_rules_list,
+ struct mlx5_macsec_fs *macsec_fs, bool is_tx);
+
+void mlx5_macsec_del_roce_sa_rules(u32 fs_id, struct mlx5_macsec_fs *macsec_fs,
+ struct list_head *tx_rules_list,
+ struct list_head *rx_rules_list, bool is_tx);
+
+#endif
+#endif /* MLX5_MACSEC_H */
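The new header exposes a symmetric add/del API keyed by GID index. A minimal sketch of the expected pairing, assuming the rule lists live in a per-GID entry (error handling trimmed):

LIST_HEAD(tx_rules);
LIST_HEAD(rx_rules);
int err;

err = mlx5_macsec_add_roce_rule(macdev, addr, gid_idx,
				&tx_rules, &rx_rules, macsec_fs);
if (err)
	return err;

/* ... later, when the GID entry is torn down ... */
mlx5_macsec_del_roce_rule(gid_idx, macsec_fs, &tx_rules, &rx_rules);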
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index b89778d0d326..486b7492050c 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -65,9 +65,11 @@ enum {
enum {
MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE = 0x0,
+ MLX5_SET_HCA_CAP_OP_MOD_ETHERNET_OFFLOADS = 0x1,
MLX5_SET_HCA_CAP_OP_MOD_ODP = 0x2,
MLX5_SET_HCA_CAP_OP_MOD_ATOMIC = 0x3,
MLX5_SET_HCA_CAP_OP_MOD_ROCE = 0x4,
+ MLX5_SET_HCA_CAP_OP_MOD_IPSEC = 0x15,
MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE2 = 0x20,
MLX5_SET_HCA_CAP_OP_MOD_PORT_SELECTION = 0x25,
};
@@ -310,6 +312,7 @@ enum {
MLX5_CMD_OP_QUERY_VHCA_STATE = 0xb0d,
MLX5_CMD_OP_MODIFY_VHCA_STATE = 0xb0e,
MLX5_CMD_OP_SYNC_CRYPTO = 0xb12,
+ MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS = 0xb16,
MLX5_CMD_OP_MAX
};
@@ -432,7 +435,7 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 flow_table_modify[0x1];
u8 reformat[0x1];
u8 decap[0x1];
- u8 reserved_at_9[0x1];
+ u8 reset_root_to_default[0x1];
u8 pop_vlan[0x1];
u8 push_vlan[0x1];
u8 reserved_at_c[0x1];
@@ -464,10 +467,10 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 reformat_add_esp_trasport[0x1];
u8 reformat_l2_to_l3_esp_tunnel[0x1];
- u8 reserved_at_42[0x1];
+ u8 reformat_add_esp_transport_over_udp[0x1];
u8 reformat_del_esp_trasport[0x1];
u8 reformat_l3_esp_tunnel_to_l2[0x1];
- u8 reserved_at_45[0x1];
+ u8 reformat_del_esp_transport_over_udp[0x1];
u8 execute_aso[0x1];
u8 reserved_at_47[0x19];
@@ -618,7 +621,7 @@ struct mlx5_ifc_fte_match_set_misc_bits {
u8 reserved_at_140[0x8];
u8 bth_dst_qp[0x18];
- u8 reserved_at_160[0x20];
+ u8 inner_esp_spi[0x20];
u8 outer_esp_spi[0x20];
u8 reserved_at_1a0[0x60];
};
@@ -1100,7 +1103,7 @@ struct mlx5_ifc_roce_cap_bits {
u8 sw_r_roce_src_udp_port[0x1];
u8 fl_rc_qp_when_roce_disabled[0x1];
u8 fl_rc_qp_when_roce_enabled[0x1];
- u8 reserved_at_7[0x1];
+ u8 roce_cc_general[0x1];
u8 qp_ooo_transmit_default[0x1];
u8 reserved_at_9[0x15];
u8 qp_ts_format[0x2];
@@ -1190,7 +1193,8 @@ struct mlx5_ifc_device_mem_cap_bits {
u8 log_sw_icm_alloc_granularity[0x6];
u8 log_steering_sw_icm_size[0x8];
- u8 reserved_at_120[0x18];
+ u8 log_indirect_encap_sw_icm_size[0x8];
+ u8 reserved_at_128[0x10];
u8 log_header_modify_pattern_sw_icm_size[0x8];
u8 header_modify_sw_icm_start_address[0x40];
@@ -1201,7 +1205,11 @@ struct mlx5_ifc_device_mem_cap_bits {
u8 memic_operations[0x20];
- u8 reserved_at_220[0x5e0];
+ u8 reserved_at_220[0x20];
+
+ u8 indirect_encap_sw_icm_start_address[0x40];
+
+ u8 reserved_at_280[0x580];
};
struct mlx5_ifc_device_event_cap_bits {
@@ -1229,7 +1237,14 @@ struct mlx5_ifc_virtio_emulation_cap_bits {
u8 max_emulated_devices[0x8];
u8 max_num_virtio_queues[0x18];
- u8 reserved_at_a0[0x60];
+ u8 reserved_at_a0[0x20];
+
+ u8 reserved_at_c0[0x13];
+ u8 desc_group_mkey_supported[0x1];
+ u8 freeze_to_rdy_supported[0x1];
+ u8 reserved_at_d5[0xb];
+
+ u8 reserved_at_e0[0x20];
u8 umem_1_buffer_param_a[0x20];
@@ -1314,33 +1329,6 @@ struct mlx5_ifc_odp_cap_bits {
u8 reserved_at_120[0x6E0];
};
-struct mlx5_ifc_calc_op {
- u8 reserved_at_0[0x10];
- u8 reserved_at_10[0x9];
- u8 op_swap_endianness[0x1];
- u8 op_min[0x1];
- u8 op_xor[0x1];
- u8 op_or[0x1];
- u8 op_and[0x1];
- u8 op_max[0x1];
- u8 op_add[0x1];
-};
-
-struct mlx5_ifc_vector_calc_cap_bits {
- u8 calc_matrix[0x1];
- u8 reserved_at_1[0x1f];
- u8 reserved_at_20[0x8];
- u8 max_vec_count[0x8];
- u8 reserved_at_30[0xd];
- u8 max_chunk_size[0x3];
- struct mlx5_ifc_calc_op calc0;
- struct mlx5_ifc_calc_op calc1;
- struct mlx5_ifc_calc_op calc2;
- struct mlx5_ifc_calc_op calc3;
-
- u8 reserved_at_c0[0x720];
-};
-
struct mlx5_ifc_tls_cap_bits {
u8 tls_1_2_aes_gcm_128[0x1];
u8 tls_1_3_aes_gcm_128[0x1];
@@ -1710,9 +1698,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 regexp_params[0x1];
u8 uar_sz[0x6];
u8 port_selection_cap[0x1];
- u8 reserved_at_248[0x1];
+ u8 reserved_at_251[0x1];
u8 umem_uid_0[0x1];
- u8 reserved_at_250[0x5];
+ u8 reserved_at_253[0x5];
u8 log_pg_sz[0x8];
u8 bf[0x1];
@@ -1755,7 +1743,10 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_328[0x2];
u8 relaxed_ordering_read[0x1];
u8 log_max_pd[0x5];
- u8 reserved_at_330[0x9];
+ u8 reserved_at_330[0x6];
+ u8 pci_sync_for_fw_update_with_driver_unload[0x1];
+ u8 vnic_env_cnt_steering_fail[0x1];
+ u8 vport_counter_local_loopback[0x1];
u8 q_counter_aggregation[0x1];
u8 q_counter_other_vport[0x1];
u8 log_max_xrcd[0x5];
@@ -1816,7 +1807,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 disable_local_lb_uc[0x1];
u8 disable_local_lb_mc[0x1];
u8 log_min_hairpin_wq_data_sz[0x5];
- u8 reserved_at_3e8[0x2];
+ u8 reserved_at_3e8[0x1];
+ u8 silent_mode[0x1];
u8 vhca_state[0x1];
u8 log_max_vlan_list[0x5];
u8 reserved_at_3f0[0x3];
@@ -1833,7 +1825,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_460[0x1];
u8 ats[0x1];
- u8 reserved_at_462[0x1];
+ u8 cross_vhca_rqt[0x1];
u8 log_max_uctx[0x5];
u8 reserved_at_468[0x1];
u8 crypto[0x1];
@@ -1956,6 +1948,15 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 match_definer_format_supported[0x40];
};
+enum {
+ MLX5_CROSS_VHCA_OBJ_TO_OBJ_SUPPORTED_LOCAL_FLOW_TABLE_TO_REMOTE_FLOW_TABLE_MISS = 0x80000,
+ MLX5_CROSS_VHCA_OBJ_TO_OBJ_SUPPORTED_LOCAL_FLOW_TABLE_ROOT_TO_REMOTE_FLOW_TABLE = (1ULL << 20),
+};
+
+enum {
+ MLX5_ALLOWED_OBJ_FOR_OTHER_VHCA_ACCESS_FLOW_TABLE = 0x200,
+};
+
struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 reserved_at_0[0x80];
@@ -1970,9 +1971,15 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 reserved_at_c0[0x8];
u8 migration_multi_load[0x1];
u8 migration_tracking_state[0x1];
- u8 reserved_at_ca[0x16];
+ u8 reserved_at_ca[0x6];
+ u8 migration_in_chunks[0x1];
+ u8 reserved_at_d1[0xf];
+
+ u8 cross_vhca_object_to_object_supported[0x20];
+
+ u8 allowed_object_for_other_vhca_access[0x40];
- u8 reserved_at_e0[0xc0];
+ u8 reserved_at_140[0x60];
u8 flow_table_type_2_type[0x8];
u8 reserved_at_1a8[0x3];
@@ -1990,7 +1997,14 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 ts_cqe_metadata_size2wqe_counter[0x5];
u8 reserved_at_250[0x10];
- u8 reserved_at_260[0x5a0];
+ u8 reserved_at_260[0x120];
+ u8 reserved_at_380[0x10];
+ u8 ec_vf_vport_base[0x10];
+
+ u8 reserved_at_3a0[0x10];
+ u8 max_rqt_vhca_id[0x10];
+
+ u8 reserved_at_3c0[0x440];
};
enum mlx5_ifc_flow_destination_type {
@@ -2149,6 +2163,13 @@ struct mlx5_ifc_rq_num_bits {
u8 rq_num[0x18];
};
+struct mlx5_ifc_rq_vhca_bits {
+ u8 reserved_at_0[0x8];
+ u8 rq_num[0x18];
+ u8 reserved_at_20[0x10];
+ u8 rq_vhca_id[0x10];
+};
+
struct mlx5_ifc_mac_address_layout_bits {
u8 reserved_at_0[0x10];
u8 mac_addr_47_32[0x10];
@@ -3112,7 +3133,9 @@ struct mlx5_ifc_dtor_reg_bits {
struct mlx5_ifc_default_timeout_bits reclaim_vfs_pages_to;
- u8 reserved_at_1c0[0x40];
+ struct mlx5_ifc_default_timeout_bits reset_unload_to;
+
+ u8 reserved_at_1c0[0x20];
};
enum {
@@ -3427,20 +3450,6 @@ struct mlx5_ifc_roce_addr_layout_bits {
u8 reserved_at_e0[0x20];
};
-struct mlx5_ifc_shampo_cap_bits {
- u8 reserved_at_0[0x3];
- u8 shampo_log_max_reservation_size[0x5];
- u8 reserved_at_8[0x3];
- u8 shampo_log_min_reservation_size[0x5];
- u8 shampo_min_mss_size[0x10];
-
- u8 reserved_at_20[0x3];
- u8 shampo_max_log_headers_entry_size[0x5];
- u8 reserved_at_28[0x18];
-
- u8 reserved_at_40[0x7c0];
-};
-
struct mlx5_ifc_crypto_cap_bits {
u8 reserved_at_0[0x3];
u8 synchronize_dek[0x1];
@@ -3476,16 +3485,15 @@ union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap;
struct mlx5_ifc_e_switch_cap_bits e_switch_cap;
struct mlx5_ifc_port_selection_cap_bits port_selection_cap;
- struct mlx5_ifc_vector_calc_cap_bits vector_calc_cap;
struct mlx5_ifc_qos_cap_bits qos_cap;
struct mlx5_ifc_debug_cap_bits debug_cap;
struct mlx5_ifc_fpga_cap_bits fpga_cap;
struct mlx5_ifc_tls_cap_bits tls_cap;
struct mlx5_ifc_device_mem_cap_bits device_mem_cap;
struct mlx5_ifc_virtio_emulation_cap_bits virtio_emulation_cap;
- struct mlx5_ifc_shampo_cap_bits shampo_cap;
struct mlx5_ifc_macsec_cap_bits macsec_cap;
struct mlx5_ifc_crypto_cap_bits crypto_cap;
+ struct mlx5_ifc_ipsec_cap_bits ipsec_cap;
u8 reserved_at_0[0x8000];
};
@@ -3568,7 +3576,7 @@ struct mlx5_ifc_flow_context_bits {
u8 action[0x10];
u8 extended_destination[0x1];
- u8 reserved_at_81[0x1];
+ u8 uplink_hairpin_en[0x1];
u8 flow_source[0x2];
u8 encrypt_decrypt_type[0x4];
u8 destination_list_size[0x18];
@@ -3673,7 +3681,13 @@ struct mlx5_ifc_vnic_diagnostic_statistics_bits {
u8 eth_wqe_too_small[0x20];
- u8 reserved_at_220[0xdc0];
+ u8 reserved_at_220[0xc0];
+
+ u8 generated_pkt_steering_fail[0x40];
+
+ u8 handled_pkt_steering_fail[0x40];
+
+ u8 reserved_at_360[0xc80];
};
struct mlx5_ifc_traffic_counter_bits {
@@ -3906,7 +3920,10 @@ struct mlx5_ifc_rqtc_bits {
u8 reserved_at_e0[0x6a0];
- struct mlx5_ifc_rq_num_bits rq_num[];
+ union {
+ DECLARE_FLEX_ARRAY(struct mlx5_ifc_rq_num_bits, rq_num);
+ DECLARE_FLEX_ARRAY(struct mlx5_ifc_rq_vhca_bits, rq_vhca);
+ };
};
enum {
@@ -4019,8 +4036,13 @@ struct mlx5_ifc_nic_vport_context_bits {
u8 affiliation_criteria[0x4];
u8 affiliated_vhca_id[0x10];
- u8 reserved_at_60[0xd0];
+ u8 reserved_at_60[0xa0];
+ u8 reserved_at_100[0x1];
+ u8 sd_group[0x3];
+ u8 reserved_at_104[0x1c];
+
+ u8 reserved_at_120[0x10];
u8 mtu[0x10];
u8 system_image_guid[0x40];
@@ -4749,7 +4771,10 @@ struct mlx5_ifc_set_l2_table_entry_in_bits {
u8 reserved_at_c0[0x20];
- u8 reserved_at_e0[0x13];
+ u8 reserved_at_e0[0x10];
+ u8 silent_mode_valid[0x1];
+ u8 silent_mode[0x1];
+ u8 reserved_at_f2[0x1];
u8 vlan_valid[0x1];
u8 vlan[0xc];
@@ -4797,7 +4822,8 @@ struct mlx5_ifc_set_hca_cap_in_bits {
u8 op_mod[0x10];
u8 other_function[0x1];
- u8 reserved_at_41[0xf];
+ u8 ec_vf_function[0x1];
+ u8 reserved_at_42[0xe];
u8 function_id[0x10];
u8 reserved_at_60[0x20];
@@ -5175,7 +5201,9 @@ struct mlx5_ifc_query_vport_counter_out_bits {
struct mlx5_ifc_traffic_counter_bits transmitted_eth_multicast;
- u8 reserved_at_680[0xa00];
+ struct mlx5_ifc_traffic_counter_bits local_loopback;
+
+ u8 reserved_at_700[0x980];
};
enum {
@@ -5948,7 +5976,8 @@ struct mlx5_ifc_query_hca_cap_in_bits {
u8 op_mod[0x10];
u8 other_function[0x1];
- u8 reserved_at_41[0xf];
+ u8 ec_vf_function[0x1];
+ u8 reserved_at_42[0xe];
u8 function_id[0x10];
u8 reserved_at_60[0x20];
@@ -6391,6 +6420,28 @@ struct mlx5_ifc_general_obj_out_cmd_hdr_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_allow_other_vhca_access_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 reserved_at_40[0x50];
+ u8 object_type_to_be_accessed[0x10];
+ u8 object_id_to_be_accessed[0x20];
+ u8 reserved_at_c0[0x40];
+ union {
+ u8 access_key_raw[0x100];
+ u8 access_key[8][0x20];
+ };
+};
+
+struct mlx5_ifc_allow_other_vhca_access_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x40];
+};
+
struct mlx5_ifc_modify_header_arg_bits {
u8 reserved_at_0[0x80];
@@ -6413,6 +6464,24 @@ struct mlx5_ifc_create_match_definer_out_bits {
struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
};
+struct mlx5_ifc_alias_context_bits {
+ u8 vhca_id_to_be_accessed[0x10];
+ u8 reserved_at_10[0xd];
+ u8 status[0x3];
+ u8 object_id_to_be_accessed[0x20];
+ u8 reserved_at_40[0x40];
+ union {
+ u8 access_key_raw[0x100];
+ u8 access_key[8][0x20];
+ };
+ u8 metadata[0x80];
+};
+
+struct mlx5_ifc_create_alias_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_alias_context_bits alias_ctx;
+};
+
enum {
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
@@ -6647,9 +6716,12 @@ enum mlx5_reformat_ctx_type {
MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x4,
MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4 = 0x5,
MLX5_REFORMAT_TYPE_L2_TO_L3_ESP_TUNNEL = 0x6,
+ MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV4 = 0x7,
MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT = 0x8,
MLX5_REFORMAT_TYPE_L3_ESP_TUNNEL_TO_L2 = 0x9,
+ MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT_OVER_UDP = 0xa,
MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6 = 0xb,
+ MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV6 = 0xc,
MLX5_REFORMAT_TYPE_INSERT_HDR = 0xf,
MLX5_REFORMAT_TYPE_REMOVE_HDR = 0x10,
MLX5_REFORMAT_TYPE_ADD_MACSEC = 0x11,
@@ -10047,6 +10119,19 @@ struct mlx5_ifc_mpegc_reg_bits {
u8 reserved_at_60[0x100];
};
+struct mlx5_ifc_mpir_reg_bits {
+ u8 sdm[0x1];
+ u8 reserved_at_1[0x1b];
+ u8 host_buses[0x4];
+
+ u8 reserved_at_20[0x20];
+
+ u8 local_port[0x8];
+ u8 reserved_at_28[0x18];
+
+ u8 reserved_at_60[0x20];
+};
+
enum {
MLX5_MTUTC_FREQ_ADJ_UNITS_PPB = 0x0,
MLX5_MTUTC_FREQ_ADJ_UNITS_SCALED_PPM = 0x1,
@@ -10061,7 +10146,10 @@ enum {
struct mlx5_ifc_mtutc_reg_bits {
u8 reserved_at_0[0x5];
u8 freq_adj_units[0x3];
- u8 reserved_at_8[0x14];
+ u8 reserved_at_8[0x3];
+ u8 log_max_freq_adjustment[0x5];
+
+ u8 reserved_at_10[0xc];
u8 operation[0x4];
u8 freq_adjustment[0x20];
@@ -10173,9 +10261,13 @@ struct mlx5_ifc_mcam_access_reg_bits {
u8 regs_63_to_46[0x12];
u8 mrtc[0x1];
- u8 regs_44_to_32[0xd];
+ u8 regs_44_to_41[0x4];
+ u8 mfrl[0x1];
+ u8 regs_39_to_32[0x8];
- u8 regs_31_to_0[0x20];
+ u8 regs_31_to_10[0x16];
+ u8 mtmp[0x1];
+ u8 regs_8_to_0[0x9];
};
struct mlx5_ifc_mcam_access_reg_bits1 {
@@ -10193,7 +10285,9 @@ struct mlx5_ifc_mcam_access_reg_bits2 {
u8 mirc[0x1];
u8 regs_97_to_96[0x2];
- u8 regs_95_to_64[0x20];
+ u8 regs_95_to_87[0x09];
+ u8 synce_registers[0x2];
+ u8 regs_84_to_64[0x15];
u8 regs_63_to_32[0x20];
@@ -10589,6 +10683,7 @@ enum {
MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_INV = 0xe,
MLX5_INITIAL_SEG_HEALTH_SYNDROME_FFSER_ERR = 0xf,
MLX5_INITIAL_SEG_HEALTH_SYNDROME_HIGH_TEMP_ERR = 0x10,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PCI_POISONED_ERR = 0x12,
};
struct mlx5_ifc_initial_seg_bits {
@@ -10835,8 +10930,9 @@ enum {
MLX5_MFRL_REG_RESET_STATE_IDLE = 0,
MLX5_MFRL_REG_RESET_STATE_IN_NEGOTIATION = 1,
MLX5_MFRL_REG_RESET_STATE_RESET_IN_PROGRESS = 2,
- MLX5_MFRL_REG_RESET_STATE_TIMEOUT = 3,
+ MLX5_MFRL_REG_RESET_STATE_NEG_TIMEOUT = 3,
MLX5_MFRL_REG_RESET_STATE_NACK = 4,
+ MLX5_MFRL_REG_RESET_STATE_UNLOAD_TIMEOUT = 5,
};
enum {
@@ -10928,6 +11024,15 @@ struct mlx5_ifc_mrtc_reg_bits {
u8 time_l[0x20];
};
+struct mlx5_ifc_mtcap_reg_bits {
+ u8 reserved_at_0[0x19];
+ u8 sensor_count[0x7];
+
+ u8 reserved_at_20[0x20];
+
+ u8 sensor_map[0x40];
+};
+
struct mlx5_ifc_mtmp_reg_bits {
u8 reserved_at_0[0x14];
u8 sensor_index[0xc];
@@ -11015,6 +11120,7 @@ union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_mfrl_reg_bits mfrl_reg;
struct mlx5_ifc_mtutc_reg_bits mtutc_reg;
struct mlx5_ifc_mrtc_reg_bits mrtc_reg;
+ struct mlx5_ifc_mtcap_reg_bits mtcap_reg;
struct mlx5_ifc_mtmp_reg_bits mtmp_reg;
u8 reserved_at_0[0x60e0];
};
@@ -11923,6 +12029,7 @@ enum {
MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO = 0x24,
MLX5_GENERAL_OBJECT_TYPES_MACSEC = 0x27,
MLX5_GENERAL_OBJECT_TYPES_INT_KEK = 0x47,
+ MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS = 0xff15,
};
enum {
@@ -11942,6 +12049,13 @@ enum {
MLX5_IPSEC_ASO_INC_SN = 0x2,
};
+enum {
+ MLX5_IPSEC_ASO_REPLAY_WIN_32BIT = 0x0,
+ MLX5_IPSEC_ASO_REPLAY_WIN_64BIT = 0x1,
+ MLX5_IPSEC_ASO_REPLAY_WIN_128BIT = 0x2,
+ MLX5_IPSEC_ASO_REPLAY_WIN_256BIT = 0x3,
+};
+
struct mlx5_ifc_ipsec_aso_bits {
u8 valid[0x1];
u8 reserved_at_201[0x1];
@@ -12398,7 +12512,8 @@ struct mlx5_ifc_query_vhca_migration_state_in_bits {
u8 op_mod[0x10];
u8 incremental[0x1];
- u8 reserved_at_41[0xf];
+ u8 chunk[0x1];
+ u8 reserved_at_42[0xe];
u8 vhca_id[0x10];
u8 reserved_at_60[0x20];
@@ -12414,7 +12529,11 @@ struct mlx5_ifc_query_vhca_migration_state_out_bits {
u8 required_umem_size[0x20];
- u8 reserved_at_a0[0x160];
+ u8 reserved_at_a0[0x20];
+
+ u8 remaining_total_size[0x40];
+
+ u8 reserved_at_100[0x100];
};
struct mlx5_ifc_save_vhca_state_in_bits {
@@ -12446,7 +12565,7 @@ struct mlx5_ifc_save_vhca_state_out_bits {
u8 actual_image_size[0x20];
- u8 reserved_at_60[0x20];
+ u8 next_required_umem_size[0x20];
};
struct mlx5_ifc_load_vhca_state_in_bits {
@@ -12555,4 +12674,59 @@ struct mlx5_ifc_modify_page_track_obj_in_bits {
struct mlx5_ifc_page_track_bits obj_context;
};
+struct mlx5_ifc_msecq_reg_bits {
+ u8 reserved_at_0[0x20];
+
+ u8 reserved_at_20[0x12];
+ u8 network_option[0x2];
+ u8 local_ssm_code[0x4];
+ u8 local_enhanced_ssm_code[0x8];
+
+ u8 local_clock_identity[0x40];
+
+ u8 reserved_at_80[0x180];
+};
+
+enum {
+ MLX5_MSEES_FIELD_SELECT_ENABLE = BIT(0),
+ MLX5_MSEES_FIELD_SELECT_ADMIN_STATUS = BIT(1),
+ MLX5_MSEES_FIELD_SELECT_ADMIN_FREQ_MEASURE = BIT(2),
+};
+
+enum mlx5_msees_admin_status {
+ MLX5_MSEES_ADMIN_STATUS_FREE_RUNNING = 0x0,
+ MLX5_MSEES_ADMIN_STATUS_TRACK = 0x1,
+};
+
+enum mlx5_msees_oper_status {
+ MLX5_MSEES_OPER_STATUS_FREE_RUNNING = 0x0,
+ MLX5_MSEES_OPER_STATUS_SELF_TRACK = 0x1,
+ MLX5_MSEES_OPER_STATUS_OTHER_TRACK = 0x2,
+ MLX5_MSEES_OPER_STATUS_HOLDOVER = 0x3,
+ MLX5_MSEES_OPER_STATUS_FAIL_HOLDOVER = 0x4,
+ MLX5_MSEES_OPER_STATUS_FAIL_FREE_RUNNING = 0x5,
+};
+
+struct mlx5_ifc_msees_reg_bits {
+ u8 reserved_at_0[0x8];
+ u8 local_port[0x8];
+ u8 pnat[0x2];
+ u8 lp_msb[0x2];
+ u8 reserved_at_14[0xc];
+
+ u8 field_select[0x20];
+
+ u8 admin_status[0x4];
+ u8 oper_status[0x4];
+ u8 ho_acq[0x1];
+ u8 reserved_at_49[0xc];
+ u8 admin_freq_measure[0x1];
+ u8 oper_freq_measure[0x1];
+ u8 failure_reason[0x9];
+
+ u8 frequency_diff[0x20];
+
+ u8 reserved_at_80[0x180];
+};
+
#endif /* MLX5_IFC_H */
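For reference, the new ALLOW_OTHER_VHCA_ACCESS layouts are driven through the usual MLX5_SET()/MLX5_ADDR_OF() accessors. A sketch under the standard mlx5_cmd_exec() flow; obj_id and key are placeholders supplied by a hypothetical caller:

u32 out[MLX5_ST_SZ_DW(allow_other_vhca_access_out)] = {};
u32 in[MLX5_ST_SZ_DW(allow_other_vhca_access_in)] = {};

MLX5_SET(allow_other_vhca_access_in, in, opcode,
	 MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS);
MLX5_SET(allow_other_vhca_access_in, in, object_type_to_be_accessed,
	 MLX5_ALLOWED_OBJ_FOR_OTHER_VHCA_ACCESS_FLOW_TABLE);
MLX5_SET(allow_other_vhca_access_in, in, object_id_to_be_accessed, obj_id);
memcpy(MLX5_ADDR_OF(allow_other_vhca_access_in, in, access_key_raw),
       key, sizeof(key));

return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));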
diff --git a/include/linux/mlx5/mlx5_ifc_vdpa.h b/include/linux/mlx5/mlx5_ifc_vdpa.h
index 9becdc3fa503..40371c916cf9 100644
--- a/include/linux/mlx5/mlx5_ifc_vdpa.h
+++ b/include/linux/mlx5/mlx5_ifc_vdpa.h
@@ -74,7 +74,11 @@ struct mlx5_ifc_virtio_q_bits {
u8 reserved_at_320[0x8];
u8 pd[0x18];
- u8 reserved_at_340[0xc0];
+ u8 reserved_at_340[0x20];
+
+ u8 desc_group_mkey[0x20];
+
+ u8 reserved_at_380[0x80];
};
struct mlx5_ifc_virtio_net_q_object_bits {
@@ -141,6 +145,11 @@ enum {
MLX5_VIRTQ_MODIFY_MASK_STATE = (u64)1 << 0,
MLX5_VIRTQ_MODIFY_MASK_DIRTY_BITMAP_PARAMS = (u64)1 << 3,
MLX5_VIRTQ_MODIFY_MASK_DIRTY_BITMAP_DUMP_ENABLE = (u64)1 << 4,
+ MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_ADDRS = (u64)1 << 6,
+ MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_AVAIL_IDX = (u64)1 << 7,
+ MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_USED_IDX = (u64)1 << 8,
+ MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY = (u64)1 << 11,
+ MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY = (u64)1 << 14,
};
enum {
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index 98b2e1e149f9..26092c78a985 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -115,8 +115,9 @@ enum mlx5e_ext_link_mode {
MLX5E_100GAUI_1_100GBASE_CR_KR = 11,
MLX5E_200GAUI_4_200GBASE_CR4_KR4 = 12,
MLX5E_200GAUI_2_200GBASE_CR2_KR2 = 13,
- MLX5E_400GAUI_8 = 15,
+ MLX5E_400GAUI_8_400GBASE_CR8 = 15,
MLX5E_400GAUI_4_400GBASE_CR4_KR4 = 16,
+ MLX5E_800GAUI_8_800GBASE_CR8_KR8 = 19,
MLX5E_EXT_LINK_MODES_NUMBER,
};
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index bd53cf4be7bd..f0e55bf3ec8b 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -269,7 +269,10 @@ struct mlx5_wqe_eth_seg {
union {
struct {
__be16 sz;
- u8 start[2];
+ union {
+ u8 start[2];
+ DECLARE_FLEX_ARRAY(u8, data);
+ };
} inline_hdr;
struct {
__be16 type;
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index 7f31432f44c2..c36cc6d82926 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -72,6 +72,7 @@ int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
u64 *system_image_guid);
+int mlx5_query_nic_vport_sd_group(struct mlx5_core_dev *mdev, u8 *sd_group);
int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
u16 vport, u64 node_guid);
@@ -132,6 +133,6 @@ int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev,
int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev);
u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev);
-int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 function_id, void *out,
+int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 vport, void *out,
u16 opmod);
#endif /* __MLX5_VPORT_H__ */
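A small usage sketch for the new query helper; reading a nonzero sd_group as membership in a multi-PF (socket-direct) group is an assumption here, based on the sd_group field added to the NIC vport context above.

u8 sd_group = 0;

if (!mlx5_query_nic_vport_sd_group(mdev, &sd_group) && sd_group)
	/* device appears to belong to a socket-direct group */
	setup_multi_pf_netdev(mdev);	/* hypothetical follow-up */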
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 27ce77080c79..f5a97dec5169 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -319,11 +319,13 @@ extern unsigned int kobjsize(const void *objp);
#define VM_HIGH_ARCH_BIT_2 34 /* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_3 35 /* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_4 36 /* bit only usable on 64-bit architectures */
+#define VM_HIGH_ARCH_BIT_5 37 /* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_0 BIT(VM_HIGH_ARCH_BIT_0)
#define VM_HIGH_ARCH_1 BIT(VM_HIGH_ARCH_BIT_1)
#define VM_HIGH_ARCH_2 BIT(VM_HIGH_ARCH_BIT_2)
#define VM_HIGH_ARCH_3 BIT(VM_HIGH_ARCH_BIT_3)
#define VM_HIGH_ARCH_4 BIT(VM_HIGH_ARCH_BIT_4)
+#define VM_HIGH_ARCH_5 BIT(VM_HIGH_ARCH_BIT_5)
#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */
#ifdef CONFIG_ARCH_HAS_PKEYS
@@ -339,14 +341,27 @@ extern unsigned int kobjsize(const void *objp);
#endif
#endif /* CONFIG_ARCH_HAS_PKEYS */
+#ifdef CONFIG_X86_USER_SHADOW_STACK
+/*
+ * VM_SHADOW_STACK should not be set with VM_SHARED because of the lack
+ * of core mm support.
+ *
+ * These VMAs will get a single end guard page. This helps userspace protect
+ * itself from attacks. A single page is enough for current shadow stack archs
+ * (x86). See the comments near alloc_shstk() in arch/x86/kernel/shstk.c
+ * for more details on the guard size.
+ */
+# define VM_SHADOW_STACK VM_HIGH_ARCH_5
+#else
+# define VM_SHADOW_STACK VM_NONE
+#endif
+
#if defined(CONFIG_X86)
# define VM_PAT VM_ARCH_1 /* PAT reserves whole VMA at once (x86) */
#elif defined(CONFIG_PPC)
# define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP VM_ARCH_1
-#elif defined(CONFIG_IA64)
-# define VM_GROWSUP VM_ARCH_1
#elif defined(CONFIG_SPARC64)
# define VM_SPARC_ADI VM_ARCH_1 /* Uses ADI tag for access control */
# define VM_ARCH_CLEAR VM_SPARC_ADI
@@ -370,14 +385,14 @@ extern unsigned int kobjsize(const void *objp);
#endif
#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
-# define VM_UFFD_MINOR_BIT 37
+# define VM_UFFD_MINOR_BIT 38
# define VM_UFFD_MINOR BIT(VM_UFFD_MINOR_BIT) /* UFFD minor faults */
#else /* !CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
# define VM_UFFD_MINOR VM_NONE
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
/* Bits set in the VMA until the stack is in its final location */
-#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ)
+#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY)
#define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)
@@ -397,10 +412,14 @@ extern unsigned int kobjsize(const void *objp);
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif
+#define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK)
+
#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK VM_GROWSUP
+#define VM_STACK_EARLY VM_GROWSDOWN
#else
#define VM_STACK VM_GROWSDOWN
+#define VM_STACK_EARLY 0
#endif
#define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
@@ -530,13 +549,6 @@ struct vm_fault {
*/
};
-/* page entry size for vm->huge_fault() */
-enum page_entry_size {
- PE_SIZE_PTE = 0,
- PE_SIZE_PMD,
- PE_SIZE_PUD,
-};
-
/*
* These are the virtual MM functions - opening of an area, closing and
* unmapping it (needed to keep files on disk up-to-date etc), pointer
@@ -560,8 +572,7 @@ struct vm_operations_struct {
int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
unsigned long end, unsigned long newflags);
vm_fault_t (*fault)(struct vm_fault *vmf);
- vm_fault_t (*huge_fault)(struct vm_fault *vmf,
- enum page_entry_size pe_size);
+ vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
vm_fault_t (*map_pages)(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff);
unsigned long (*pagesize)(struct vm_area_struct * area);
@@ -606,7 +617,7 @@ struct vm_operations_struct {
* policy.
*/
struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
- unsigned long addr);
+ unsigned long addr, pgoff_t *ilx);
#endif
/*
* Called by vm_normal_page() for special PTEs to find the
@@ -639,8 +650,14 @@ static inline void vma_numab_state_free(struct vm_area_struct *vma) {}
*/
static inline bool vma_start_read(struct vm_area_struct *vma)
{
- /* Check before locking. A race might cause false locked result. */
- if (vma->vm_lock_seq == READ_ONCE(vma->vm_mm->mm_lock_seq))
+ /*
+ * Check before locking. A race might cause false locked result.
+ * We can use READ_ONCE() for the mm_lock_seq here, and don't need
+ * ACQUIRE semantics, because this is just a lockless check whose result
+ * we don't rely on for anything - the mm_lock_seq read against which we
+ * need ordering is below.
+ */
+ if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(vma->vm_mm->mm_lock_seq))
return false;
if (unlikely(down_read_trylock(&vma->vm_lock->lock) == 0))
@@ -651,8 +668,13 @@ static inline bool vma_start_read(struct vm_area_struct *vma)
* False unlocked result is impossible because we modify and check
* vma->vm_lock_seq under vma->vm_lock protection and mm->mm_lock_seq
* modification invalidates all existing locks.
+ *
+ * We must use ACQUIRE semantics for the mm_lock_seq so that if we are
+ * racing with vma_end_write_all(), we only start reading from the VMA
+ * after it has been unlocked.
+ * This pairs with RELEASE semantics in vma_end_write_all().
*/
- if (unlikely(vma->vm_lock_seq == READ_ONCE(vma->vm_mm->mm_lock_seq))) {
+ if (unlikely(vma->vm_lock_seq == smp_load_acquire(&vma->vm_mm->mm_lock_seq))) {
up_read(&vma->vm_lock->lock);
return false;
}
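To make the pairing concrete: the smp_load_acquire() above is matched by a release on the writer side. A schematic of what that writer looks like, assuming vma_end_write_all() follows the shape the comment describes (simplified; the real helper lives in mmap_lock.h):

/* Writer side (schematic): invalidate all VMA read locks at once. */
static inline void vma_end_write_all_sketch(struct mm_struct *mm)
{
	mmap_assert_write_locked(mm);
	/* Pairs with the smp_load_acquire() in vma_start_read(). */
	smp_store_release(&mm->mm_lock_seq, mm->mm_lock_seq + 1);
}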
@@ -666,6 +688,7 @@ static inline void vma_end_read(struct vm_area_struct *vma)
rcu_read_unlock();
}
+/* WARNING! Can only be used if mmap_lock is expected to be write-locked */
static bool __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq)
{
mmap_assert_write_locked(vma->vm_mm);
@@ -674,10 +697,15 @@ static bool __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq)
* current task is holding mmap_write_lock, both vma->vm_lock_seq and
* mm->mm_lock_seq can't be concurrently modified.
*/
- *mm_lock_seq = READ_ONCE(vma->vm_mm->mm_lock_seq);
+ *mm_lock_seq = vma->vm_mm->mm_lock_seq;
return (vma->vm_lock_seq == *mm_lock_seq);
}
+/*
+ * Begin writing to a VMA.
+ * Exclude concurrent readers under the per-VMA lock until the currently
+ * write-locked mmap_lock is dropped or downgraded.
+ */
static inline void vma_start_write(struct vm_area_struct *vma)
{
int mm_lock_seq;
@@ -686,30 +714,27 @@ static inline void vma_start_write(struct vm_area_struct *vma)
return;
down_write(&vma->vm_lock->lock);
- vma->vm_lock_seq = mm_lock_seq;
+ /*
+ * We should use WRITE_ONCE() here because we can have concurrent reads
+ * from the early lockless pessimistic check in vma_start_read().
+ * We don't really care about the correctness of that early check, but
+ * we should use WRITE_ONCE() for cleanliness and to keep KCSAN happy.
+ */
+ WRITE_ONCE(vma->vm_lock_seq, mm_lock_seq);
up_write(&vma->vm_lock->lock);
}
-static inline bool vma_try_start_write(struct vm_area_struct *vma)
+static inline void vma_assert_write_locked(struct vm_area_struct *vma)
{
int mm_lock_seq;
- if (__is_vma_write_locked(vma, &mm_lock_seq))
- return true;
-
- if (!down_write_trylock(&vma->vm_lock->lock))
- return false;
-
- vma->vm_lock_seq = mm_lock_seq;
- up_write(&vma->vm_lock->lock);
- return true;
+ VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma);
}
-static inline void vma_assert_write_locked(struct vm_area_struct *vma)
+static inline void vma_assert_locked(struct vm_area_struct *vma)
{
- int mm_lock_seq;
-
- VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma);
+ if (!rwsem_is_locked(&vma->vm_lock->lock))
+ vma_assert_write_locked(vma);
}
static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached)
@@ -720,35 +745,65 @@ static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached)
vma->detached = detached;
}
+static inline void release_fault_lock(struct vm_fault *vmf)
+{
+ if (vmf->flags & FAULT_FLAG_VMA_LOCK)
+ vma_end_read(vmf->vma);
+ else
+ mmap_read_unlock(vmf->vma->vm_mm);
+}
+
+static inline void assert_fault_locked(struct vm_fault *vmf)
+{
+ if (vmf->flags & FAULT_FLAG_VMA_LOCK)
+ vma_assert_locked(vmf->vma);
+ else
+ mmap_assert_locked(vmf->vma->vm_mm);
+}
+
struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
unsigned long address);
#else /* CONFIG_PER_VMA_LOCK */
-static inline void vma_init_lock(struct vm_area_struct *vma) {}
static inline bool vma_start_read(struct vm_area_struct *vma)
{ return false; }
static inline void vma_end_read(struct vm_area_struct *vma) {}
static inline void vma_start_write(struct vm_area_struct *vma) {}
-static inline bool vma_try_start_write(struct vm_area_struct *vma)
- { return true; }
-static inline void vma_assert_write_locked(struct vm_area_struct *vma) {}
+static inline void vma_assert_write_locked(struct vm_area_struct *vma)
+ { mmap_assert_write_locked(vma->vm_mm); }
static inline void vma_mark_detached(struct vm_area_struct *vma,
bool detached) {}
+static inline struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
+ unsigned long address)
+{
+ return NULL;
+}
+
+static inline void release_fault_lock(struct vm_fault *vmf)
+{
+ mmap_read_unlock(vmf->vma->vm_mm);
+}
+
+static inline void assert_fault_locked(struct vm_fault *vmf)
+{
+ mmap_assert_locked(vmf->vma->vm_mm);
+}
+
#endif /* CONFIG_PER_VMA_LOCK */
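Both configurations give fault paths one way to drop whichever lock they entered with. A hedged sketch of the intended pattern for a handler that must sleep with the fault outstanding (modelled on callers such as userfaultfd; details vary):

/* On entry the fault lock -- per-VMA or mmap_lock -- is held. */
assert_fault_locked(vmf);
/* ... queue the waiter ... */
release_fault_lock(vmf);	/* drops per-VMA lock or mmap_lock */
/* ... sleep until woken, then retry the fault ... */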
+extern const struct vm_operations_struct vma_dummy_vm_ops;
+
/*
* WARNING: vma_init does not initialize vma->vm_lock.
* Use vm_area_alloc()/vm_area_free() if vma needs locking.
*/
static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
- static const struct vm_operations_struct dummy_vm_ops = {};
-
memset(vma, 0, sizeof(*vma));
vma->vm_mm = mm;
- vma->vm_ops = &dummy_vm_ops;
+ vma->vm_ops = &vma_dummy_vm_ops;
INIT_LIST_HEAD(&vma->anon_vma_chain);
vma_mark_detached(vma, false);
vma_numab_state_init(vma);
@@ -761,18 +816,22 @@ static inline void vm_flags_init(struct vm_area_struct *vma,
ACCESS_PRIVATE(vma, __vm_flags) = flags;
}
-/* Use when VMA is part of the VMA tree and modifications need coordination */
+/*
+ * Use when VMA is part of the VMA tree and modifications need coordination.
+ * Note: vm_flags_reset and vm_flags_reset_once do not lock the vma; it
+ * must be write-locked explicitly beforehand.
+ */
static inline void vm_flags_reset(struct vm_area_struct *vma,
vm_flags_t flags)
{
- vma_start_write(vma);
+ vma_assert_write_locked(vma);
vm_flags_init(vma, flags);
}
static inline void vm_flags_reset_once(struct vm_area_struct *vma,
vm_flags_t flags)
{
- vma_start_write(vma);
+ vma_assert_write_locked(vma);
WRITE_ONCE(ACCESS_PRIVATE(vma, __vm_flags), flags);
}
@@ -821,6 +880,31 @@ static inline bool vma_is_anonymous(struct vm_area_struct *vma)
return !vma->vm_ops;
}
+/*
+ * Indicate if the VMA is a heap for the given task; for
+ * /proc/PID/maps that is the heap of the main task.
+ */
+static inline bool vma_is_initial_heap(const struct vm_area_struct *vma)
+{
+ return vma->vm_start < vma->vm_mm->brk &&
+ vma->vm_end > vma->vm_mm->start_brk;
+}
+
+/*
+ * Indicate if the VMA is a stack for the given task; for
+ * /proc/PID/maps that is the stack of the main task.
+ */
+static inline bool vma_is_initial_stack(const struct vm_area_struct *vma)
+{
+ /*
+ * We make no effort to guess what a given thread considers to be
+ * its "stack". It's not even well-defined for programs written in
+ * languages like Go.
+ */
+ return vma->vm_start <= vma->vm_mm->start_stack &&
+ vma->vm_end >= vma->vm_mm->start_stack;
+}
+
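These predicates centralize checks that were previously open-coded; the consumer they are aimed at looks like the /proc/PID/maps labelling below (sketch).

/* Illustrative: label a mapping the way /proc/PID/maps does. */
if (vma_is_initial_heap(vma))
	name = "[heap]";
else if (vma_is_initial_stack(vma))
	name = "[stack]";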
static inline bool vma_is_temporary_stack(struct vm_area_struct *vma)
{
int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
@@ -851,6 +935,17 @@ static inline bool vma_is_accessible(struct vm_area_struct *vma)
return vma->vm_flags & VM_ACCESS_FLAGS;
}
+static inline bool is_shared_maywrite(vm_flags_t vm_flags)
+{
+ return (vm_flags & (VM_SHARED | VM_MAYWRITE)) ==
+ (VM_SHARED | VM_MAYWRITE);
+}
+
+static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma)
+{
+ return is_shared_maywrite(vma->vm_flags);
+}
+
static inline
struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
{
@@ -866,11 +961,24 @@ static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
return mas_find(&vmi->mas, ULONG_MAX);
}
+static inline
+struct vm_area_struct *vma_iter_next_range(struct vma_iterator *vmi)
+{
+ return mas_next_range(&vmi->mas, ULONG_MAX);
+}
+
static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
{
return mas_prev(&vmi->mas, 0);
}
+static inline
+struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi)
+{
+ return mas_prev_range(&vmi->mas, 0);
+}
+
static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
{
return vmi->mas.index;
@@ -886,6 +994,17 @@ static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi,
return mas_expected_entries(&vmi->mas, count);
}
+static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
+ unsigned long start, unsigned long end, gfp_t gfp)
+{
+ __mas_set_range(&vmi->mas, start, end - 1);
+ mas_store_gfp(&vmi->mas, NULL, gfp);
+ if (unlikely(mas_is_err(&vmi->mas)))
+ return -ENOMEM;
+
+ return 0;
+}
+
/* Free any unused preallocations */
static inline void vma_iter_free(struct vma_iterator *vmi)
{
@@ -945,7 +1064,7 @@ struct inode;
* compound_order() can be called without holding a reference, which means
* that niceties like page_folio() don't work. These callers should be
* prepared to handle wild return values. For example, PG_head may be
- * set before _folio_order is initialised, or this may be a tail page.
+ * set before the order is initialised, or this may be a tail page.
* See compaction.c for some good examples.
*/
static inline unsigned int compound_order(struct page *page)
@@ -954,7 +1073,7 @@ static inline unsigned int compound_order(struct page *page)
if (!test_bit(PG_head, &folio->flags))
return 0;
- return folio->_folio_order;
+ return folio->_flags_1 & 0xff;
}
/**
@@ -970,7 +1089,7 @@ static inline unsigned int folio_order(struct folio *folio)
{
if (!folio_test_large(folio))
return 0;
- return folio->_folio_order;
+ return folio->_flags_1 & 0xff;
}
#include <linux/huge_mm.h>
@@ -1041,11 +1160,6 @@ unsigned long vmalloc_to_pfn(const void *addr);
* On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
* is no special casing required.
*/
-
-#ifndef is_ioremap_addr
-#define is_ioremap_addr(x) is_vmalloc_addr(x)
-#endif
-
#ifdef CONFIG_MMU
extern bool is_vmalloc_addr(const void *x);
extern int is_vmalloc_or_module_addr(const void *x);
@@ -1189,56 +1303,8 @@ void folio_copy(struct folio *dst, struct folio *src);
unsigned long nr_free_buffer_pages(void);
-/*
- * Compound pages have a destructor function. Provide a
- * prototype for that function and accessor functions.
- * These are _only_ valid on the head of a compound page.
- */
-typedef void compound_page_dtor(struct page *);
-
-/* Keep the enum in sync with compound_page_dtors array in mm/page_alloc.c */
-enum compound_dtor_id {
- NULL_COMPOUND_DTOR,
- COMPOUND_PAGE_DTOR,
-#ifdef CONFIG_HUGETLB_PAGE
- HUGETLB_PAGE_DTOR,
-#endif
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- TRANSHUGE_PAGE_DTOR,
-#endif
- NR_COMPOUND_DTORS,
-};
-extern compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS];
-
-static inline void set_compound_page_dtor(struct page *page,
- enum compound_dtor_id compound_dtor)
-{
- struct folio *folio = (struct folio *)page;
-
- VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page);
- VM_BUG_ON_PAGE(!PageHead(page), page);
- folio->_folio_dtor = compound_dtor;
-}
-
-static inline void folio_set_compound_dtor(struct folio *folio,
- enum compound_dtor_id compound_dtor)
-{
- VM_BUG_ON_FOLIO(compound_dtor >= NR_COMPOUND_DTORS, folio);
- folio->_folio_dtor = compound_dtor;
-}
-
void destroy_large_folio(struct folio *folio);
-static inline void set_compound_order(struct page *page, unsigned int order)
-{
- struct folio *folio = (struct folio *)page;
-
- folio->_folio_order = order;
-#ifdef CONFIG_64BIT
- folio->_folio_nr_pages = 1U << order;
-#endif
-}
-
/* Returns the number of bytes in this potentially compound page. */
static inline unsigned long page_size(struct page *page)
{
@@ -1272,8 +1338,6 @@ static inline unsigned long thp_size(struct page *page)
return PAGE_SIZE << thp_order(page);
}
-void free_compound_page(struct page *page);
-
#ifdef CONFIG_MMU
/*
* Do pte_mkwrite, but only if the vma says VM_WRITE. We do this when
@@ -1284,15 +1348,15 @@ void free_compound_page(struct page *page);
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
if (likely(vma->vm_flags & VM_WRITE))
- pte = pte_mkwrite(pte);
+ pte = pte_mkwrite(pte, vma);
return pte;
}
vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page);
-void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr);
+void set_pte_range(struct vm_fault *vmf, struct folio *folio,
+ struct page *page, unsigned int nr, unsigned long addr);
vm_fault_t finish_fault(struct vm_fault *vmf);
-vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
#endif
/*
@@ -1641,26 +1705,26 @@ static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
-static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
+static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
{
- return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
+ return xchg(&folio->_last_cpupid, cpupid & LAST_CPUPID_MASK);
}
-static inline int page_cpupid_last(struct page *page)
+static inline int folio_last_cpupid(struct folio *folio)
{
- return page->_last_cpupid;
+ return folio->_last_cpupid;
}
static inline void page_cpupid_reset_last(struct page *page)
{
page->_last_cpupid = -1 & LAST_CPUPID_MASK;
}
#else
-static inline int page_cpupid_last(struct page *page)
+static inline int folio_last_cpupid(struct folio *folio)
{
- return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
+ return (folio->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
}
-extern int page_cpupid_xchg_last(struct page *page, int cpupid);
+int folio_xchg_last_cpupid(struct folio *folio, int cpupid);
static inline void page_cpupid_reset_last(struct page *page)
{
@@ -1668,11 +1732,12 @@ static inline void page_cpupid_reset_last(struct page *page)
}
#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
-static inline int xchg_page_access_time(struct page *page, int time)
+static inline int folio_xchg_access_time(struct folio *folio, int time)
{
int last_time;
- last_time = page_cpupid_xchg_last(page, time >> PAGE_ACCESS_TIME_BUCKETS);
+ last_time = folio_xchg_last_cpupid(folio,
+ time >> PAGE_ACCESS_TIME_BUCKETS);
return last_time << PAGE_ACCESS_TIME_BUCKETS;
}
@@ -1681,24 +1746,24 @@ static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
unsigned int pid_bit;
pid_bit = hash_32(current->pid, ilog2(BITS_PER_LONG));
- if (vma->numab_state && !test_bit(pid_bit, &vma->numab_state->access_pids[1])) {
- __set_bit(pid_bit, &vma->numab_state->access_pids[1]);
+ if (vma->numab_state && !test_bit(pid_bit, &vma->numab_state->pids_active[1])) {
+ __set_bit(pid_bit, &vma->numab_state->pids_active[1]);
}
}
#else /* !CONFIG_NUMA_BALANCING */
-static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
+static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
{
- return page_to_nid(page); /* XXX */
+ return folio_nid(folio); /* XXX */
}
-static inline int xchg_page_access_time(struct page *page, int time)
+static inline int folio_xchg_access_time(struct folio *folio, int time)
{
return 0;
}
-static inline int page_cpupid_last(struct page *page)
+static inline int folio_last_cpupid(struct folio *folio)
{
- return page_to_nid(page); /* XXX */
+ return folio_nid(folio); /* XXX */
}
static inline int cpupid_to_nid(int cpupid)
@@ -1750,7 +1815,7 @@ static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
static inline u8 page_kasan_tag(const struct page *page)
{
- u8 tag = 0xff;
+ u8 tag = KASAN_TAG_KERNEL;
if (kasan_enabled()) {
tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
@@ -1779,7 +1844,7 @@ static inline void page_kasan_tag_set(struct page *page, u8 tag)
static inline void page_kasan_tag_reset(struct page *page)
{
if (kasan_enabled())
- page_kasan_tag_set(page, 0xff);
+ page_kasan_tag_set(page, KASAN_TAG_KERNEL);
}
#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
@@ -1899,50 +1964,68 @@ static inline bool page_maybe_dma_pinned(struct page *page)
*
 * The caller has to hold the PT lock and the vma->vm_mm->write_protect_seq.
*/
-static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma,
- struct page *page)
+static inline bool folio_needs_cow_for_dma(struct vm_area_struct *vma,
+ struct folio *folio)
{
VM_BUG_ON(!(raw_read_seqcount(&vma->vm_mm->write_protect_seq) & 1));
if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags))
return false;
- return page_maybe_dma_pinned(page);
+ return folio_maybe_dma_pinned(folio);
+}
+
+/**
+ * is_zero_page - Query if a page is a zero page
+ * @page: The page to query
+ *
+ * This returns true if @page is one of the permanent zero pages.
+ */
+static inline bool is_zero_page(const struct page *page)
+{
+ return is_zero_pfn(page_to_pfn(page));
+}
+
+/**
+ * is_zero_folio - Query if a folio is a zero page
+ * @folio: The folio to query
+ *
+ * This returns true if @folio is one of the permanent zero pages.
+ */
+static inline bool is_zero_folio(const struct folio *folio)
+{
+ return is_zero_page(&folio->page);
}
-/* MIGRATE_CMA and ZONE_MOVABLE do not allow pin pages */
+/* MIGRATE_CMA and ZONE_MOVABLE do not allow pin folios */
#ifdef CONFIG_MIGRATION
-static inline bool is_longterm_pinnable_page(struct page *page)
+static inline bool folio_is_longterm_pinnable(struct folio *folio)
{
#ifdef CONFIG_CMA
- int mt = get_pageblock_migratetype(page);
+ int mt = folio_migratetype(folio);
if (mt == MIGRATE_CMA || mt == MIGRATE_ISOLATE)
return false;
#endif
- /* The zero page may always be pinned */
- if (is_zero_pfn(page_to_pfn(page)))
+ /* The zero page can be "pinned" but gets special handling. */
+ if (is_zero_folio(folio))
return true;
/* Coherent device memory must always allow eviction. */
- if (is_device_coherent_page(page))
+ if (folio_is_device_coherent(folio))
return false;
- /* Otherwise, non-movable zone pages can be pinned. */
- return !is_zone_movable_page(page);
+ /* Otherwise, non-movable zone folios can be pinned. */
+ return !folio_is_zone_movable(folio);
}
#else
-static inline bool is_longterm_pinnable_page(struct page *page)
+static inline bool folio_is_longterm_pinnable(struct folio *folio)
{
return true;
}
#endif
-static inline bool folio_is_longterm_pinnable(struct folio *folio)
-{
- return is_longterm_pinnable_page(&folio->page);
-}
-
static inline void set_page_zone(struct page *page, enum zone_type zone)
{
page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
@@ -1978,7 +2061,7 @@ static inline long folio_nr_pages(struct folio *folio)
#ifdef CONFIG_64BIT
return folio->_folio_nr_pages;
#else
- return 1L << folio->_folio_order;
+ return 1L << (folio->_flags_1 & 0xff);
#endif
}
@@ -1996,7 +2079,7 @@ static inline unsigned long compound_nr(struct page *page)
#ifdef CONFIG_64BIT
return folio->_folio_nr_pages;
#else
- return 1L << folio->_folio_order;
+ return 1L << (folio->_flags_1 & 0xff);
#endif
}
@@ -2142,7 +2225,6 @@ static inline void *folio_address(const struct folio *folio)
return page_address(&folio->page);
}
-extern void *page_rmapping(struct page *page);
extern pgoff_t __page_file_index(struct page *page);
/*
@@ -2210,18 +2292,6 @@ extern void pagefault_out_of_memory(void);
#define offset_in_folio(folio, p) ((unsigned long)(p) & (folio_size(folio) - 1))
/*
- * Flags passed to show_mem() and show_free_areas() to suppress output in
- * various contexts.
- */
-#define SHOW_MEM_FILTER_NODES (0x0001u) /* disallowed nodes */
-
-extern void __show_free_areas(unsigned int flags, nodemask_t *nodemask, int max_zone_idx);
-static void __maybe_unused show_free_areas(unsigned int flags, nodemask_t *nodemask)
-{
- __show_free_areas(flags, nodemask, MAX_NR_ZONES - 1);
-}
-
-/*
* Parameter block passed down to zap_pte_range in exceptional cases.
*/
struct zap_details {
@@ -2277,6 +2347,8 @@ struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
pte_t pte);
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
pte_t pte);
+struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t pmd);
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t pmd);
@@ -2289,9 +2361,9 @@ static inline void zap_vma_pages(struct vm_area_struct *vma)
zap_page_range_single(vma, vma->vm_start,
vma->vm_end - vma->vm_start, NULL);
}
-void unmap_vmas(struct mmu_gather *tlb, struct maple_tree *mt,
+void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
struct vm_area_struct *start_vma, unsigned long start,
- unsigned long end, bool mm_wr_locked);
+ unsigned long end, unsigned long tree_end, bool mm_wr_locked);
struct mmu_notifier_range;
@@ -2312,7 +2384,11 @@ extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
-int generic_error_remove_page(struct address_space *mapping, struct page *page);
+int generic_error_remove_folio(struct address_space *mapping,
+ struct folio *folio);
+
+struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
+ unsigned long address, struct pt_regs *regs);
#ifdef CONFIG_MMU
extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
@@ -2353,27 +2429,57 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
unmap_mapping_range(mapping, holebegin, holelen, 0);
}
+static inline struct vm_area_struct *vma_lookup(struct mm_struct *mm,
+ unsigned long addr);
+
extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
void *buf, int len, unsigned int gup_flags);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
void *buf, int len, unsigned int gup_flags);
-extern int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
- void *buf, int len, unsigned int gup_flags);
long get_user_pages_remote(struct mm_struct *mm,
- unsigned long start, unsigned long nr_pages,
- unsigned int gup_flags, struct page **pages,
- struct vm_area_struct **vmas, int *locked);
+ unsigned long start, unsigned long nr_pages,
+ unsigned int gup_flags, struct page **pages,
+ int *locked);
long pin_user_pages_remote(struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
- struct vm_area_struct **vmas, int *locked);
+ int *locked);
+
+/*
+ * Retrieves a single page alongside its VMA. Does not support FOLL_NOWAIT.
+ */
+static inline struct page *get_user_page_vma_remote(struct mm_struct *mm,
+ unsigned long addr,
+ int gup_flags,
+ struct vm_area_struct **vmap)
+{
+ struct page *page;
+ struct vm_area_struct *vma;
+ int got;
+
+ if (WARN_ON_ONCE(unlikely(gup_flags & FOLL_NOWAIT)))
+ return ERR_PTR(-EINVAL);
+
+ got = get_user_pages_remote(mm, addr, 1, gup_flags, &page, NULL);
+
+ if (got < 0)
+ return ERR_PTR(got);
+
+ vma = vma_lookup(mm, addr);
+ if (WARN_ON_ONCE(!vma)) {
+ put_page(page);
+ return ERR_PTR(-EINVAL);
+ }
+
+ *vmap = vma;
+ return page;
+}
+
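A usage sketch for the new helper: the caller must already hold mmap_lock (the helper passes a NULL locked pointer to get_user_pages_remote()), and the gup flags below are illustrative only.

struct vm_area_struct *vma;
struct page *page;

mmap_read_lock(mm);
page = get_user_page_vma_remote(mm, addr, FOLL_WRITE, &vma);
if (IS_ERR(page)) {
	mmap_read_unlock(mm);
	return PTR_ERR(page);
}
/* ... use page and vma while mmap_lock is held ... */
put_page(page);
mmap_read_unlock(mm);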
long get_user_pages(unsigned long start, unsigned long nr_pages,
- unsigned int gup_flags, struct page **pages,
- struct vm_area_struct **vmas);
+ unsigned int gup_flags, struct page **pages);
long pin_user_pages(unsigned long start, unsigned long nr_pages,
- unsigned int gup_flags, struct page **pages,
- struct vm_area_struct **vmas);
+ unsigned int gup_flags, struct page **pages);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
struct page **pages, unsigned int gup_flags);
long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
@@ -2383,6 +2489,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages,
unsigned int gup_flags, struct page **pages);
int pin_user_pages_fast(unsigned long start, int nr_pages,
unsigned int gup_flags, struct page **pages);
+void folio_add_pin(struct folio *folio);
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
@@ -2400,7 +2507,7 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen);
extern unsigned long move_page_tables(struct vm_area_struct *vma,
unsigned long old_addr, struct vm_area_struct *new_vma,
unsigned long new_addr, unsigned long len,
- bool need_rmap_locks);
+ bool need_rmap_locks, bool for_stack);
/*
* Flags used by change_protection(). For now we make it a bitmap so
@@ -2422,6 +2529,7 @@ extern unsigned long move_page_tables(struct vm_area_struct *vma,
#define MM_CP_UFFD_WP_ALL (MM_CP_UFFD_WP | \
MM_CP_UFFD_WP_RESOLVE)
+bool vma_needs_dirty_tracking(struct vm_area_struct *vma);
int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
{
@@ -2547,14 +2655,6 @@ static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
*maxrss = hiwater_rss;
}
-#if defined(SPLIT_RSS_COUNTING)
-void sync_mm_rss(struct mm_struct *mm);
-#else
-static inline void sync_mm_rss(struct mm_struct *mm)
-{
-}
-#endif
-
#ifndef CONFIG_ARCH_HAS_PTE_SPECIAL
static inline int pte_special(pte_t pte)
{
@@ -2707,42 +2807,93 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
}
#endif /* CONFIG_MMU */
+static inline struct ptdesc *virt_to_ptdesc(const void *x)
+{
+ return page_ptdesc(virt_to_page(x));
+}
+
+static inline void *ptdesc_to_virt(const struct ptdesc *pt)
+{
+ return page_to_virt(ptdesc_page(pt));
+}
+
+static inline void *ptdesc_address(const struct ptdesc *pt)
+{
+ return folio_address(ptdesc_folio(pt));
+}
+
+static inline bool pagetable_is_reserved(struct ptdesc *pt)
+{
+ return folio_test_reserved(ptdesc_folio(pt));
+}
+
+/**
+ * pagetable_alloc - Allocate pagetables
+ * @gfp: GFP flags
+ * @order: desired pagetable order
+ *
+ * pagetable_alloc allocates memory for page tables as well as a page table
+ * descriptor to describe that memory.
+ *
+ * Return: The ptdesc describing the allocated page tables.
+ */
+static inline struct ptdesc *pagetable_alloc(gfp_t gfp, unsigned int order)
+{
+ struct page *page = alloc_pages(gfp | __GFP_COMP, order);
+
+ return page_ptdesc(page);
+}
+
+/**
+ * pagetable_free - Free pagetables
+ * @pt: The page table descriptor
+ *
+ * pagetable_free frees the memory of all page tables described by a page
+ * table descriptor and the memory for the descriptor itself.
+ */
+static inline void pagetable_free(struct ptdesc *pt)
+{
+ struct page *page = ptdesc_page(pt);
+
+ __free_pages(page, compound_order(page));
+}
+
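The alloc/free pair is meant to be combined with the ctor/dtor helpers below. A minimal sketch of an arch-style PTE table allocator built on them; the function name is hypothetical and GFP_PGTABLE_USER is assumed from the generic pgalloc headers:

static pte_t *pte_alloc_one_sketch(struct mm_struct *mm)
{
	struct ptdesc *ptdesc = pagetable_alloc(GFP_PGTABLE_USER, 0);

	if (!ptdesc)
		return NULL;
	if (!pagetable_pte_ctor(ptdesc)) {
		pagetable_free(ptdesc);
		return NULL;
	}
	return ptdesc_address(ptdesc);
}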
#if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS
void __init ptlock_cache_init(void);
-extern bool ptlock_alloc(struct page *page);
-extern void ptlock_free(struct page *page);
+bool ptlock_alloc(struct ptdesc *ptdesc);
+void ptlock_free(struct ptdesc *ptdesc);
-static inline spinlock_t *ptlock_ptr(struct page *page)
+static inline spinlock_t *ptlock_ptr(struct ptdesc *ptdesc)
{
- return page->ptl;
+ return ptdesc->ptl;
}
#else /* ALLOC_SPLIT_PTLOCKS */
static inline void ptlock_cache_init(void)
{
}
-static inline bool ptlock_alloc(struct page *page)
+static inline bool ptlock_alloc(struct ptdesc *ptdesc)
{
return true;
}
-static inline void ptlock_free(struct page *page)
+static inline void ptlock_free(struct ptdesc *ptdesc)
{
}
-static inline spinlock_t *ptlock_ptr(struct page *page)
+static inline spinlock_t *ptlock_ptr(struct ptdesc *ptdesc)
{
- return &page->ptl;
+ return &ptdesc->ptl;
}
#endif /* ALLOC_SPLIT_PTLOCKS */
static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
- return ptlock_ptr(pmd_page(*pmd));
+ return ptlock_ptr(page_ptdesc(pmd_page(*pmd)));
}
-static inline bool ptlock_init(struct page *page)
+static inline bool ptlock_init(struct ptdesc *ptdesc)
{
/*
 * prep_new_page() initializes page->private (and therefore page->ptl)
@@ -2751,10 +2902,10 @@ static inline bool ptlock_init(struct page *page)
 * It can happen if an arch tries to use slab for page table allocation:
 * slab code uses page->slab_cache, which shares storage with page->ptl.
*/
- VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
- if (!ptlock_alloc(page))
+ VM_BUG_ON_PAGE(*(unsigned long *)&ptdesc->ptl, ptdesc_page(ptdesc));
+ if (!ptlock_alloc(ptdesc))
return false;
- spin_lock_init(ptlock_ptr(page));
+ spin_lock_init(ptlock_ptr(ptdesc));
return true;
}
@@ -2767,34 +2918,49 @@ static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
return &mm->page_table_lock;
}
static inline void ptlock_cache_init(void) {}
-static inline bool ptlock_init(struct page *page) { return true; }
-static inline void ptlock_free(struct page *page) {}
+static inline bool ptlock_init(struct ptdesc *ptdesc) { return true; }
+static inline void ptlock_free(struct ptdesc *ptdesc) {}
#endif /* USE_SPLIT_PTE_PTLOCKS */
-static inline bool pgtable_pte_page_ctor(struct page *page)
+static inline bool pagetable_pte_ctor(struct ptdesc *ptdesc)
{
- if (!ptlock_init(page))
+ struct folio *folio = ptdesc_folio(ptdesc);
+
+ if (!ptlock_init(ptdesc))
return false;
- __SetPageTable(page);
- inc_lruvec_page_state(page, NR_PAGETABLE);
+ __folio_set_pgtable(folio);
+ lruvec_stat_add_folio(folio, NR_PAGETABLE);
return true;
}
-static inline void pgtable_pte_page_dtor(struct page *page)
+static inline void pagetable_pte_dtor(struct ptdesc *ptdesc)
{
- ptlock_free(page);
- __ClearPageTable(page);
- dec_lruvec_page_state(page, NR_PAGETABLE);
+ struct folio *folio = ptdesc_folio(ptdesc);
+
+ ptlock_free(ptdesc);
+ __folio_clear_pgtable(folio);
+ lruvec_stat_sub_folio(folio, NR_PAGETABLE);
}
-#define pte_offset_map_lock(mm, pmd, address, ptlp) \
-({ \
- spinlock_t *__ptl = pte_lockptr(mm, pmd); \
- pte_t *__pte = pte_offset_map(pmd, address); \
- *(ptlp) = __ptl; \
- spin_lock(__ptl); \
- __pte; \
-})
+pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp);
+static inline pte_t *pte_offset_map(pmd_t *pmd, unsigned long addr)
+{
+ return __pte_offset_map(pmd, addr, NULL);
+}
+
+pte_t *__pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long addr, spinlock_t **ptlp);
+static inline pte_t *pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long addr, spinlock_t **ptlp)
+{
+ pte_t *pte;
+
+ __cond_lock(*ptlp, pte = __pte_offset_map_lock(mm, pmd, addr, ptlp));
+ return pte;
+}
+
+pte_t *pte_offset_map_nolock(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long addr, spinlock_t **ptlp);
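The map-and-lock helpers are now out-of-line and, judging by the NULL-tolerant shape of the reworked API (an assumption, as the failure mode is not spelled out in this hunk), may fail if the page table is freed or replaced underneath us. A sketch of the retry-friendly call pattern:

pte_t *pte;
spinlock_t *ptl;

pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
if (!pte)
	return 0;	/* raced with page table teardown; caller retries */
/* ... read or update *pte under ptl ... */
pte_unmap_unlock(pte, ptl);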
#define pte_unmap_unlock(pte, ptl) do { \
spin_unlock(ptl); \
@@ -2822,28 +2988,33 @@ static inline struct page *pmd_pgtable_page(pmd_t *pmd)
return virt_to_page((void *)((unsigned long) pmd & mask));
}
+static inline struct ptdesc *pmd_ptdesc(pmd_t *pmd)
+{
+ return page_ptdesc(pmd_pgtable_page(pmd));
+}
+
static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
- return ptlock_ptr(pmd_pgtable_page(pmd));
+ return ptlock_ptr(pmd_ptdesc(pmd));
}
-static inline bool pmd_ptlock_init(struct page *page)
+static inline bool pmd_ptlock_init(struct ptdesc *ptdesc)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- page->pmd_huge_pte = NULL;
+ ptdesc->pmd_huge_pte = NULL;
#endif
- return ptlock_init(page);
+ return ptlock_init(ptdesc);
}
-static inline void pmd_ptlock_free(struct page *page)
+static inline void pmd_ptlock_free(struct ptdesc *ptdesc)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
+ VM_BUG_ON_PAGE(ptdesc->pmd_huge_pte, ptdesc_page(ptdesc));
#endif
- ptlock_free(page);
+ ptlock_free(ptdesc);
}
-#define pmd_huge_pte(mm, pmd) (pmd_pgtable_page(pmd)->pmd_huge_pte)
+#define pmd_huge_pte(mm, pmd) (pmd_ptdesc(pmd)->pmd_huge_pte)
#else
@@ -2852,8 +3023,8 @@ static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
return &mm->page_table_lock;
}
-static inline bool pmd_ptlock_init(struct page *page) { return true; }
-static inline void pmd_ptlock_free(struct page *page) {}
+static inline bool pmd_ptlock_init(struct ptdesc *ptdesc) { return true; }
+static inline void pmd_ptlock_free(struct ptdesc *ptdesc) {}
#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
@@ -2866,20 +3037,24 @@ static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
return ptl;
}
-static inline bool pgtable_pmd_page_ctor(struct page *page)
+static inline bool pagetable_pmd_ctor(struct ptdesc *ptdesc)
{
- if (!pmd_ptlock_init(page))
+ struct folio *folio = ptdesc_folio(ptdesc);
+
+ if (!pmd_ptlock_init(ptdesc))
return false;
- __SetPageTable(page);
- inc_lruvec_page_state(page, NR_PAGETABLE);
+ __folio_set_pgtable(folio);
+ lruvec_stat_add_folio(folio, NR_PAGETABLE);
return true;
}
-static inline void pgtable_pmd_page_dtor(struct page *page)
+static inline void pagetable_pmd_dtor(struct ptdesc *ptdesc)
{
- pmd_ptlock_free(page);
- __ClearPageTable(page);
- dec_lruvec_page_state(page, NR_PAGETABLE);
+ struct folio *folio = ptdesc_folio(ptdesc);
+
+ pmd_ptlock_free(ptdesc);
+ __folio_clear_pgtable(folio);
+ lruvec_stat_sub_folio(folio, NR_PAGETABLE);
}
/*
@@ -2901,6 +3076,22 @@ static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
return ptl;
}
+static inline void pagetable_pud_ctor(struct ptdesc *ptdesc)
+{
+ struct folio *folio = ptdesc_folio(ptdesc);
+
+ __folio_set_pgtable(folio);
+ lruvec_stat_add_folio(folio, NR_PAGETABLE);
+}
+
+static inline void pagetable_pud_dtor(struct ptdesc *ptdesc)
+{
+ struct folio *folio = ptdesc_folio(ptdesc);
+
+ __folio_clear_pgtable(folio);
+ lruvec_stat_sub_folio(folio, NR_PAGETABLE);
+}
+
extern void __init pagecache_init(void);
extern void free_initmem(void);
@@ -2915,7 +3106,8 @@ extern unsigned long free_reserved_area(void *start, void *end,
extern void adjust_managed_page_count(struct page *page, long count);
-extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end);
+extern void reserve_bootmem_region(phys_addr_t start,
+ phys_addr_t end, int nid);
/* Free the reserved page into the buddy system, so it gets managed. */
static inline void free_reserved_page(struct page *page)
@@ -2933,6 +3125,11 @@ static inline void mark_page_reserved(struct page *page)
adjust_managed_page_count(page, -1);
}
+static inline void free_reserved_ptdesc(struct ptdesc *pt)
+{
+ free_reserved_page(ptdesc_page(pt));
+}
+
/*
* Default method to free all the __init memory into the buddy system.
* The freed pages will be poisoned with pattern "poison" if it's within
@@ -2994,19 +3191,13 @@ extern int __meminit early_pfn_to_nid(unsigned long pfn);
#endif
extern void set_dma_reserve(unsigned long new_dma_reserve);
-extern void memmap_init_range(unsigned long, int, unsigned long,
- unsigned long, unsigned long, enum meminit_context,
- struct vmem_altmap *, int migratetype);
-extern void setup_per_zone_wmarks(void);
-extern void calculate_min_free_kbytes(void);
-extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void);
extern void __init mmap_init(void);
extern void __show_mem(unsigned int flags, nodemask_t *nodemask, int max_zone_idx);
-static inline void show_mem(unsigned int flags, nodemask_t *nodemask)
+static inline void show_mem(void)
{
- __show_mem(flags, nodemask, MAX_NR_ZONES - 1);
+ __show_mem(0, NULL, MAX_NR_ZONES - 1);
}
extern long si_mem_available(void);
extern void si_meminfo(struct sysinfo * val);
@@ -3020,11 +3211,6 @@ void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);
extern void setup_per_cpu_pageset(void);
-/* page_alloc.c */
-extern int min_free_kbytes;
-extern int watermark_boost_factor;
-extern int watermark_scale_factor;
-
/* nommu.c */
extern atomic_long_t mmap_pages_allocated;
extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
@@ -3070,22 +3256,73 @@ extern int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
struct vm_area_struct *next);
extern int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
unsigned long start, unsigned long end, pgoff_t pgoff);
-extern struct vm_area_struct *vma_merge(struct vma_iterator *vmi,
- struct mm_struct *, struct vm_area_struct *prev, unsigned long addr,
- unsigned long end, unsigned long vm_flags, struct anon_vma *,
- struct file *, pgoff_t, struct mempolicy *, struct vm_userfaultfd_ctx,
- struct anon_vma_name *);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
-extern int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *,
- unsigned long addr, int new_below);
-extern int split_vma(struct vma_iterator *vmi, struct vm_area_struct *,
- unsigned long addr, int new_below);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void unlink_file_vma(struct vm_area_struct *);
extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
unsigned long addr, unsigned long len, pgoff_t pgoff,
bool *need_rmap_locks);
extern void exit_mmap(struct mm_struct *);
+struct vm_area_struct *vma_modify(struct vma_iterator *vmi,
+ struct vm_area_struct *prev,
+ struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ unsigned long vm_flags,
+ struct mempolicy *policy,
+ struct vm_userfaultfd_ctx uffd_ctx,
+ struct anon_vma_name *anon_name);
+
+/* We are about to modify the VMA's flags. */
+static inline struct vm_area_struct
+*vma_modify_flags(struct vma_iterator *vmi,
+ struct vm_area_struct *prev,
+ struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ unsigned long new_flags)
+{
+ return vma_modify(vmi, prev, vma, start, end, new_flags,
+ vma_policy(vma), vma->vm_userfaultfd_ctx,
+ anon_vma_name(vma));
+}
+
+/* We are about to modify the VMA's flags and/or anon_name. */
+static inline struct vm_area_struct
+*vma_modify_flags_name(struct vma_iterator *vmi,
+ struct vm_area_struct *prev,
+ struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end,
+ unsigned long new_flags,
+ struct anon_vma_name *new_name)
+{
+ return vma_modify(vmi, prev, vma, start, end, new_flags,
+ vma_policy(vma), vma->vm_userfaultfd_ctx, new_name);
+}
+
+/* We are about to modify the VMA's memory policy. */
+static inline struct vm_area_struct
+*vma_modify_policy(struct vma_iterator *vmi,
+ struct vm_area_struct *prev,
+ struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ struct mempolicy *new_pol)
+{
+ return vma_modify(vmi, prev, vma, start, end, vma->vm_flags,
+ new_pol, vma->vm_userfaultfd_ctx, anon_vma_name(vma));
+}
+
+/* We are about to modify the VMA's flags and/or uffd context. */
+static inline struct vm_area_struct
+*vma_modify_flags_uffd(struct vma_iterator *vmi,
+ struct vm_area_struct *prev,
+ struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ unsigned long new_flags,
+ struct vm_userfaultfd_ctx new_ctx)
+{
+ return vma_modify(vmi, prev, vma, start, end, new_flags,
+ vma_policy(vma), new_ctx, anon_vma_name(vma));
+}
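A sketch of the intended usage, assuming an mprotect-style caller that has already looked up prev and vma via the iterator:

	vma = vma_modify_flags(&vmi, prev, vma, start, end, newflags);
	if (IS_ERR(vma))
		return PTR_ERR(vma);
	/* vma now covers exactly [start, end); merge/split already done */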
static inline int check_data_rlimit(unsigned long rlim,
unsigned long new,
@@ -3133,10 +3370,11 @@ extern unsigned long mmap_region(struct file *file, unsigned long addr,
struct list_head *uf);
extern unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot, unsigned long flags,
- unsigned long pgoff, unsigned long *populate, struct list_head *uf);
+ vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
+ struct list_head *uf);
extern int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
unsigned long start, size_t len, struct list_head *uf,
- bool downgrade);
+ bool unlock);
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
struct list_head *uf);
extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);
@@ -3144,7 +3382,7 @@ extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in,
#ifdef CONFIG_MMU
extern int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
unsigned long start, unsigned long end,
- struct list_head *uf, bool downgrade);
+ struct list_head *uf, bool unlock);
extern int __mm_populate(unsigned long addr, unsigned long len,
int ignore_errors);
static inline void mm_populate(unsigned long addr, unsigned long len)
@@ -3156,8 +3394,7 @@ static inline void mm_populate(unsigned long addr, unsigned long len)
static inline void mm_populate(unsigned long addr, unsigned long len) {}
#endif
-/* These take the mm semaphore themselves */
-extern int __must_check vm_brk(unsigned long, unsigned long);
+/* This takes the mm semaphore itself */
extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long);
extern int vm_munmap(unsigned long, size_t);
extern unsigned long __must_check vm_mmap(struct file *, unsigned long,
@@ -3190,16 +3427,11 @@ extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);
extern unsigned long stack_guard_gap;
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
-extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
+int expand_stack_locked(struct vm_area_struct *vma, unsigned long address);
+struct vm_area_struct *expand_stack(struct mm_struct * mm, unsigned long addr);
/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */
-extern int expand_downwards(struct vm_area_struct *vma,
- unsigned long address);
-#if VM_GROWSUP
-extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
-#else
- #define expand_upwards(vma, address) (0)
-#endif
+int expand_downwards(struct vm_area_struct *vma, unsigned long address);
/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
@@ -3226,15 +3458,26 @@ struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
return mtree_load(&mm->mm_mt, addr);
}
+static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
+{
+ if (vma->vm_flags & VM_GROWSDOWN)
+ return stack_guard_gap;
+
+ /* See reasoning around the VM_SHADOW_STACK definition */
+ if (vma->vm_flags & VM_SHADOW_STACK)
+ return PAGE_SIZE;
+
+ return 0;
+}
+
static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
+ unsigned long gap = stack_guard_start_gap(vma);
unsigned long vm_start = vma->vm_start;
- if (vma->vm_flags & VM_GROWSDOWN) {
- vm_start -= stack_guard_gap;
- if (vm_start > vma->vm_start)
- vm_start = 0;
- }
+ vm_start -= gap;
+ if (vm_start > vma->vm_start)
+ vm_start = 0;
return vm_start;
}
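A worked example: with stack_guard_gap at its default of 256 pages, a VM_GROWSDOWN stack starting at 0x7fff00000000 reports vm_start_gap() = 0x7fff00000000 - 256 * PAGE_SIZE, so gap searches treat the guard region as occupied; the clamp to 0 handles stacks so low that the subtraction would wrap. A VM_SHADOW_STACK mapping reserves only a single guard page.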
@@ -3294,7 +3537,8 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
#endif
-struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
+struct vm_area_struct *find_extend_vma_locked(struct mm_struct *,
+ unsigned long addr);
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t);
int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
@@ -3342,6 +3586,26 @@ static inline vm_fault_t vmf_error(int err)
{
if (err == -ENOMEM)
return VM_FAULT_OOM;
+ else if (err == -EHWPOISON)
+ return VM_FAULT_HWPOISON;
+ return VM_FAULT_SIGBUS;
+}
+
+/*
+ * Convert errno to return value for ->page_mkwrite() calls.
+ *
+ * This should eventually be merged with vmf_error() above, but will need a
+ * careful audit of all vmf_error() callers.
+ */
+static inline vm_fault_t vmf_fs_error(int err)
+{
+ if (err == 0)
+ return VM_FAULT_LOCKED;
+ if (err == -EFAULT || err == -EAGAIN)
+ return VM_FAULT_NOPAGE;
+ if (err == -ENOMEM)
+ return VM_FAULT_OOM;
+ /* -ENOSPC, -EDQUOT, -EIO ... */
return VM_FAULT_SIGBUS;
}
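A minimal sketch of a ->page_mkwrite() handler using the new helper; example_fs_reserve_blocks() is a hypothetical filesystem callback standing in for whatever work can fail:

	static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
	{
		struct folio *folio = page_folio(vmf->page);
		int err;

		folio_lock(folio);
		err = example_fs_reserve_blocks(folio);	/* hypothetical */
		if (err)
			folio_unlock(folio);
		/* err == 0 maps to VM_FAULT_LOCKED: folio stays locked */
		return vmf_fs_error(err);
	}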
@@ -3363,15 +3627,24 @@ static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
* Indicates whether GUP can follow a PROT_NONE mapped page, or whether
* a (NUMA hinting) fault is required.
*/
-static inline bool gup_can_follow_protnone(unsigned int flags)
+static inline bool gup_can_follow_protnone(struct vm_area_struct *vma,
+ unsigned int flags)
{
/*
- * FOLL_FORCE has to be able to make progress even if the VMA is
- * inaccessible. Further, FOLL_FORCE access usually does not represent
- * application behaviour and we should avoid triggering NUMA hinting
- * faults.
+ * If callers don't want to honor NUMA hinting faults, no need to
+ * determine if we would actually have to trigger a NUMA hinting fault.
*/
- return flags & FOLL_FORCE;
+ if (!(flags & FOLL_HONOR_NUMA_FAULT))
+ return true;
+
+ /*
+ * NUMA hinting faults don't apply in inaccessible (PROT_NONE) VMAs.
+ *
+ * Requiring a fault here even for inaccessible VMAs would mean that
+ * FOLL_FORCE cannot make any progress, because handle_mm_fault()
+ * refuses to process NUMA hinting faults in inaccessible VMAs.
+ */
+ return !vma_is_accessible(vma);
}
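A sketch of how a GUP walker is expected to consult the helper, per the comment above; no_page_table() stands in for the walker's usual fallback:

	if (pte_protnone(pte) && !gup_can_follow_protnone(vma, flags))
		return no_page_table(vma, flags);	/* take the fault path */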
typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);
@@ -3442,8 +3715,8 @@ static inline bool debug_pagealloc_enabled(void)
}
/*
- * For use in fast paths after init_debug_pagealloc() has run, or when a
- * false negative result is not harmful when called too early.
+ * For use in fast paths after mem_debugging_and_hardening_init() has run,
+ * or when a false negative result is not harmful when called too early.
*/
static inline bool debug_pagealloc_enabled_static(void)
{
@@ -3453,13 +3726,12 @@ static inline bool debug_pagealloc_enabled_static(void)
return static_branch_unlikely(&_debug_pagealloc_enabled);
}
-#ifdef CONFIG_DEBUG_PAGEALLOC
/*
* To support DEBUG_PAGEALLOC architecture must ensure that
* __kernel_map_pages() never fails
*/
extern void __kernel_map_pages(struct page *page, int numpages, int enable);
-
+#ifdef CONFIG_DEBUG_PAGEALLOC
static inline void debug_pagealloc_map_pages(struct page *page, int numpages)
{
if (debug_pagealloc_enabled_static())
@@ -3471,9 +3743,58 @@ static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages)
if (debug_pagealloc_enabled_static())
__kernel_map_pages(page, numpages, 0);
}
+
+extern unsigned int _debug_guardpage_minorder;
+DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
+
+static inline unsigned int debug_guardpage_minorder(void)
+{
+ return _debug_guardpage_minorder;
+}
+
+static inline bool debug_guardpage_enabled(void)
+{
+ return static_branch_unlikely(&_debug_guardpage_enabled);
+}
+
+static inline bool page_is_guard(struct page *page)
+{
+ if (!debug_guardpage_enabled())
+ return false;
+
+ return PageGuard(page);
+}
+
+bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order,
+ int migratetype);
+static inline bool set_page_guard(struct zone *zone, struct page *page,
+ unsigned int order, int migratetype)
+{
+ if (!debug_guardpage_enabled())
+ return false;
+ return __set_page_guard(zone, page, order, migratetype);
+}
+
+void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order,
+ int migratetype);
+static inline void clear_page_guard(struct zone *zone, struct page *page,
+ unsigned int order, int migratetype)
+{
+ if (!debug_guardpage_enabled())
+ return;
+ __clear_page_guard(zone, page, order, migratetype);
+}
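A sketch of the intended user, the buddy allocator's split path: when halving a high-order block it may install a guard instead of putting the buddy on a free list, roughly:

	if (set_page_guard(zone, &page[size], high, migratetype))
		continue;	/* guard installed; skip the free list */

with clear_page_guard() undoing this when the buddy merges back.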
+
#else /* CONFIG_DEBUG_PAGEALLOC */
static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {}
static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {}
+static inline unsigned int debug_guardpage_minorder(void) { return 0; }
+static inline bool debug_guardpage_enabled(void) { return false; }
+static inline bool page_is_guard(struct page *page) { return false; }
+static inline bool set_page_guard(struct zone *zone, struct page *page,
+ unsigned int order, int migratetype) { return false; }
+static inline void clear_page_guard(struct zone *zone, struct page *page,
+ unsigned int order, int migratetype) {}
#endif /* CONFIG_DEBUG_PAGEALLOC */
#ifdef __HAVE_ARCH_GATE_AREA
@@ -3550,14 +3871,59 @@ void vmemmap_free(unsigned long start, unsigned long end,
struct vmem_altmap *altmap);
#endif
-#ifdef CONFIG_ARCH_WANT_OPTIMIZE_VMEMMAP
-static inline bool vmemmap_can_optimize(struct vmem_altmap *altmap,
- struct dev_pagemap *pgmap)
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
+{
+ /* number of pfns from base where pfn_to_page() is valid */
+ if (altmap)
+ return altmap->reserve + altmap->free;
+ return 0;
+}
+
+static inline void vmem_altmap_free(struct vmem_altmap *altmap,
+ unsigned long nr_pfns)
{
- return is_power_of_2(sizeof(struct page)) &&
- pgmap && (pgmap_vmemmap_nr(pgmap) > 1) && !altmap;
+ altmap->alloc -= nr_pfns;
}
#else
+static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
+{
+ return 0;
+}
+
+static inline void vmem_altmap_free(struct vmem_altmap *altmap,
+ unsigned long nr_pfns)
+{
+}
+#endif
+
+#define VMEMMAP_RESERVE_NR 2
+#ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
+static inline bool __vmemmap_can_optimize(struct vmem_altmap *altmap,
+ struct dev_pagemap *pgmap)
+{
+ unsigned long nr_pages;
+ unsigned long nr_vmemmap_pages;
+
+ if (!pgmap || !is_power_of_2(sizeof(struct page)))
+ return false;
+
+ nr_pages = pgmap_vmemmap_nr(pgmap);
+ nr_vmemmap_pages = ((nr_pages * sizeof(struct page)) >> PAGE_SHIFT);
+ /*
+ * For vmemmap optimization with DAX we need minimum 2 vmemmap
+ * pages. See layout diagram in Documentation/mm/vmemmap_dedup.rst
+ */
+ return !altmap && (nr_vmemmap_pages > VMEMMAP_RESERVE_NR);
+}
+/*
+ * If we don't have an architecture override, use the generic rule
+ */
+#ifndef vmemmap_can_optimize
+#define vmemmap_can_optimize __vmemmap_can_optimize
+#endif
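A worked example of the threshold: with a 64-byte struct page and a DAX pgmap of 2M compound pages on 4K base pages, pgmap_vmemmap_nr() is 512, so nr_vmemmap_pages = (512 * 64) >> 12 = 8 > VMEMMAP_RESERVE_NR, and the optimization applies; on 64K base pages the same mapping yields 0 and is correctly rejected.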
+
+#else
static inline bool vmemmap_can_optimize(struct vmem_altmap *altmap,
struct dev_pagemap *pgmap)
{
@@ -3576,6 +3942,7 @@ enum mf_flags {
MF_UNPOISON = 1 << 4,
MF_SW_SIMULATED = 1 << 5,
MF_NO_RETRY = 1 << 6,
+ MF_MEM_PRE_REMOVE = 1 << 7,
};
int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
unsigned long count, int mf_flags);
@@ -3586,6 +3953,10 @@ extern void shake_page(struct page *p);
extern atomic_long_t num_poisoned_pages __read_mostly;
extern int soft_offline_page(unsigned long pfn, int flags);
#ifdef CONFIG_MEMORY_FAILURE
+/*
+ * Sysfs entries for memory failure handling statistics.
+ */
+extern const struct attribute_group memory_failure_attr_group;
extern void memory_failure_queue(unsigned long pfn, int flags);
extern int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
bool *migratable_cleared);
@@ -3678,11 +4049,6 @@ enum mf_action_page_type {
MF_MSG_UNKNOWN,
};
-/*
- * Sysfs entries for memory failure handling statistics.
- */
-extern const struct attribute_group memory_failure_attr_group;
-
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
extern void clear_huge_page(struct page *page,
unsigned long addr_hint,
@@ -3712,33 +4078,6 @@ static inline bool vma_is_special_huge(const struct vm_area_struct *vma)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
-#ifdef CONFIG_DEBUG_PAGEALLOC
-extern unsigned int _debug_guardpage_minorder;
-DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
-
-static inline unsigned int debug_guardpage_minorder(void)
-{
- return _debug_guardpage_minorder;
-}
-
-static inline bool debug_guardpage_enabled(void)
-{
- return static_branch_unlikely(&_debug_guardpage_enabled);
-}
-
-static inline bool page_is_guard(struct page *page)
-{
- if (!debug_guardpage_enabled())
- return false;
-
- return PageGuard(page);
-}
-#else
-static inline unsigned int debug_guardpage_minorder(void) { return 0; }
-static inline bool debug_guardpage_enabled(void) { return false; }
-static inline bool page_is_guard(struct page *page) { return false; }
-#endif /* CONFIG_DEBUG_PAGEALLOC */
-
#if MAX_NUMNODES > 1
void __init setup_nr_node_ids(void);
#else
@@ -3773,25 +4112,26 @@ static inline void mem_dump_obj(void *object) {}
#endif
/**
- * seal_check_future_write - Check for F_SEAL_FUTURE_WRITE flag and handle it
+ * seal_check_write - Check for F_SEAL_WRITE or F_SEAL_FUTURE_WRITE flags and
+ * handle them.
* @seals: the seals to check
* @vma: the vma to operate on
*
- * Check whether F_SEAL_FUTURE_WRITE is set; if so, do proper check/handling on
- * the vma flags. Return 0 if check pass, or <0 for errors.
+ * Check whether F_SEAL_WRITE or F_SEAL_FUTURE_WRITE are set; if so, do proper
+ * check/handling on the vma flags. Return 0 if the check passes, or <0 for errors.
*/
-static inline int seal_check_future_write(int seals, struct vm_area_struct *vma)
+static inline int seal_check_write(int seals, struct vm_area_struct *vma)
{
- if (seals & F_SEAL_FUTURE_WRITE) {
+ if (seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
/*
* New PROT_WRITE and MAP_SHARED mmaps are not allowed when
- * "future write" seal active.
+ * write seals are active.
*/
if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
return -EPERM;
/*
- * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as
+ * Since an F_SEAL_[FUTURE_]WRITE sealed memfd can be mapped as
* MAP_SHARED and read-only, take care to not allow mprotect to
* revert protections on such mappings. Do this only for shared
* mappings. For private mappings, we don't need to mask
@@ -3816,4 +4156,30 @@ madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
}
#endif
+#ifdef CONFIG_UNACCEPTED_MEMORY
+
+bool range_contains_unaccepted_memory(phys_addr_t start, phys_addr_t end);
+void accept_memory(phys_addr_t start, phys_addr_t end);
+
+#else
+
+static inline bool range_contains_unaccepted_memory(phys_addr_t start,
+ phys_addr_t end)
+{
+ return false;
+}
+
+static inline void accept_memory(phys_addr_t start, phys_addr_t end)
+{
+}
+
+#endif
+
+static inline bool pfn_is_unaccepted_memory(unsigned long pfn)
+{
+ phys_addr_t paddr = pfn << PAGE_SHIFT;
+
+ return range_contains_unaccepted_memory(paddr, paddr + PAGE_SIZE);
+}
+
#endif /* _LINUX_MM_H */
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 0e1d239a882c..f4fe593c1400 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -4,6 +4,7 @@
#include <linux/atomic.h>
#include <linux/huge_mm.h>
+#include <linux/mm_types.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/userfaultfd_k.h>
@@ -231,22 +232,27 @@ static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio,
if (folio_test_unevictable(folio) || !lrugen->enabled)
return false;
/*
- * There are three common cases for this page:
- * 1. If it's hot, e.g., freshly faulted in or previously hot and
- * migrated, add it to the youngest generation.
- * 2. If it's cold but can't be evicted immediately, i.e., an anon page
- * not in swapcache or a dirty page pending writeback, add it to the
- * second oldest generation.
- * 3. Everything else (clean, cold) is added to the oldest generation.
+ * There are four common cases for this page:
+ * 1. If it's hot, i.e., freshly faulted in, add it to the youngest
+ * generation, and it's protected over the rest below.
+ * 2. If it can't be evicted immediately, i.e., a dirty page pending
+ * writeback, add it to the second youngest generation.
+ * 3. If it should be evicted first, e.g., cold and clean from
+ * folio_rotate_reclaimable(), add it to the oldest generation.
+ * 4. Everything else falls between 2 & 3 above and is added to the
+ * second oldest generation if it's considered inactive, or the
+ * oldest generation otherwise. See lru_gen_is_active().
*/
if (folio_test_active(folio))
seq = lrugen->max_seq;
else if ((type == LRU_GEN_ANON && !folio_test_swapcache(folio)) ||
(folio_test_reclaim(folio) &&
(folio_test_dirty(folio) || folio_test_writeback(folio))))
- seq = lrugen->min_seq[type] + 1;
- else
+ seq = lrugen->max_seq - 1;
+ else if (reclaiming || lrugen->min_seq[type] + MIN_NR_GENS >= lrugen->max_seq)
seq = lrugen->min_seq[type];
+ else
+ seq = lrugen->min_seq[type] + 1;
gen = lru_gen_from_seq(seq);
flags = (gen + 1UL) << LRU_GEN_PGOFF;
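A worked example of the seq selection above: with max_seq = 10, min_seq[type] = 6 and MIN_NR_GENS = 2, an active folio gets seq 10 (youngest), a dirty folio pending writeback gets 9, a reclaiming folio gets 6 (oldest), and everything else gets 7 (second oldest) because 6 + 2 < 10.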
@@ -323,12 +329,6 @@ void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
list_add(&folio->lru, &lruvec->lists[lru]);
}
-static __always_inline void add_page_to_lru_list(struct page *page,
- struct lruvec *lruvec)
-{
- lruvec_add_folio(lruvec, page_folio(page));
-}
-
static __always_inline
void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
{
@@ -357,22 +357,7 @@ void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
-folio_nr_pages(folio));
}
-static __always_inline void del_page_from_lru_list(struct page *page,
- struct lruvec *lruvec)
-{
- lruvec_del_folio(lruvec, page_folio(page));
-}
-
#ifdef CONFIG_ANON_VMA_NAME
-/*
- * mmap_lock should be read-locked when calling anon_vma_name(). Caller should
- * either keep holding the lock while using the returned pointer or it should
- * raise anon_vma_name refcount before releasing the lock.
- */
-extern struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma);
-extern struct anon_vma_name *anon_vma_name_alloc(const char *name);
-extern void anon_vma_name_free(struct kref *kref);
-
/* mmap_lock should be read-locked */
static inline void anon_vma_name_get(struct anon_vma_name *anon_name)
{
@@ -427,16 +412,6 @@ static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
}
#else /* CONFIG_ANON_VMA_NAME */
-static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
-{
- return NULL;
-}
-
-static inline struct anon_vma_name *anon_vma_name_alloc(const char *name)
-{
- return NULL;
-}
-
static inline void anon_vma_name_get(struct anon_vma_name *anon_name) {}
static inline void anon_vma_name_put(struct anon_vma_name *anon_name) {}
static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
@@ -535,6 +510,27 @@ static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
return atomic_read(&mm->tlb_flush_pending) > 1;
}
+#ifdef CONFIG_MMU
+/*
+ * Computes the pte marker to copy from the given source entry into dst_vma.
+ * If no marker should be copied, returns 0.
+ * The caller should insert a new pte created with make_pte_marker().
+ */
+static inline pte_marker copy_pte_marker(
+ swp_entry_t entry, struct vm_area_struct *dst_vma)
+{
+ pte_marker srcm = pte_marker_get(entry);
+ /* Always copy error entries. */
+ pte_marker dstm = srcm & PTE_MARKER_POISONED;
+
+ /* Only copy PTE markers if UFFD register matches. */
+ if ((srcm & PTE_MARKER_UFFD_WP) && userfaultfd_wp(dst_vma))
+ dstm |= PTE_MARKER_UFFD_WP;
+
+ return dstm;
+}
+#endif
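A sketch of the intended fork/copy usage; only markers that survive the filter are re-installed in the child:

	pte_marker marker = copy_pte_marker(entry, dst_vma);

	if (marker)
		set_pte_at(dst_mm, addr, dst_pte, make_pte_marker(marker));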
+
/*
* If this pte is wr-protected by uffd-wp in any form, arm the special pte to
* replace a none pte. NOTE! This should only be called when *pte is already
@@ -555,7 +551,7 @@ pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,
bool arm_uffd_pte = false;
/* The current status of the pte should be "cleared" before calling */
- WARN_ON_ONCE(!pte_none(*pte));
+ WARN_ON_ONCE(!pte_none(ptep_get(pte)));
/*
* NOTE: userfaultfd_wp_unpopulated() doesn't need this whole
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 306a3d1a0fa6..8b611e13153e 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -125,36 +125,11 @@ struct page {
struct page_pool *pp;
unsigned long _pp_mapping_pad;
unsigned long dma_addr;
- union {
- /**
- * dma_addr_upper: might require a 64-bit
- * value on 32-bit architectures.
- */
- unsigned long dma_addr_upper;
- /**
- * For frag page support, not supported in
- * 32-bit architectures with 64-bit DMA.
- */
- atomic_long_t pp_frag_count;
- };
+ atomic_long_t pp_ref_count;
};
struct { /* Tail pages of compound page */
unsigned long compound_head; /* Bit zero is set */
};
- struct { /* Page table pages */
- unsigned long _pt_pad_1; /* compound_head */
- pgtable_t pmd_huge_pte; /* protected by page->ptl */
- unsigned long _pt_pad_2; /* mapping */
- union {
- struct mm_struct *pt_mm; /* x86 pgds only */
- atomic_t pt_frag_refcount; /* powerpc */
- };
-#if ALLOC_SPLIT_PTLOCKS
- spinlock_t *ptl;
-#else
- spinlock_t ptl;
-#endif
- };
struct { /* ZONE_DEVICE pages */
/** @pgmap: Points to the hosting device page map. */
struct dev_pagemap *pgmap;
@@ -213,6 +188,10 @@ struct page {
not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
+#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
+ int _last_cpupid;
+#endif
+
#ifdef CONFIG_KMSAN
/*
* KMSAN metadata for this page:
@@ -224,10 +203,6 @@ struct page {
struct page *kmsan_shadow;
struct page *kmsan_origin;
#endif
-
-#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
- int _last_cpupid;
-#endif
} _struct_page_alignment;
/*
@@ -262,6 +237,14 @@ static inline struct page *encoded_page_ptr(struct encoded_page *page)
return (struct page *)(~ENCODE_PAGE_BITS & (unsigned long)page);
}
+/*
+ * A swap entry has to fit into a "unsigned long", as the entry is hidden
+ * in the "index" field of the swapper address space.
+ */
+typedef struct {
+ unsigned long val;
+} swp_entry_t;
+
/**
* struct folio - Represents a contiguous set of bytes.
* @flags: Identical to the page flags.
@@ -272,14 +255,14 @@ static inline struct page *encoded_page_ptr(struct encoded_page *page)
* @index: Offset within the file, in units of pages. For anonymous memory,
* this is the index from the beginning of the mmap.
* @private: Filesystem per-folio data (see folio_attach_private()).
- * Used for swp_entry_t if folio_test_swapcache().
+ * @swap: Used for swp_entry_t if folio_test_swapcache().
* @_mapcount: Do not access this member directly. Use folio_mapcount() to
* find out how many times this folio is mapped by userspace.
* @_refcount: Do not access this member directly. Use folio_ref_count()
* to find how many references there are to this folio.
* @memcg_data: Memory Control Group data.
- * @_folio_dtor: Which destructor to use for this folio.
- * @_folio_order: Do not use directly, call folio_order().
+ * @virtual: Virtual address in the kernel direct map.
+ * @_last_cpupid: IDs of last CPU and last process that accessed the folio.
* @_entire_mapcount: Do not use directly, call folio_entire_mapcount().
* @_nr_pages_mapped: Do not use directly, call folio_mapcount().
* @_pincount: Do not use directly, call folio_maybe_dma_pinned().
@@ -317,12 +300,21 @@ struct folio {
};
struct address_space *mapping;
pgoff_t index;
- void *private;
+ union {
+ void *private;
+ swp_entry_t swap;
+ };
atomic_t _mapcount;
atomic_t _refcount;
#ifdef CONFIG_MEMCG
unsigned long memcg_data;
#endif
+#if defined(WANT_PAGE_VIRTUAL)
+ void *virtual;
+#endif
+#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
+ int _last_cpupid;
+#endif
/* private: the union with struct page is transitional */
};
struct page page;
@@ -331,9 +323,8 @@ struct folio {
struct {
unsigned long _flags_1;
unsigned long _head_1;
+ unsigned long _folio_avail;
/* public: */
- unsigned char _folio_dtor;
- unsigned char _folio_order;
atomic_t _entire_mapcount;
atomic_t _nr_pages_mapped;
atomic_t _pincount;
@@ -379,6 +370,12 @@ FOLIO_MATCH(_refcount, _refcount);
#ifdef CONFIG_MEMCG
FOLIO_MATCH(memcg_data, memcg_data);
#endif
+#if defined(WANT_PAGE_VIRTUAL)
+FOLIO_MATCH(virtual, virtual);
+#endif
+#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
+FOLIO_MATCH(_last_cpupid, _last_cpupid);
+#endif
#undef FOLIO_MATCH
#define FOLIO_MATCH(pg, fl) \
static_assert(offsetof(struct folio, fl) == \
@@ -391,8 +388,89 @@ FOLIO_MATCH(compound_head, _head_1);
offsetof(struct page, pg) + 2 * sizeof(struct page))
FOLIO_MATCH(flags, _flags_2);
FOLIO_MATCH(compound_head, _head_2);
+FOLIO_MATCH(flags, _flags_2a);
+FOLIO_MATCH(compound_head, _head_2a);
#undef FOLIO_MATCH
+/**
+ * struct ptdesc - Memory descriptor for page tables.
+ * @__page_flags: Same as page flags. Unused for page tables.
+ * @pt_rcu_head: For freeing page table pages.
+ * @pt_list: List of used page tables. Used for s390 and x86.
+ * @_pt_pad_1: Padding that aliases with page's compound head.
+ * @pmd_huge_pte: Protected by ptdesc->ptl, used for THPs.
+ * @__page_mapping: Aliases with page->mapping. Unused for page tables.
+ * @pt_mm: Used for x86 pgds.
+ * @pt_frag_refcount: For fragmented page table tracking. Powerpc only.
+ * @_pt_pad_2: Padding to ensure proper alignment.
+ * @ptl: Lock for the page table.
+ * @__page_type: Same as page->page_type. Unused for page tables.
+ * @__page_refcount: Same as page refcount.
+ * @pt_memcg_data: Memcg data. Tracked for page tables here.
+ *
+ * This struct overlays struct page for now. Do not modify without a good
+ * understanding of the issues.
+ */
+struct ptdesc {
+ unsigned long __page_flags;
+
+ union {
+ struct rcu_head pt_rcu_head;
+ struct list_head pt_list;
+ struct {
+ unsigned long _pt_pad_1;
+ pgtable_t pmd_huge_pte;
+ };
+ };
+ unsigned long __page_mapping;
+
+ union {
+ struct mm_struct *pt_mm;
+ atomic_t pt_frag_refcount;
+ };
+
+ union {
+ unsigned long _pt_pad_2;
+#if ALLOC_SPLIT_PTLOCKS
+ spinlock_t *ptl;
+#else
+ spinlock_t ptl;
+#endif
+ };
+ unsigned int __page_type;
+ atomic_t __page_refcount;
+#ifdef CONFIG_MEMCG
+ unsigned long pt_memcg_data;
+#endif
+};
+
+#define TABLE_MATCH(pg, pt) \
+ static_assert(offsetof(struct page, pg) == offsetof(struct ptdesc, pt))
+TABLE_MATCH(flags, __page_flags);
+TABLE_MATCH(compound_head, pt_list);
+TABLE_MATCH(compound_head, _pt_pad_1);
+TABLE_MATCH(mapping, __page_mapping);
+TABLE_MATCH(rcu_head, pt_rcu_head);
+TABLE_MATCH(page_type, __page_type);
+TABLE_MATCH(_refcount, __page_refcount);
+#ifdef CONFIG_MEMCG
+TABLE_MATCH(memcg_data, pt_memcg_data);
+#endif
+#undef TABLE_MATCH
+static_assert(sizeof(struct ptdesc) <= sizeof(struct page));
+
+#define ptdesc_page(pt) (_Generic((pt), \
+ const struct ptdesc *: (const struct page *)(pt), \
+ struct ptdesc *: (struct page *)(pt)))
+
+#define ptdesc_folio(pt) (_Generic((pt), \
+ const struct ptdesc *: (const struct folio *)(pt), \
+ struct ptdesc *: (struct folio *)(pt)))
+
+#define page_ptdesc(p) (_Generic((p), \
+ const struct page *: (const struct ptdesc *)(p), \
+ struct page *: (struct ptdesc *)(p)))
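A sketch of the conversions in use; the casts are free because ptdesc overlays struct page, and TABLE_MATCH() above statically proves the field offsets agree:

	struct ptdesc *ptdesc = page_ptdesc(page);
	struct folio *folio = ptdesc_folio(ptdesc);	/* same memory */

	lruvec_stat_add_folio(folio, NR_PAGETABLE);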
+
/*
* Used for sizing the vmemmap region on some architectures
*/
@@ -471,14 +549,65 @@ struct anon_vma_name {
char name[];
};
+#ifdef CONFIG_ANON_VMA_NAME
+/*
+ * mmap_lock should be read-locked when calling anon_vma_name(). Caller should
+ * either keep holding the lock while using the returned pointer or it should
+ * raise anon_vma_name refcount before releasing the lock.
+ */
+struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma);
+struct anon_vma_name *anon_vma_name_alloc(const char *name);
+void anon_vma_name_free(struct kref *kref);
+#else /* CONFIG_ANON_VMA_NAME */
+static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
+{
+ return NULL;
+}
+
+static inline struct anon_vma_name *anon_vma_name_alloc(const char *name)
+{
+ return NULL;
+}
+#endif
+
struct vma_lock {
struct rw_semaphore lock;
};
struct vma_numab_state {
+ /*
+ * Initialised as time in 'jiffies' after which VMA
+ * should be scanned. Delays first scan of new VMA by at
+ * least sysctl_numa_balancing_scan_delay:
+ */
unsigned long next_scan;
- unsigned long next_pid_reset;
- unsigned long access_pids[2];
+
+ /*
+ * Time in jiffies when pids_active[] is reset to
+ * detect phase change behaviour:
+ */
+ unsigned long pids_active_reset;
+
+ /*
+ * Approximate tracking of PIDs that trapped a NUMA hinting
+ * fault. May produce false positives due to hash collisions.
+ *
+ * [0] Previous PID tracking
+ * [1] Current PID tracking
+ *
+	 * Window moves after pids_active_reset has expired approximately
+ * every VMA_PID_RESET_PERIOD jiffies:
+ */
+ unsigned long pids_active[2];
+
+ /* MM scan sequence ID when scan first started after VMA creation */
+ int start_scan_seq;
+
+ /*
+ * MM scan sequence ID when the VMA was last completely scanned.
+ * A VMA is not eligible for scanning if prev_scan_seq == numa_scan_seq
+ */
+ int prev_scan_seq;
};
/*
@@ -514,6 +643,20 @@ struct vm_area_struct {
};
#ifdef CONFIG_PER_VMA_LOCK
+ /*
+ * Can only be written (using WRITE_ONCE()) while holding both:
+ * - mmap_lock (in write mode)
+ * - vm_lock->lock (in write mode)
+ * Can be read reliably while holding one of:
+ * - mmap_lock (in read or write mode)
+ * - vm_lock->lock (in read or write mode)
+ * Can be read unreliably (using READ_ONCE()) for pessimistic bailout
+ * while holding nothing (except RCU to keep the VMA struct allocated).
+ *
+ * This sequence counter is explicitly allowed to overflow; sequence
+ * counter reuse can only lead to occasional unnecessary use of the
+ * slowpath.
+ */
int vm_lock_seq;
struct vma_lock *vm_lock;
@@ -573,6 +716,12 @@ struct vm_area_struct {
struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;
+#ifdef CONFIG_NUMA
+#define vma_policy(vma) ((vma)->vm_policy)
+#else
+#define vma_policy(vma) NULL
+#endif
+
#ifdef CONFIG_SCHED_MM_CID
struct mm_cid {
u64 time;
@@ -581,8 +730,24 @@ struct mm_cid {
#endif
struct kioctx_table;
+struct iommu_mm_data;
struct mm_struct {
struct {
+ /*
+ * Fields which are often written to are placed in a separate
+ * cache line.
+ */
+ struct {
+ /**
+ * @mm_count: The number of references to &struct
+ * mm_struct (@mm_users count as 1).
+ *
+ * Use mmgrab()/mmdrop() to modify. When this drops to
+ * 0, the &struct mm_struct is freed.
+ */
+ atomic_t mm_count;
+ } ____cacheline_aligned_in_smp;
+
struct maple_tree mm_mt;
#ifdef CONFIG_MMU
unsigned long (*get_unmapped_area) (struct file *filp,
@@ -620,14 +785,6 @@ struct mm_struct {
*/
atomic_t mm_users;
- /**
- * @mm_count: The number of references to &struct mm_struct
- * (@mm_users count as 1).
- *
- * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
- * &struct mm_struct is freed.
- */
- atomic_t mm_count;
#ifdef CONFIG_SCHED_MM_CID
/**
* @pcpu_cid: Per-cpu current cid.
@@ -672,6 +829,20 @@ struct mm_struct {
* by mmlist_lock
*/
#ifdef CONFIG_PER_VMA_LOCK
+ /*
+ * This field has lock-like semantics, meaning it is sometimes
+ * accessed with ACQUIRE/RELEASE semantics.
+ * Roughly speaking, incrementing the sequence number is
+ * equivalent to releasing locks on VMAs; reading the sequence
+ * number can be part of taking a read lock on a VMA.
+ *
+ * Can be modified under write mmap_lock using RELEASE
+ * semantics.
+ * Can be read with no other protection when holding write
+ * mmap_lock.
+ * Can be read with ACQUIRE semantics if not holding write
+ * mmap_lock.
+ */
int mm_lock_seq;
#endif
@@ -771,13 +942,13 @@ struct mm_struct {
#endif
struct work_struct async_put_work;
-#ifdef CONFIG_IOMMU_SVA
- u32 pasid;
+#ifdef CONFIG_IOMMU_MM_DATA
+ struct iommu_mm_data *iommu_mm;
#endif
#ifdef CONFIG_KSM
/*
* Represent how many pages of this process are involved in KSM
- * merging.
+ * merging (not including ksm_zero_pages).
*/
unsigned long ksm_merging_pages;
/*
@@ -785,8 +956,13 @@ struct mm_struct {
* including merged and not merged.
*/
unsigned long ksm_rmap_items;
-#endif
-#ifdef CONFIG_LRU_GEN
+ /*
+ * Represent how many empty pages are merged with kernel zero
+ * pages when enabling KSM use_zero_pages.
+ */
+ unsigned long ksm_zero_pages;
+#endif /* CONFIG_KSM */
+#ifdef CONFIG_LRU_GEN_WALKS_MMU
struct {
/* this mm_struct is on lru_gen_mm_list */
struct list_head list;
@@ -801,7 +977,7 @@ struct mm_struct {
struct mem_cgroup *memcg;
#endif
} lru_gen;
-#endif /* CONFIG_LRU_GEN */
+#endif /* CONFIG_LRU_GEN_WALKS_MMU */
} __randomize_layout;
/*
@@ -839,11 +1015,13 @@ struct lru_gen_mm_list {
spinlock_t lock;
};
+#endif /* CONFIG_LRU_GEN */
+
+#ifdef CONFIG_LRU_GEN_WALKS_MMU
+
void lru_gen_add_mm(struct mm_struct *mm);
void lru_gen_del_mm(struct mm_struct *mm);
-#ifdef CONFIG_MEMCG
void lru_gen_migrate_mm(struct mm_struct *mm);
-#endif
static inline void lru_gen_init_mm(struct mm_struct *mm)
{
@@ -864,7 +1042,7 @@ static inline void lru_gen_use_mm(struct mm_struct *mm)
WRITE_ONCE(mm->lru_gen.bitmap, -1);
}
-#else /* !CONFIG_LRU_GEN */
+#else /* !CONFIG_LRU_GEN_WALKS_MMU */
static inline void lru_gen_add_mm(struct mm_struct *mm)
{
@@ -874,11 +1052,9 @@ static inline void lru_gen_del_mm(struct mm_struct *mm)
{
}
-#ifdef CONFIG_MEMCG
static inline void lru_gen_migrate_mm(struct mm_struct *mm)
{
}
-#endif
static inline void lru_gen_init_mm(struct mm_struct *mm)
{
@@ -888,7 +1064,7 @@ static inline void lru_gen_use_mm(struct mm_struct *mm)
{
}
-#endif /* CONFIG_LRU_GEN */
+#endif /* CONFIG_LRU_GEN_WALKS_MMU */
struct vma_iterator {
struct ma_state mas;
@@ -899,7 +1075,8 @@ struct vma_iterator {
.mas = { \
.tree = &(__mm)->mm_mt, \
.index = __addr, \
- .node = MAS_START, \
+ .node = NULL, \
+ .status = ma_start, \
}, \
}
@@ -1070,7 +1247,8 @@ enum vm_fault_reason {
{ VM_FAULT_RETRY, "RETRY" }, \
{ VM_FAULT_FALLBACK, "FALLBACK" }, \
{ VM_FAULT_DONE_COW, "DONE_COW" }, \
- { VM_FAULT_NEEDDSYNC, "NEEDDSYNC" }
+ { VM_FAULT_NEEDDSYNC, "NEEDDSYNC" }, \
+ { VM_FAULT_COMPLETED, "COMPLETED" }
struct vm_special_mapping {
const char *name; /* The name, e.g. "[vdso]". */
@@ -1104,14 +1282,6 @@ enum tlb_flush_reason {
NR_TLB_FLUSH_REASONS,
};
- /*
- * A swap entry has to fit into a "unsigned long", as the entry is hidden
- * in the "index" field of the swapper address space.
- */
-typedef struct {
- unsigned long val;
-} swp_entry_t;
-
/**
* enum fault_flag - Fault flag definitions.
* @FAULT_FLAG_WRITE: Fault was a write fault.
@@ -1251,6 +1421,15 @@ enum {
FOLL_PCI_P2PDMA = 1 << 10,
/* allow interrupts from generic signals */
FOLL_INTERRUPTIBLE = 1 << 11,
+ /*
+ * Always honor (trigger) NUMA hinting faults.
+ *
+ * FOLL_WRITE implicitly honors NUMA hinting faults because a
+ * PROT_NONE-mapped page is not writable (exceptions with FOLL_FORCE
+ * apply). get_user_pages_fast_only() always implicitly honors NUMA
+ * hinting faults.
+ */
+ FOLL_HONOR_NUMA_FAULT = 1 << 12,
/* See also internal only FOLL flags in mm/internal.h */
};
diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
index 5414b5c6a103..a2f6179b672b 100644
--- a/include/linux/mm_types_task.h
+++ b/include/linux/mm_types_task.h
@@ -9,9 +9,6 @@
*/
#include <linux/types.h>
-#include <linux/threads.h>
-#include <linux/atomic.h>
-#include <linux/cpumask.h>
#include <asm/page.h>
@@ -36,6 +33,8 @@ enum {
NR_MM_COUNTERS
};
+struct page;
+
struct page_frag {
struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
@@ -52,8 +51,8 @@ struct tlbflush_unmap_batch {
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/*
* The arch code makes the following promise: generic code can modify a
- * PTE, then call arch_tlbbatch_add_mm() (which internally provides all
- * needed barriers), then call arch_tlbbatch_flush(), and the entries
+ * PTE, then call arch_tlbbatch_add_pending() (which internally provides
+ * all needed barriers), then call arch_tlbbatch_flush(), and the entries
* will be flushed on all CPUs by the time that arch_tlbbatch_flush()
* returns.
*/
diff --git a/include/linux/mman.h b/include/linux/mman.h
index cee1e4b566d8..dc7048824be8 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -15,6 +15,9 @@
#ifndef MAP_32BIT
#define MAP_32BIT 0
#endif
+#ifndef MAP_ABOVE4G
+#define MAP_ABOVE4G 0
+#endif
#ifndef MAP_HUGE_2MB
#define MAP_HUGE_2MB 0
#endif
@@ -50,6 +53,7 @@
| MAP_STACK \
| MAP_HUGETLB \
| MAP_32BIT \
+ | MAP_ABOVE4G \
| MAP_HUGE_2MB \
| MAP_HUGE_1GB)
@@ -152,6 +156,7 @@ calc_vm_flag_bits(unsigned long flags)
return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) |
_calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED ) |
_calc_vm_trans(flags, MAP_SYNC, VM_SYNC ) |
+ _calc_vm_trans(flags, MAP_STACK, VM_NOHUGEPAGE) |
arch_calc_vm_flag_bits(flags);
}
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index aab8f1b28d26..8d38dcb6d044 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -73,11 +73,25 @@ static inline void mmap_assert_write_locked(struct mm_struct *mm)
}
#ifdef CONFIG_PER_VMA_LOCK
+/*
+ * Drop all currently-held per-VMA locks.
+ * This is called from the mmap_lock implementation directly before releasing
+ * a write-locked mmap_lock (or downgrading it to read-locked).
+ * This should normally NOT be called manually from other places.
+ * If you want to call this manually anyway, keep in mind that this will release
+ * *all* VMA write locks, including ones from further up the stack.
+ */
static inline void vma_end_write_all(struct mm_struct *mm)
{
mmap_assert_write_locked(mm);
- /* No races during update due to exclusive mmap_lock being held */
- WRITE_ONCE(mm->mm_lock_seq, mm->mm_lock_seq + 1);
+ /*
+ * Nobody can concurrently modify mm->mm_lock_seq due to exclusive
+ * mmap_lock being held.
+ * We need RELEASE semantics here to ensure that preceding stores into
+ * the VMA take effect before we unlock it with this store.
+ * Pairs with ACQUIRE semantics in vma_start_read().
+ */
+ smp_store_release(&mm->mm_lock_seq, mm->mm_lock_seq + 1);
}
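A simplified sketch of the ACQUIRE side this pairs with, roughly what vma_start_read() does before taking the per-VMA lock:

	/* ordered against the smp_store_release() above */
	if (READ_ONCE(vma->vm_lock_seq) ==
	    smp_load_acquire(&vma->vm_mm->mm_lock_seq))
		return false;	/* VMA is write-locked: fall back to mmap_lock */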
#else
static inline void vma_end_write_all(struct mm_struct *mm) {}
@@ -112,16 +126,6 @@ static inline int mmap_write_lock_killable(struct mm_struct *mm)
return ret;
}
-static inline bool mmap_write_trylock(struct mm_struct *mm)
-{
- bool ret;
-
- __mmap_lock_trace_start_locking(mm, true);
- ret = down_write_trylock(&mm->mmap_lock) != 0;
- __mmap_lock_trace_acquire_returned(mm, true, ret);
- return ret;
-}
-
static inline void mmap_write_unlock(struct mm_struct *mm)
{
__mmap_lock_trace_released(mm, true);
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index c726ea781255..f34407cc2788 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -32,6 +32,7 @@ struct mmc_csd {
unsigned int r2w_factor;
unsigned int max_dtr;
unsigned int erase_size; /* In sectors */
+ unsigned int wp_grp_size;
unsigned int read_blkbits;
unsigned int write_blkbits;
unsigned int capacity;
@@ -52,9 +53,6 @@ struct mmc_ext_csd {
u8 part_config;
u8 cache_ctrl;
u8 rst_n_function;
- u8 max_packed_writes;
- u8 max_packed_reads;
- u8 packed_event_en;
unsigned int part_time; /* Units: ms */
unsigned int sa_timeout; /* Units: 100ns */
unsigned int generic_cmd6_time; /* Units: 10ms */
@@ -294,7 +292,10 @@ struct mmc_card {
#define MMC_QUIRK_TRIM_BROKEN (1<<12) /* Skip trim */
#define MMC_QUIRK_BROKEN_HPI (1<<13) /* Disable broken HPI support */
#define MMC_QUIRK_BROKEN_SD_DISCARD (1<<14) /* Disable broken SD discard support */
+#define MMC_QUIRK_BROKEN_SD_CACHE (1<<15) /* Disable broken SD cache support */
+#define MMC_QUIRK_BROKEN_CACHE_FLUSH (1<<16) /* Don't flush cache until the write has occurred */
+ bool written_flag; /* Indicates eMMC has been written since power on */
bool reenable_cmdq; /* Re-enable Command Queue */
unsigned int erase_size; /* erase size in sectors */
@@ -303,6 +304,7 @@ struct mmc_card {
unsigned int eg_boundary; /* don't cross erase-group boundaries */
unsigned int erase_arg; /* erase / trim / discard */
u8 erased_byte; /* value of erased bytes */
+ unsigned int wp_grp_size; /* write group size in sectors */
u32 raw_cid[4]; /* raw card CID */
u32 raw_csd[4]; /* raw card CSD */
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 6efec0b9820c..2c7928a50907 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -27,7 +27,6 @@ struct mmc_command {
u32 opcode;
u32 arg;
#define MMC_CMD23_ARG_REL_WR (1 << 31)
-#define MMC_CMD23_ARG_PACKED ((0 << 31) | (1 << 30))
#define MMC_CMD23_ARG_TAG_REQ (1 << 29)
u32 resp[4];
unsigned int flags; /* expected response type */
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 461d1543893b..2f445c651742 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -184,6 +184,12 @@ struct mmc_host_ops {
/* Execute HS400 tuning depending host driver */
int (*execute_hs400_tuning)(struct mmc_host *host, struct mmc_card *card);
+ /* Optional callback to prepare for SD high-speed tuning */
+ int (*prepare_sd_hs_tuning)(struct mmc_host *host, struct mmc_card *card);
+
+ /* Optional callback to execute SD high-speed tuning */
+ int (*execute_sd_hs_tuning)(struct mmc_host *host, struct mmc_card *card);
+
/* Prepare switch to DDR during the HS400 init sequence */
int (*hs400_prepare_ddr)(struct mmc_host *host);
@@ -520,6 +526,7 @@ struct mmc_host {
/* Host Software Queue support */
bool hsq_enabled;
+ int hsq_depth;
u32 err_stats[MMC_ERR_MAX];
unsigned long private[] ____cacheline_aligned;
@@ -665,6 +672,8 @@ static inline void mmc_debugfs_err_stats_inc(struct mmc_host *host,
host->err_stats[stat] += 1;
}
+int mmc_sd_switch(struct mmc_card *card, int mode, int group, u8 value, u8 *resp);
+int mmc_send_status(struct mmc_card *card, u32 *status);
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error);
int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode);
int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 6f7993803ee7..cf2bcb5da30d 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -257,8 +257,6 @@ static inline bool mmc_ready_for_data(u32 status)
#define EXT_CSD_FLUSH_CACHE 32 /* W */
#define EXT_CSD_CACHE_CTRL 33 /* R/W */
#define EXT_CSD_POWER_OFF_NOTIFICATION 34 /* R/W */
-#define EXT_CSD_PACKED_FAILURE_INDEX 35 /* RO */
-#define EXT_CSD_PACKED_CMD_STATUS 36 /* RO */
#define EXT_CSD_EXP_EVENTS_STATUS 54 /* RO, 2 bytes */
#define EXT_CSD_EXP_EVENTS_CTRL 56 /* R/W, 2 bytes */
#define EXT_CSD_DATA_SECTOR_SIZE 61 /* R */
@@ -321,8 +319,6 @@ static inline bool mmc_ready_for_data(u32 status)
#define EXT_CSD_SUPPORTED_MODE 493 /* RO */
#define EXT_CSD_TAG_UNIT_SIZE 498 /* RO */
#define EXT_CSD_DATA_TAG_SUPPORT 499 /* RO */
-#define EXT_CSD_MAX_PACKED_WRITES 500 /* RO */
-#define EXT_CSD_MAX_PACKED_READS 501 /* RO */
#define EXT_CSD_BKOPS_SUPPORT 502 /* RO */
#define EXT_CSD_HPI_FEATURES 503 /* RO */
@@ -402,18 +398,12 @@ static inline bool mmc_ready_for_data(u32 status)
#define EXT_CSD_PWR_CL_8BIT_SHIFT 4
#define EXT_CSD_PWR_CL_4BIT_SHIFT 0
-#define EXT_CSD_PACKED_EVENT_EN BIT(3)
-
/*
* EXCEPTION_EVENT_STATUS field
*/
#define EXT_CSD_URGENT_BKOPS BIT(0)
#define EXT_CSD_DYNCAP_NEEDED BIT(1)
#define EXT_CSD_SYSPOOL_EXHAUSTED BIT(2)
-#define EXT_CSD_PACKED_FAILURE BIT(3)
-
-#define EXT_CSD_PACKED_GENERIC_ERROR BIT(0)
-#define EXT_CSD_PACKED_INDEXED_ERROR BIT(1)
/*
* BKOPS status level
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index c653accdc7fd..7fada7a714fe 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -121,7 +121,8 @@
#define SDIO_DEVICE_ID_REALTEK_RTW8822BS 0xb822
#define SDIO_DEVICE_ID_REALTEK_RTW8821CS 0xc821
#define SDIO_DEVICE_ID_REALTEK_RTW8822CS 0xc822
-#define SDIO_DEVICE_ID_REALTEK_RTW8723DS 0xd723
+#define SDIO_DEVICE_ID_REALTEK_RTW8723DS_2ANT 0xd723
+#define SDIO_DEVICE_ID_REALTEK_RTW8723DS_1ANT 0xd724
#define SDIO_DEVICE_ID_REALTEK_RTW8821DS 0xd821
#define SDIO_VENDOR_ID_SIANO 0x039a
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index b8728d11c949..7c3e7b0b0e8f 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -8,10 +8,12 @@
struct page;
struct vm_area_struct;
struct mm_struct;
+struct vma_iterator;
void dump_page(struct page *page, const char *reason);
void dump_vma(const struct vm_area_struct *vma);
void dump_mm(const struct mm_struct *mm);
+void vma_iter_dump_tree(const struct vma_iterator *vmi);
#ifdef CONFIG_DEBUG_VM
#define VM_BUG_ON(cond) BUG_ON(cond)
@@ -74,6 +76,17 @@ void dump_mm(const struct mm_struct *mm);
} \
unlikely(__ret_warn_once); \
})
+#define VM_WARN_ON_ONCE_MM(cond, mm) ({ \
+ static bool __section(".data.once") __warned; \
+ int __ret_warn_once = !!(cond); \
+ \
+ if (unlikely(__ret_warn_once && !__warned)) { \
+ dump_mm(mm); \
+ __warned = true; \
+ WARN_ON(1); \
+ } \
+ unlikely(__ret_warn_once); \
+})
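A sketch of typical usage, mirroring the page/folio variants; the condition is illustrative:

	VM_WARN_ON_ONCE_MM(atomic_read(&mm->mm_users) <= 0, mm);

Under !CONFIG_DEBUG_VM it degrades to the build-time type check in the stub below.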
#define VM_WARN_ON(cond) (void)WARN_ON(cond)
#define VM_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond)
@@ -90,6 +103,7 @@ void dump_mm(const struct mm_struct *mm);
#define VM_WARN_ON_ONCE_PAGE(cond, page) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_ONCE_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond)
+#define VM_WARN_ON_ONCE_MM(cond, mm) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN(cond, format...) BUILD_BUG_ON_INVALID(cond)
#endif
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 64a3e051c3c4..f349e08a9dfe 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -187,27 +187,27 @@ struct mmu_notifier_ops {
const struct mmu_notifier_range *range);
/*
- * invalidate_range() is either called between
- * invalidate_range_start() and invalidate_range_end() when the
- * VM has to free pages that where unmapped, but before the
- * pages are actually freed, or outside of _start()/_end() when
- * a (remote) TLB is necessary.
+ * arch_invalidate_secondary_tlbs() is used to manage a non-CPU TLB
+ * which shares page-tables with the CPU. The
+ * invalidate_range_start()/end() callbacks should not be implemented as
+	 * arch_invalidate_secondary_tlbs() already catches the points in time when
+ * an external TLB needs to be flushed.
*
- * If invalidate_range() is used to manage a non-CPU TLB with
- * shared page-tables, it not necessary to implement the
- * invalidate_range_start()/end() notifiers, as
- * invalidate_range() already catches the points in time when an
- * external TLB range needs to be flushed. For more in depth
- * discussion on this see Documentation/mm/mmu_notifier.rst
+ * This requires arch_invalidate_secondary_tlbs() to be called while
+ * holding the ptl spin-lock and therefore this callback is not allowed
+ * to sleep.
*
- * Note that this function might be called with just a sub-range
- * of what was passed to invalidate_range_start()/end(), if
- * called between those functions.
+ * This is called by architecture code whenever invalidating a TLB
+ * entry. It is assumed that any secondary TLB has the same rules for
+	 * when invalidations are required. If this is not the case, architecture
+ * code will need to call this explicitly when required for secondary
+ * TLB invalidation.
*/
- void (*invalidate_range)(struct mmu_notifier *subscription,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end);
+ void (*arch_invalidate_secondary_tlbs)(
+ struct mmu_notifier *subscription,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end);
/*
* These callbacks are used with the get/put interface to manage the
@@ -395,10 +395,9 @@ extern int __mmu_notifier_test_young(struct mm_struct *mm,
extern void __mmu_notifier_change_pte(struct mm_struct *mm,
unsigned long address, pte_t pte);
extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
-extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r,
- bool only_end);
-extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
- unsigned long start, unsigned long end);
+extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r);
+extern void __mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
+ unsigned long start, unsigned long end);
extern bool
mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range);
@@ -460,7 +459,14 @@ mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}
-static inline int
+/*
+ * This version of mmu_notifier_invalidate_range_start() avoids blocking, but it
+ * can return an error if a notifier can't proceed without blocking, in which
+ * case you're not allowed to modify PTEs in the specified range.
+ *
+ * This is mainly intended for OOM handling.
+ */
+static inline int __must_check
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
int ret = 0;
@@ -481,21 +487,14 @@ mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
might_sleep();
if (mm_has_notifiers(range->mm))
- __mmu_notifier_invalidate_range_end(range, false);
-}
-
-static inline void
-mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
-{
- if (mm_has_notifiers(range->mm))
- __mmu_notifier_invalidate_range_end(range, true);
+ __mmu_notifier_invalidate_range_end(range);
}
-static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
- unsigned long start, unsigned long end)
+static inline void mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
{
if (mm_has_notifiers(mm))
- __mmu_notifier_invalidate_range(mm, start, end);
+ __mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
}
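A sketch of a driver implementing only the renamed hook, per the rules above (it may be called with the ptl held, so it must not sleep); all names are illustrative:

	static void example_flush_secondary_tlb(struct mmu_notifier *mn,
						struct mm_struct *mm,
						unsigned long start,
						unsigned long end)
	{
		/* atomic context: issue or queue a non-blocking HW flush */
		example_hw_flush_range(mn, start, end);
	}

	static const struct mmu_notifier_ops example_ops = {
		.arch_invalidate_secondary_tlbs = example_flush_secondary_tlb,
	};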
static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
@@ -582,45 +581,6 @@ static inline void mmu_notifier_range_init_owner(
__young; \
})
-#define ptep_clear_flush_notify(__vma, __address, __ptep) \
-({ \
- unsigned long ___addr = __address & PAGE_MASK; \
- struct mm_struct *___mm = (__vma)->vm_mm; \
- pte_t ___pte; \
- \
- ___pte = ptep_clear_flush(__vma, __address, __ptep); \
- mmu_notifier_invalidate_range(___mm, ___addr, \
- ___addr + PAGE_SIZE); \
- \
- ___pte; \
-})
-
-#define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd) \
-({ \
- unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \
- struct mm_struct *___mm = (__vma)->vm_mm; \
- pmd_t ___pmd; \
- \
- ___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd); \
- mmu_notifier_invalidate_range(___mm, ___haddr, \
- ___haddr + HPAGE_PMD_SIZE); \
- \
- ___pmd; \
-})
-
-#define pudp_huge_clear_flush_notify(__vma, __haddr, __pud) \
-({ \
- unsigned long ___haddr = __haddr & HPAGE_PUD_MASK; \
- struct mm_struct *___mm = (__vma)->vm_mm; \
- pud_t ___pud; \
- \
- ___pud = pudp_huge_clear_flush(__vma, __haddr, __pud); \
- mmu_notifier_invalidate_range(___mm, ___haddr, \
- ___haddr + HPAGE_PUD_SIZE); \
- \
- ___pud; \
-})
-
/*
* set_pte_at_notify() sets the pte _after_ running the notifier.
* This is safe to start by updating the secondary MMUs, because the primary MMU
@@ -711,12 +671,7 @@ void mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
}
-static inline void
-mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
-{
-}
-
-static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
+static inline void mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
}
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index a4889c9d4055..a497f189d988 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -22,18 +22,21 @@
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/local_lock.h>
+#include <linux/zswap.h>
#include <asm/page.h>
/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_ARCH_FORCE_MAX_ORDER
-#define MAX_ORDER 10
+#define MAX_PAGE_ORDER 10
#else
-#define MAX_ORDER CONFIG_ARCH_FORCE_MAX_ORDER
+#define MAX_PAGE_ORDER CONFIG_ARCH_FORCE_MAX_ORDER
#endif
-#define MAX_ORDER_NR_PAGES (1 << MAX_ORDER)
+#define MAX_ORDER_NR_PAGES (1 << MAX_PAGE_ORDER)
#define IS_MAX_ORDER_ALIGNED(pfn) IS_ALIGNED(pfn, MAX_ORDER_NR_PAGES)
+#define NR_PAGE_ORDERS (MAX_PAGE_ORDER + 1)
+
/*
* PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
* costly to service. That is between allocation orders which should
@@ -95,7 +98,7 @@ static inline bool migratetype_is_mergeable(int mt)
}
#define for_each_migratetype_order(order, type) \
- for (order = 0; order <= MAX_ORDER; order++) \
+ for (order = 0; order < NR_PAGE_ORDERS; order++) \
for (type = 0; type < MIGRATE_TYPES; type++)
extern int page_group_by_mobility_disabled;
@@ -105,6 +108,9 @@ extern int page_group_by_mobility_disabled;
#define get_pageblock_migratetype(page) \
get_pfnblock_flags_mask(page, page_to_pfn(page), MIGRATETYPE_MASK)
+#define folio_migratetype(folio) \
+ get_pfnblock_flags_mask(&folio->page, folio_pfn(folio), \
+ MIGRATETYPE_MASK)
struct free_area {
struct list_head free_list[MIGRATE_TYPES];
unsigned long nr_free;
@@ -143,6 +149,9 @@ enum zone_stat_item {
NR_ZSPAGES, /* allocated in zsmalloc */
#endif
NR_FREE_CMA_PAGES,
+#ifdef CONFIG_UNACCEPTED_MEMORY
+ NR_UNACCEPTED,
+#endif
NR_VM_ZONE_STAT_ITEMS };
enum node_stat_item {
@@ -201,6 +210,10 @@ enum node_stat_item {
PGPROMOTE_SUCCESS, /* promote successfully */
PGPROMOTE_CANDIDATE, /* candidate pages to promote */
#endif
+ /* PGDEMOTE_*: pages demoted */
+ PGDEMOTE_KSWAPD,
+ PGDEMOTE_DIRECT,
+ PGDEMOTE_KHUGEPAGED,
NR_VM_NODE_STAT_ITEMS
};
@@ -290,9 +303,21 @@ static inline bool is_active_lru(enum lru_list lru)
#define ANON_AND_FILE 2
enum lruvec_flags {
- LRUVEC_CONGESTED, /* lruvec has many dirty pages
- * backed by a congested BDI
- */
+ /*
+ * An lruvec has many dirty pages backed by a congested BDI:
+ * 1. LRUVEC_CGROUP_CONGESTED is set by cgroup-level reclaim.
+ * It can be cleared by cgroup reclaim or kswapd.
+ * 2. LRUVEC_NODE_CONGESTED is set by kswapd node-level reclaim.
+ * It can only be cleared by kswapd.
+ *
+ * Essentially, kswapd can unthrottle an lruvec throttled by cgroup
+ * reclaim, but not vice versa. This only applies to the root cgroup.
+ * The goal is to prevent cgroup reclaim on the root cgroup (e.g.
+ * memory.reclaim) from unthrottling an unbalanced node (one that was
+ * throttled by kswapd).
+ */
+ LRUVEC_CGROUP_CONGESTED,
+ LRUVEC_NODE_CONGESTED,
};
#endif /* !__GENERATING_BOUNDS_H */
@@ -417,14 +442,12 @@ struct lru_gen_folio {
atomic_long_t refaulted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
/* whether the multi-gen LRU is enabled */
bool enabled;
-#ifdef CONFIG_MEMCG
/* the memcg generation this lru_gen_folio belongs to */
u8 gen;
/* the list segment this lru_gen_folio belongs to */
u8 seg;
/* per-node lru_gen_folio list for global reclaim */
struct hlist_nulls_node list;
-#endif
};
enum {
@@ -470,11 +493,6 @@ struct lru_gen_mm_walk {
bool force_scan;
};
-void lru_gen_init_lruvec(struct lruvec *lruvec);
-void lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
-
-#ifdef CONFIG_MEMCG
-
/*
* For each node, memcgs are divided into two generations: the old and the
* young. For each generation, memcgs are randomly sharded into multiple bins
@@ -487,33 +505,37 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
* the old generation, is incremented when all its bins become empty.
*
* There are four operations:
- * 1. MEMCG_LRU_HEAD, which moves an memcg to the head of a random bin in its
+ * 1. MEMCG_LRU_HEAD, which moves a memcg to the head of a random bin in its
* current generation (old or young) and updates its "seg" to "head";
- * 2. MEMCG_LRU_TAIL, which moves an memcg to the tail of a random bin in its
+ * 2. MEMCG_LRU_TAIL, which moves a memcg to the tail of a random bin in its
* current generation (old or young) and updates its "seg" to "tail";
- * 3. MEMCG_LRU_OLD, which moves an memcg to the head of a random bin in the old
+ * 3. MEMCG_LRU_OLD, which moves a memcg to the head of a random bin in the old
* generation, updates its "gen" to "old" and resets its "seg" to "default";
- * 4. MEMCG_LRU_YOUNG, which moves an memcg to the tail of a random bin in the
+ * 4. MEMCG_LRU_YOUNG, which moves a memcg to the tail of a random bin in the
* young generation, updates its "gen" to "young" and resets its "seg" to
* "default".
*
* The events that trigger the above operations are:
* 1. Exceeding the soft limit, which triggers MEMCG_LRU_HEAD;
- * 2. The first attempt to reclaim an memcg below low, which triggers
+ * 2. The first attempt to reclaim a memcg below low, which triggers
* MEMCG_LRU_TAIL;
- * 3. The first attempt to reclaim an memcg below reclaimable size threshold,
- * which triggers MEMCG_LRU_TAIL;
- * 4. The second attempt to reclaim an memcg below reclaimable size threshold,
- * which triggers MEMCG_LRU_YOUNG;
- * 5. Attempting to reclaim an memcg below min, which triggers MEMCG_LRU_YOUNG;
+ * 3. The first attempt to reclaim a memcg offlined or below reclaimable size
+ * threshold, which triggers MEMCG_LRU_TAIL;
+ * 4. The second attempt to reclaim a memcg offlined or below reclaimable size
+ * threshold, which triggers MEMCG_LRU_YOUNG;
+ * 5. Attempting to reclaim a memcg below min, which triggers MEMCG_LRU_YOUNG;
* 6. Finishing the aging on the eviction path, which triggers MEMCG_LRU_YOUNG;
- * 7. Offlining an memcg, which triggers MEMCG_LRU_OLD.
+ * 7. Offlining a memcg, which triggers MEMCG_LRU_OLD.
*
- * Note that memcg LRU only applies to global reclaim, and the round-robin
- * incrementing of their max_seq counters ensures the eventual fairness to all
- * eligible memcgs. For memcg reclaim, it still relies on mem_cgroup_iter().
+ * Notes:
+ * 1. Memcg LRU only applies to global reclaim, and the round-robin incrementing
+ * of their max_seq counters ensures the eventual fairness to all eligible
+ * memcgs. For memcg reclaim, it still relies on mem_cgroup_iter().
+ * 2. There are only two valid generations: old (seq) and young (seq+1).
+ * MEMCG_NR_GENS is set to three so that when reading the generation counter
+ * locklessly, a stale value (seq-1) does not wrap around to young.
*/
-#define MEMCG_NR_GENS 2
+#define MEMCG_NR_GENS 3
#define MEMCG_NR_BINS 8
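The wraparound argument behind MEMCG_NR_GENS can be verified with a standalone snippet; this is illustrative arithmetic only, not kernel code:

#include <stdio.h>

int main(void)
{
        unsigned int seq = 4;   /* arbitrary current "old" sequence */

        /* Two slots: a stale (seq-1) aliases young (seq+1): both 1. */
        printf("2 slots: stale=%u young=%u\n", (seq - 1) % 2, (seq + 1) % 2);
        /* Three slots: stale maps to slot 0, young to slot 2: distinct. */
        printf("3 slots: stale=%u young=%u\n", (seq - 1) % 3, (seq + 1) % 3);
        return 0;
}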
struct lru_gen_memcg {
@@ -528,26 +550,15 @@ struct lru_gen_memcg {
};
void lru_gen_init_pgdat(struct pglist_data *pgdat);
+void lru_gen_init_lruvec(struct lruvec *lruvec);
+void lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
void lru_gen_init_memcg(struct mem_cgroup *memcg);
void lru_gen_exit_memcg(struct mem_cgroup *memcg);
void lru_gen_online_memcg(struct mem_cgroup *memcg);
void lru_gen_offline_memcg(struct mem_cgroup *memcg);
void lru_gen_release_memcg(struct mem_cgroup *memcg);
-void lru_gen_soft_reclaim(struct lruvec *lruvec);
-
-#else /* !CONFIG_MEMCG */
-
-#define MEMCG_NR_GENS 1
-
-struct lru_gen_memcg {
-};
-
-static inline void lru_gen_init_pgdat(struct pglist_data *pgdat)
-{
-}
-
-#endif /* CONFIG_MEMCG */
+void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid);
#else /* !CONFIG_LRU_GEN */
@@ -563,8 +574,6 @@ static inline void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
{
}
-#ifdef CONFIG_MEMCG
-
static inline void lru_gen_init_memcg(struct mem_cgroup *memcg)
{
}
@@ -585,12 +594,10 @@ static inline void lru_gen_release_memcg(struct mem_cgroup *memcg)
{
}
-static inline void lru_gen_soft_reclaim(struct lruvec *lruvec)
+static inline void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid)
{
}
-#endif /* CONFIG_MEMCG */
-
#endif /* CONFIG_LRU_GEN */
struct lruvec {
@@ -613,16 +620,17 @@ struct lruvec {
#ifdef CONFIG_LRU_GEN
/* evictable pages divided into generations */
struct lru_gen_folio lrugen;
+#ifdef CONFIG_LRU_GEN_WALKS_MMU
/* to concurrently iterate lru_gen_mm_list */
struct lru_gen_mm_state mm_state;
#endif
+#endif /* CONFIG_LRU_GEN */
#ifdef CONFIG_MEMCG
struct pglist_data *pgdat;
#endif
+ struct zswap_lruvec_state zswap_lruvec_state;
};
-/* Isolate unmapped pages */
-#define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x4)
/* Isolate unevictable pages */
@@ -658,16 +666,34 @@ enum zone_watermarks {
#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost)
#define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost)
-/* Fields and list protected by pagesets local_lock in page_alloc.c */
+/*
+ * Flags used in pcp->flags field.
+ *
+ * PCPF_PREV_FREE_HIGH_ORDER: a high-order page was freed in the
+ * previous page freeing, used to avoid draining the PCP after an
+ * occasional high-order page freeing.
+ *
+ * PCPF_FREE_HIGH_BATCH: preserve "pcp->batch" pages in the PCP before
+ * draining it for consecutive high-order page freeing without
+ * allocation, if the CPU's data cache slice is large enough. This
+ * reduces zone lock contention and keeps cache-hot pages for reuse.
+ */
+#define PCPF_PREV_FREE_HIGH_ORDER BIT(0)
+#define PCPF_FREE_HIGH_BATCH BIT(1)
+
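A minimal sketch of how such a flag would be maintained; this assumes the caller already holds pcp->lock, and the helper name is made up:

static void note_free_order(struct per_cpu_pages *pcp, unsigned int order)
{
        /* Remember whether the most recent free was high-order. */
        if (order > PAGE_ALLOC_COSTLY_ORDER)
                pcp->flags |= PCPF_PREV_FREE_HIGH_ORDER;
        else
                pcp->flags &= ~PCPF_PREV_FREE_HIGH_ORDER;
}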
struct per_cpu_pages {
spinlock_t lock; /* Protects lists field */
int count; /* number of pages in the list */
int high; /* high watermark, emptying needed */
+ int high_min; /* min high watermark */
+ int high_max; /* max high watermark */
int batch; /* chunk size for buddy add/remove */
- short free_factor; /* batch scaling factor during free */
+ u8 flags; /* protected by pcp->lock */
+ u8 alloc_factor; /* batch scaling factor during allocate */
#ifdef CONFIG_NUMA
- short expire; /* When 0, remote pagesets are drained */
+ u8 expire; /* When 0, remote pagesets are drained */
#endif
+ short free_count; /* consecutive free count */
/* Lists of pages, one per migrate type stored on the pcp-lists */
struct list_head lists[NR_PCP_LISTS];
@@ -820,7 +846,8 @@ struct zone {
* the high and batch values are copied to individual pagesets for
* faster access
*/
- int pageset_high;
+ int pageset_high_min;
+ int pageset_high_max;
int pageset_batch;
#ifndef CONFIG_SPARSEMEM
@@ -908,7 +935,12 @@ struct zone {
CACHELINE_PADDING(_pad1_);
/* free areas of different sizes */
- struct free_area free_area[MAX_ORDER + 1];
+ struct free_area free_area[NR_PAGE_ORDERS];
+
+#ifdef CONFIG_UNACCEPTED_MEMORY
+ /* Pages to be accepted. All pages on the list are MAX_PAGE_ORDER */
+ struct list_head unaccepted_pages;
+#endif
/* zone flags, see below */
unsigned long flags;
@@ -976,6 +1008,7 @@ enum zone_flags {
* Cleared when kswapd is woken.
*/
ZONE_RECLAIM_ACTIVE, /* kswapd may be scanning the zone. */
+ ZONE_BELOW_HIGH, /* zone is below high watermark. */
};
static inline unsigned long zone_managed_pages(struct zone *zone)
@@ -1116,6 +1149,11 @@ static inline bool is_zone_movable_page(const struct page *page)
{
return page_zonenum(page) == ZONE_MOVABLE;
}
+
+static inline bool folio_is_zone_movable(const struct folio *folio)
+{
+ return folio_zonenum(folio) == ZONE_MOVABLE;
+}
#endif
/*
@@ -1512,27 +1550,6 @@ static inline bool has_managed_dma(void)
}
#endif
-/* These two functions are used to setup the per zone pages min values */
-struct ctl_table;
-
-int min_free_kbytes_sysctl_handler(struct ctl_table *, int, void *, size_t *,
- loff_t *);
-int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, void *,
- size_t *, loff_t *);
-extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES];
-int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, void *,
- size_t *, loff_t *);
-int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *, int,
- void *, size_t *, loff_t *);
-int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
- void *, size_t *, loff_t *);
-int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
- void *, size_t *, loff_t *);
-int numa_zonelist_order_handler(struct ctl_table *, int,
- void *, size_t *, loff_t *);
-extern int percpu_pagelist_high_fraction;
-extern char numa_zonelist_order[];
-#define NUMA_ZONELIST_ORDER_LEN 16
#ifndef CONFIG_NUMA
@@ -1731,8 +1748,8 @@ static inline bool movable_only_nodes(nodemask_t *nodes)
#define SECTION_BLOCKFLAGS_BITS \
((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)
-#if (MAX_ORDER + PAGE_SHIFT) > SECTION_SIZE_BITS
-#error Allocator MAX_ORDER exceeds SECTION_SIZE
+#if (MAX_PAGE_ORDER + PAGE_SHIFT) > SECTION_SIZE_BITS
+#error Allocator MAX_PAGE_ORDER exceeds SECTION_SIZE
#endif
static inline unsigned long pfn_to_section_nr(unsigned long pfn)
@@ -1764,6 +1781,7 @@ static inline unsigned long section_nr_to_pfn(unsigned long sec)
#define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK)
struct mem_section_usage {
+ struct rcu_head rcu;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION);
#endif
@@ -1957,7 +1975,7 @@ static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
{
int idx = subsection_map_index(pfn);
- return test_bit(idx, ms->usage->subsection_map);
+ return test_bit(idx, READ_ONCE(ms->usage)->subsection_map);
}
#else
static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
@@ -1981,6 +1999,7 @@ static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
static inline int pfn_valid(unsigned long pfn)
{
struct mem_section *ms;
+ int ret;
/*
* Ensure the upper PAGE_SHIFT bits are clear in the
@@ -1994,13 +2013,19 @@ static inline int pfn_valid(unsigned long pfn)
if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
return 0;
ms = __pfn_to_section(pfn);
- if (!valid_section(ms))
+ rcu_read_lock_sched();
+ if (!valid_section(ms)) {
+ rcu_read_unlock_sched();
return 0;
+ }
/*
* Traditionally early sections always returned pfn_valid() for
* the entire section-sized span.
*/
- return early_section(ms) || pfn_section_valid(ms, pfn);
+ ret = early_section(ms) || pfn_section_valid(ms, pfn);
+ rcu_read_unlock_sched();
+
+ return ret;
}
#endif
diff --git a/include/linux/mnt_idmapping.h b/include/linux/mnt_idmapping.h
index 057c89867aa2..cd4d5c8781f5 100644
--- a/include/linux/mnt_idmapping.h
+++ b/include/linux/mnt_idmapping.h
@@ -115,6 +115,9 @@ static inline bool vfsgid_eq_kgid(vfsgid_t vfsgid, kgid_t kgid)
int vfsgid_in_group_p(vfsgid_t vfsgid);
+struct mnt_idmap *mnt_idmap_get(struct mnt_idmap *idmap);
+void mnt_idmap_put(struct mnt_idmap *idmap);
+
vfsuid_t make_vfsuid(struct mnt_idmap *idmap,
struct user_namespace *fs_userns, kuid_t kuid);
@@ -241,7 +244,4 @@ static inline kgid_t mapped_fsgid(struct mnt_idmap *idmap,
return from_vfsgid(idmap, fs_userns, VFSGIDT_INIT(current_fsgid()));
}
-bool check_fsmapping(const struct mnt_idmap *idmap,
- const struct super_block *sb);
-
#endif /* _LINUX_MNT_IDMAPPING_H */
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index ccaaeda792c0..f458469c5ce5 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -221,6 +221,19 @@ struct acpi_device_id {
__u32 cls_msk;
};
+/**
+ * ACPI_DEVICE_CLASS - macro used to describe an ACPI device with
+ * the PCI-defined class-code information
+ *
+ * @_cls : the class, subclass, prog-if triple for this device
+ * @_msk : the class mask for this device
+ *
+ * This macro is used to create a struct acpi_device_id that matches a
+ * specific PCI class. The .id and .driver_data fields are left
+ * initialized to their default values.
+ */
+#define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (_cls), .cls_msk = (_msk),
+
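Usage sketch: matching by PCI class code in an ACPI ID table. The driver name is hypothetical; the class/mask values shown are the usual AHCI triple, given here for illustration:

static const struct acpi_device_id my_acpi_ids[] = {
        { ACPI_DEVICE_CLASS(0x010601, 0xffffff) },      /* SATA AHCI */
        { }
};
MODULE_DEVICE_TABLE(acpi, my_acpi_ids);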
#define PNP_ID_LEN 8
#define PNP_MAX_DEVICES 8
@@ -912,10 +925,22 @@ struct ishtp_device_id {
kernel_ulong_t driver_data;
};
+#define CDX_ANY_ID (0xFFFF)
+
+enum {
+ CDX_ID_F_VFIO_DRIVER_OVERRIDE = 1,
+};
+
/**
* struct cdx_device_id - CDX device identifier
* @vendor: Vendor ID
* @device: Device ID
+ * @subvendor: Subsystem vendor ID (or CDX_ANY_ID)
+ * @subdevice: Subsystem device ID (or CDX_ANY_ID)
+ * @class: Device class
+ * Most drivers do not need to specify class/class_mask
+ * as vendor/device is normally sufficient.
+ * @class_mask: Limit which sub-fields of the class field are compared.
* @override_only: Match only when dev->driver_override is this driver.
*
* Type of entries in the "device Id" table for CDX devices supported by
@@ -924,7 +949,15 @@ struct ishtp_device_id {
struct cdx_device_id {
__u16 vendor;
__u16 device;
+ __u16 subvendor;
+ __u16 subdevice;
+ __u32 class;
+ __u32 class_mask;
__u32 override_only;
};
+struct vchiq_device_id {
+ char name[32];
+};
+
#endif /* LINUX_MOD_DEVICETABLE_H */
diff --git a/include/linux/module.h b/include/linux/module.h
index 9e56763dff81..96bc462872c0 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -540,6 +540,8 @@ struct module {
struct static_call_site *static_call_sites;
#endif
#if IS_ENABLED(CONFIG_KUNIT)
+ int num_kunit_init_suites;
+ struct kunit_suite **kunit_init_suites;
int num_kunit_suites;
struct kunit_suite **kunit_suites;
#endif
@@ -668,7 +670,7 @@ extern void __module_get(struct module *module);
* @module: the module we should check for
*
* Only try to get a module reference count if the module is not being removed.
- * This call will fail if the module is already being removed.
+ * This call will fail if the module is in the process of being removed.
*
* Care must also be taken to ensure the module exists and is alive prior to
* usage of this call. This can be guaranteed through two means:
@@ -968,15 +970,6 @@ static inline int lookup_module_symbol_name(unsigned long addr, char *symname)
return -ERANGE;
}
-static inline int lookup_module_symbol_attrs(unsigned long addr,
- unsigned long *size,
- unsigned long *offset,
- char *modname,
- char *name)
-{
- return -ERANGE;
-}
-
static inline int module_get_kallsym(unsigned int symnum, unsigned long *value,
char *type, char *name,
char *module_name, int *exported)
diff --git a/include/linux/module_symbol.h b/include/linux/module_symbol.h
index 7ace7ba30203..77c9895b9ddb 100644
--- a/include/linux/module_symbol.h
+++ b/include/linux/module_symbol.h
@@ -3,15 +3,13 @@
#define _LINUX_MODULE_SYMBOL_H
/* This ignores the intensely annoying "mapping symbols" found in ELF files. */
-static inline int is_mapping_symbol(const char *str)
+static inline bool is_mapping_symbol(const char *str)
{
if (str[0] == '.' && str[1] == 'L')
return true;
if (str[0] == 'L' && str[1] == '0')
return true;
- return str[0] == '$' &&
- (str[1] == 'a' || str[1] == 'd' || str[1] == 't' || str[1] == 'x')
- && (str[2] == '\0' || str[2] == '.');
+ return str[0] == '$';
}
#endif /* _LINUX_MODULE_SYMBOL_H */
diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
index 03be088fb439..001b2ce83832 100644
--- a/include/linux/moduleloader.h
+++ b/include/linux/moduleloader.h
@@ -42,6 +42,11 @@ bool module_init_section(const char *name);
*/
bool module_exit_section(const char *name);
+/* Describes whether within_module_init() will consider this an init section
+ * or not. This behaviour changes with CONFIG_MODULE_UNLOAD.
+ */
+bool module_init_layout_section(const char *sname);
+
/*
* Apply the given relocation to the (simplified) ELF. Return -error
* or 0.
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index 962cd41a2cb5..bfb85fd13e1f 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -276,7 +276,7 @@ struct kparam_array
read-only sections (which is part of respective UNIX ABI on these
platforms). So 'const' makes no sense and even causes compile failures
with some compilers. */
-#if defined(CONFIG_ALPHA) || defined(CONFIG_IA64) || defined(CONFIG_PPC64)
+#if defined(CONFIG_ALPHA) || defined(CONFIG_PPC64)
#define __moduleparam_const
#else
#define __moduleparam_const const
@@ -293,7 +293,11 @@ struct kparam_array
= { __param_str_##name, THIS_MODULE, ops, \
VERIFY_OCTAL_PERMISSIONS(perm), level, flags, { arg } }
-/* Obsolete - use module_param_cb() */
+/*
+ * Useful for describing a set/get pair used only once (i.e. for this
+ * parameter). For repeated set/get pairs (i.e. the same struct
+ * kernel_param_ops), use module_param_cb() instead.
+ */
#define module_param_call(name, _set, _get, arg, perm) \
static const struct kernel_param_ops __param_ops_##name = \
{ .flags = 0, .set = _set, .get = _get }; \
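A hedged example of the single-use set/get pair this comment describes; all names below are hypothetical:

static int debug_level;

static int set_debug_level(const char *val, const struct kernel_param *kp)
{
        int ret = param_set_int(val, kp);       /* standard int parsing */

        if (!ret)
                pr_info("debug level set to %d\n", debug_level);
        return ret;
}

static int get_debug_level(char *buffer, const struct kernel_param *kp)
{
        return param_get_int(buffer, kp);
}

module_param_call(debug_level, set_debug_level, get_debug_level,
                  &debug_level, 0644);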
@@ -381,6 +385,8 @@ extern bool parameq(const char *name1, const char *name2);
*/
extern bool parameqn(const char *name1, const char *name2, size_t n);
+typedef int (*parse_unknown_fn)(char *param, char *val, const char *doing, void *arg);
+
/* Called on module insert or kernel boot */
extern char *parse_args(const char *name,
char *args,
@@ -388,9 +394,7 @@ extern char *parse_args(const char *name,
unsigned num,
s16 level_min,
s16 level_max,
- void *arg,
- int (*unknown)(char *param, char *val,
- const char *doing, void *arg));
+ void *arg, parse_unknown_fn unknown);
/* Called by module remove. */
#ifdef CONFIG_SYSFS
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 1ea326c368f7..c34c18b4e8f3 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -50,8 +50,7 @@ struct path;
#define MNT_ATIME_MASK (MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME )
#define MNT_INTERNAL_FLAGS (MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | \
- MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED | \
- MNT_CURSOR)
+ MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED | MNT_ONRB)
#define MNT_INTERNAL 0x4000
@@ -65,7 +64,7 @@ struct path;
#define MNT_SYNC_UMOUNT 0x2000000
#define MNT_MARKED 0x4000000
#define MNT_UMOUNT 0x8000000
-#define MNT_CURSOR 0x10000000
+#define MNT_ONRB 0x10000000
struct vfsmount {
struct dentry *mnt_root; /* root of the mounted tree */
@@ -92,8 +91,8 @@ extern bool __mnt_is_readonly(struct vfsmount *mnt);
extern bool mnt_may_suid(struct vfsmount *mnt);
extern struct vfsmount *clone_private_mount(const struct path *path);
-extern int __mnt_want_write(struct vfsmount *);
-extern void __mnt_drop_write(struct vfsmount *);
+int mnt_get_write_access(struct vfsmount *mnt);
+void mnt_put_write_access(struct vfsmount *mnt);
extern struct vfsmount *fc_mount(struct fs_context *fc);
extern struct vfsmount *vfs_create_mount(struct fs_context *fc);
@@ -107,7 +106,6 @@ extern struct vfsmount *vfs_submount(const struct dentry *mountpoint,
extern void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list);
extern void mark_mounts_for_expiry(struct list_head *mounts);
-extern dev_t name_to_dev_t(const char *name);
extern bool path_is_mountpoint(const struct path *path);
extern bool our_mnt(struct vfsmount *mnt);
@@ -124,4 +122,6 @@ extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *,
struct vfsmount *);
extern void kern_unmount_array(struct vfsmount *mnt[], unsigned int num);
+extern int cifs_root_data(char **dev, char **opts);
+
#endif /* _LINUX_MOUNT_H */
diff --git a/include/linux/moxtet.h b/include/linux/moxtet.h
index 79184948fab4..ac577699edfd 100644
--- a/include/linux/moxtet.h
+++ b/include/linux/moxtet.h
@@ -35,8 +35,6 @@ enum turris_mox_module_id {
#define MOXTET_NIRQS 16
-extern struct bus_type moxtet_type;
-
struct moxtet {
struct device *dev;
struct mutex lock;
diff --git a/include/linux/mroute.h b/include/linux/mroute.h
index 80b8400ab8b2..4c5003afee6c 100644
--- a/include/linux/mroute.h
+++ b/include/linux/mroute.h
@@ -18,10 +18,11 @@ static inline int ip_mroute_opt(int opt)
int ip_mroute_setsockopt(struct sock *, int, sockptr_t, unsigned int);
int ip_mroute_getsockopt(struct sock *, int, sockptr_t, sockptr_t);
-int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg);
+int ipmr_ioctl(struct sock *sk, int cmd, void *arg);
int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
int ip_mr_init(void);
bool ipmr_rule_default(const struct fib_rule *rule);
+int ipmr_sk_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
#else
static inline int ip_mroute_setsockopt(struct sock *sock, int optname,
sockptr_t optval, unsigned int optlen)
@@ -35,7 +36,7 @@ static inline int ip_mroute_getsockopt(struct sock *sk, int optname,
return -ENOPROTOOPT;
}
-static inline int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
+static inline int ipmr_ioctl(struct sock *sk, int cmd, void *arg)
{
return -ENOIOCTLCMD;
}
@@ -54,6 +55,12 @@ static inline bool ipmr_rule_default(const struct fib_rule *rule)
{
return true;
}
+
+static inline int ipmr_sk_ioctl(struct sock *sk, unsigned int cmd,
+ void __user *arg)
+{
+ return 1;
+}
#endif
#define VIFF_STATIC 0x8000
diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h
index 8f2b307fb124..63ef5191cc57 100644
--- a/include/linux/mroute6.h
+++ b/include/linux/mroute6.h
@@ -29,10 +29,10 @@ struct sock;
extern int ip6_mroute_setsockopt(struct sock *, int, sockptr_t, unsigned int);
extern int ip6_mroute_getsockopt(struct sock *, int, sockptr_t, sockptr_t);
extern int ip6_mr_input(struct sk_buff *skb);
-extern int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg);
extern int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
extern int ip6_mr_init(void);
extern void ip6_mr_cleanup(void);
+int ip6mr_ioctl(struct sock *sk, int cmd, void *arg);
#else
static inline int ip6_mroute_setsockopt(struct sock *sock, int optname,
sockptr_t optval, unsigned int optlen)
@@ -48,7 +48,7 @@ int ip6_mroute_getsockopt(struct sock *sock,
}
static inline
-int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
+int ip6mr_ioctl(struct sock *sk, int cmd, void *arg)
{
return -ENOIOCTLCMD;
}
@@ -100,6 +100,27 @@ extern int ip6mr_get_route(struct net *net, struct sk_buff *skb,
#ifdef CONFIG_IPV6_MROUTE
bool mroute6_is_socket(struct net *net, struct sk_buff *skb);
extern int ip6mr_sk_done(struct sock *sk);
+static inline int ip6mr_sk_ioctl(struct sock *sk, unsigned int cmd,
+ void __user *arg)
+{
+ switch (cmd) {
+ /* These userspace buffers will be consumed by ip6mr_ioctl() */
+ case SIOCGETMIFCNT_IN6: {
+ struct sioc_mif_req6 buffer;
+
+ return sock_ioctl_inout(sk, cmd, arg, &buffer,
+ sizeof(buffer));
+ }
+ case SIOCGETSGCNT_IN6: {
+ struct sioc_sg_req6 buffer;
+
+ return sock_ioctl_inout(sk, cmd, arg, &buffer,
+ sizeof(buffer));
+ }
+ }
+
+ return 1;
+}
#else
static inline bool mroute6_is_socket(struct net *net, struct sk_buff *skb)
{
@@ -109,5 +130,11 @@ static inline int ip6mr_sk_done(struct sock *sk)
{
return 0;
}
+
+static inline int ip6mr_sk_ioctl(struct sock *sk, unsigned int cmd,
+ void __user *arg)
+{
+ return 1;
+}
#endif
#endif
diff --git a/include/linux/msi.h b/include/linux/msi.h
index a50ea79522f8..ddace8c34dcf 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -547,12 +547,6 @@ enum {
MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS = (1 << 5),
/* Free MSI descriptors */
MSI_FLAG_FREE_MSI_DESCS = (1 << 6),
- /*
- * Quirk to handle MSI implementations which do not provide
- * masking. Currently known to affect x86, but has to be partially
- * handled in the core MSI code.
- */
- MSI_FLAG_NOMASK_QUIRK = (1 << 7),
/* Mask for the generic functionality */
MSI_GENERIC_FLAGS_MASK = GENMASK(15, 0),
diff --git a/include/linux/mtd/blktrans.h b/include/linux/mtd/blktrans.h
index 15cc9b95e32b..6e471436bba5 100644
--- a/include/linux/mtd/blktrans.h
+++ b/include/linux/mtd/blktrans.h
@@ -34,7 +34,7 @@ struct mtd_blktrans_dev {
struct blk_mq_tag_set *tag_set;
spinlock_t queue_lock;
void *priv;
- fmode_t file_mode;
+ bool writable;
};
struct mtd_blktrans_ops {
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index d88bb56c18e2..947410faf9e2 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -287,7 +287,7 @@ struct cfi_private {
unsigned long chipshift; /* Because they're of the same type */
const char *im_name; /* inter_module name for cmdset_setup */
unsigned long quirks;
- struct flchip chips[]; /* per-chip data structure for each chip */
+ struct flchip chips[] __counted_by(numchips); /* per-chip data structure for each chip */
};
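With __counted_by() annotations like this one, the counter field must be assigned before the flexible array is indexed so that fortified bounds checks see a valid length. A sketch of the allocation pattern, where nchips is a hypothetical local:

struct cfi_private *cfi;

cfi = kzalloc(struct_size(cfi, chips, nchips), GFP_KERNEL);
if (!cfi)
        return -ENOMEM;
cfi->numchips = nchips;         /* set the count before touching chips[] */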
uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
diff --git a/include/linux/mtd/jedec.h b/include/linux/mtd/jedec.h
index 0b6b59f7cfbd..56047a4e54c9 100644
--- a/include/linux/mtd/jedec.h
+++ b/include/linux/mtd/jedec.h
@@ -21,6 +21,9 @@ struct jedec_ecc_info {
/* JEDEC features */
#define JEDEC_FEATURE_16_BIT_BUS (1 << 0)
+/* JEDEC Optional Commands */
+#define JEDEC_OPT_CMD_READ_CACHE BIT(1)
+
struct nand_jedec_params {
/* rev info and features block */
/* 'J' 'E' 'S' 'D' */
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 7c58c44662b8..914a9f974baa 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -379,7 +379,7 @@ struct mtd_info {
struct module *owner;
struct device dev;
- int usecount;
+ struct kref refcnt;
struct mtd_debug_info dbg;
struct nvmem_device *nvmem;
struct nvmem_device *otp_user_nvmem;
diff --git a/include/linux/mtd/onfi.h b/include/linux/mtd/onfi.h
index a7376f9beddf..55ab2e4d62f9 100644
--- a/include/linux/mtd/onfi.h
+++ b/include/linux/mtd/onfi.h
@@ -55,6 +55,7 @@
#define ONFI_SUBFEATURE_PARAM_LEN 4
/* ONFI optional commands SET/GET FEATURES supported? */
+#define ONFI_OPT_CMD_READ_CACHE BIT(1)
#define ONFI_OPT_CMD_SET_GET_FEATURES BIT(2)
struct nand_onfi_params {
diff --git a/include/linux/mtd/qinfo.h b/include/linux/mtd/qinfo.h
index 2e3f43788d48..0421f12156b5 100644
--- a/include/linux/mtd/qinfo.h
+++ b/include/linux/mtd/qinfo.h
@@ -24,7 +24,7 @@ struct lpddr_private {
struct qinfo_chip *qinfo;
int numchips;
unsigned long chipshift;
- struct flchip chips[];
+ struct flchip chips[] __counted_by(numchips);
};
/* qinfo_query_info structure contains request information for
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index 5159d692f9ce..e84522e31301 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -225,6 +225,7 @@ struct gpio_desc;
* struct nand_parameters - NAND generic parameters from the parameter page
* @model: Model name
* @supports_set_get_features: The NAND chip supports setting/getting features
+ * @supports_read_cache: The NAND chip supports read cache operations
* @set_feature_list: Bitmap of features that can be set
* @get_feature_list: Bitmap of features that can be get
* @onfi: ONFI specific parameters
@@ -233,6 +234,7 @@ struct nand_parameters {
/* Generic parameters */
const char *model;
bool supports_set_get_features;
+ bool supports_read_cache;
DECLARE_BITMAP(set_feature_list, ONFI_FEATURE_NUMBER);
DECLARE_BITMAP(get_feature_list, ONFI_FEATURE_NUMBER);
@@ -1001,6 +1003,8 @@ struct nand_op_parser {
/**
* struct nand_operation - NAND operation descriptor
* @cs: the CS line to select for this NAND operation
+ * @deassert_wp: set to true when the operation requires the WP pin to be
+ * de-asserted (ERASE, PROG, ...)
* @instrs: array of instructions to execute
* @ninstrs: length of the @instrs array
*
@@ -1008,6 +1012,7 @@ struct nand_op_parser {
*/
struct nand_operation {
unsigned int cs;
+ bool deassert_wp;
const struct nand_op_instr *instrs;
unsigned int ninstrs;
};
@@ -1019,6 +1024,14 @@ struct nand_operation {
.ninstrs = ARRAY_SIZE(_instrs), \
}
+#define NAND_DESTRUCTIVE_OPERATION(_cs, _instrs) \
+ { \
+ .cs = _cs, \
+ .deassert_wp = true, \
+ .instrs = _instrs, \
+ .ninstrs = ARRAY_SIZE(_instrs), \
+ }
+
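Sketch of the intended distinction, assuming erase_instrs is a driver-built nand_op_instr array in scope: a destructive sequence asks the core (or a WP-aware controller) to de-assert write protect, while a plain NAND_OPERATION() leaves deassert_wp false:

struct nand_operation op = NAND_DESTRUCTIVE_OPERATION(cs, erase_instrs);
/* op.deassert_wp is true; a read built with NAND_OPERATION() keeps it false. */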
int nand_op_parser_exec_op(struct nand_chip *chip,
const struct nand_op_parser *parser,
const struct nand_operation *op, bool check_only);
@@ -1102,6 +1115,7 @@ struct nand_controller_ops {
* the bus without restarting an entire read operation nor
* changing the column.
* @supported_op.cont_read: The controller supports sequential cache reads.
+ * @controller_wp: the controller is in charge of handling the WP pin.
*/
struct nand_controller {
struct mutex lock;
@@ -1110,6 +1124,7 @@ struct nand_controller {
unsigned int data_only_read: 1;
unsigned int cont_read: 1;
} supported_op;
+ bool controller_wp;
};
static inline void nand_controller_init(struct nand_controller *nfc)
@@ -1263,6 +1278,7 @@ struct nand_secure_region {
* @cont_read: Sequential page read internals
* @cont_read.ongoing: Whether a continuous read is ongoing or not
* @cont_read.first_page: Start of the continuous read operation
+ * @cont_read.pause_page: End of the current sequential cache read operation
* @cont_read.last_page: End of the continuous read operation
* @controller: The hardware controller structure which is shared among multiple
* independent devices
@@ -1319,6 +1335,7 @@ struct nand_chip {
struct {
bool ongoing;
unsigned int first_page;
+ unsigned int pause_page;
unsigned int last_page;
} cont_read;
@@ -1540,6 +1557,7 @@ int nand_reset_op(struct nand_chip *chip);
int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
unsigned int len);
int nand_status_op(struct nand_chip *chip, u8 *status);
+int nand_exit_status_op(struct nand_chip *chip);
int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock);
int nand_read_page_op(struct nand_chip *chip, unsigned int page,
unsigned int offset_in_page, void *buf, unsigned int len);
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
index 3e285c09d16d..badb4c1ac079 100644
--- a/include/linux/mtd/spinand.h
+++ b/include/linux/mtd/spinand.h
@@ -263,6 +263,7 @@ struct spinand_manufacturer {
extern const struct spinand_manufacturer alliancememory_spinand_manufacturer;
extern const struct spinand_manufacturer ato_spinand_manufacturer;
extern const struct spinand_manufacturer esmt_c8_spinand_manufacturer;
+extern const struct spinand_manufacturer foresee_spinand_manufacturer;
extern const struct spinand_manufacturer gigadevice_spinand_manufacturer;
extern const struct spinand_manufacturer macronix_spinand_manufacturer;
extern const struct spinand_manufacturer micron_spinand_manufacturer;
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 8f226d460f51..7e208d46ba5b 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -19,6 +19,8 @@
#include <asm/processor.h>
#include <linux/osq_lock.h>
#include <linux/debug_locks.h>
+#include <linux/cleanup.h>
+#include <linux/mutex_types.h>
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
@@ -32,49 +34,6 @@
#ifndef CONFIG_PREEMPT_RT
-/*
- * Simple, straightforward mutexes with strict semantics:
- *
- * - only one task can hold the mutex at a time
- * - only the owner can unlock the mutex
- * - multiple unlocks are not permitted
- * - recursive locking is not permitted
- * - a mutex object must be initialized via the API
- * - a mutex object must not be initialized via memset or copying
- * - task may not exit with mutex held
- * - memory areas where held locks reside must not be freed
- * - held mutexes must not be reinitialized
- * - mutexes may not be used in hardware or software interrupt
- * contexts such as tasklets and timers
- *
- * These semantics are fully enforced when DEBUG_MUTEXES is
- * enabled. Furthermore, besides enforcing the above rules, the mutex
- * debugging code also implements a number of additional features
- * that make lock debugging easier and faster:
- *
- * - uses symbolic names of mutexes, whenever they are printed in debug output
- * - point-of-acquire tracking, symbolic lookup of function names
- * - list of all locks held in the system, printout of them
- * - owner tracking
- * - detects self-recursing locks and prints out all relevant info
- * - detects multi-task circular deadlocks and prints out all affected
- * locks and tasks (and only those tasks)
- */
-struct mutex {
- atomic_long_t owner;
- raw_spinlock_t wait_lock;
-#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
- struct optimistic_spin_queue osq; /* Spinner MCS lock */
-#endif
- struct list_head wait_list;
-#ifdef CONFIG_DEBUG_MUTEXES
- void *magic;
-#endif
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-};
-
#ifdef CONFIG_DEBUG_MUTEXES
#define __DEBUG_MUTEX_INITIALIZER(lockname) \
@@ -130,14 +89,6 @@ extern bool mutex_is_locked(struct mutex *lock);
/*
* Preempt-RT variant based on rtmutexes.
*/
-#include <linux/rtmutex.h>
-
-struct mutex {
- struct rt_mutex_base rtmutex;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-};
#define __MUTEX_INITIALIZER(mutexname) \
{ \
@@ -219,4 +170,8 @@ extern void mutex_unlock(struct mutex *lock);
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
+DEFINE_GUARD(mutex, struct mutex *, mutex_lock(_T), mutex_unlock(_T))
+DEFINE_GUARD_COND(mutex, _try, mutex_trylock(_T))
+DEFINE_GUARD_COND(mutex, _intr, mutex_lock_interruptible(_T) == 0)
+
#endif /* __LINUX_MUTEX_H */
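A hedged sketch of the new cleanup.h-based guards defined above; struct my_dev is hypothetical:

static int my_dev_set_mode(struct my_dev *dev, int mode)
{
        guard(mutex)(&dev->lock);       /* unlocked on every return path */

        if (!dev->ready)
                return -EBUSY;
        dev->mode = mode;
        return 0;
}

The _try and _intr variants behave the same way but make the guard conditional on mutex_trylock() or mutex_lock_interruptible() succeeding.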
diff --git a/include/linux/mutex_types.h b/include/linux/mutex_types.h
new file mode 100644
index 000000000000..fdf7f515fde8
--- /dev/null
+++ b/include/linux/mutex_types.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_MUTEX_TYPES_H
+#define __LINUX_MUTEX_TYPES_H
+
+#include <linux/atomic.h>
+#include <linux/lockdep_types.h>
+#include <linux/osq_lock.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+
+#ifndef CONFIG_PREEMPT_RT
+
+/*
+ * Simple, straightforward mutexes with strict semantics:
+ *
+ * - only one task can hold the mutex at a time
+ * - only the owner can unlock the mutex
+ * - multiple unlocks are not permitted
+ * - recursive locking is not permitted
+ * - a mutex object must be initialized via the API
+ * - a mutex object must not be initialized via memset or copying
+ * - task may not exit with mutex held
+ * - memory areas where held locks reside must not be freed
+ * - held mutexes must not be reinitialized
+ * - mutexes may not be used in hardware or software interrupt
+ * contexts such as tasklets and timers
+ *
+ * These semantics are fully enforced when DEBUG_MUTEXES is
+ * enabled. Furthermore, besides enforcing the above rules, the mutex
+ * debugging code also implements a number of additional features
+ * that make lock debugging easier and faster:
+ *
+ * - uses symbolic names of mutexes, whenever they are printed in debug output
+ * - point-of-acquire tracking, symbolic lookup of function names
+ * - list of all locks held in the system, printout of them
+ * - owner tracking
+ * - detects self-recursing locks and prints out all relevant info
+ * - detects multi-task circular deadlocks and prints out all affected
+ * locks and tasks (and only those tasks)
+ */
+struct mutex {
+ atomic_long_t owner;
+ raw_spinlock_t wait_lock;
+#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
+ struct optimistic_spin_queue osq; /* Spinner MCS lock */
+#endif
+ struct list_head wait_list;
+#ifdef CONFIG_DEBUG_MUTEXES
+ void *magic;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+};
+
+#else /* !CONFIG_PREEMPT_RT */
+/*
+ * Preempt-RT variant based on rtmutexes.
+ */
+#include <linux/rtmutex.h>
+
+struct mutex {
+ struct rt_mutex_base rtmutex;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+};
+
+#endif /* CONFIG_PREEMPT_RT */
+
+#endif /* __LINUX_MUTEX_TYPES_H */
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 1463cbda4888..74e0cc14ebf8 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -66,6 +66,7 @@ extern struct dentry *kern_path_create(int, const char *, struct path *, unsigne
extern struct dentry *user_path_create(int, const char __user *, struct path *, unsigned int);
extern void done_path_create(struct path *, struct dentry *);
extern struct dentry *kern_path_locked(const char *, struct path *);
+extern struct dentry *user_path_locked_at(int , const char __user *, struct path *);
int vfs_path_parent_lookup(struct filename *filename, unsigned int flags,
struct path *parent, struct qstr *last, int *type,
const struct path *root);
@@ -92,6 +93,30 @@ extern struct dentry *lock_rename(struct dentry *, struct dentry *);
extern struct dentry *lock_rename_child(struct dentry *, struct dentry *);
extern void unlock_rename(struct dentry *, struct dentry *);
+/**
+ * mode_strip_umask - handle vfs umask stripping
+ * @dir: parent directory of the new inode
+ * @mode: mode of the new inode to be created in @dir
+ *
+ * In most filesystems, umask stripping depends on whether or not the
+ * filesystem supports POSIX ACLs. If the filesystem doesn't support them,
+ * umask stripping is done directly here. If the filesystem does support POSIX
+ * ACLs umask stripping is deferred until the filesystem calls
+ * posix_acl_create().
+ *
+ * Some filesystems (like NFSv4) also want to avoid umask stripping by the
+ * VFS, but don't support POSIX ACLs. Those filesystems can set SB_I_NOUMASK
+ * to get this effect without declaring that they support POSIX ACLs.
+ *
+ * Returns: mode
+ */
+static inline umode_t __must_check mode_strip_umask(const struct inode *dir, umode_t mode)
+{
+ if (!IS_POSIXACL(dir) && !(dir->i_sb->s_iflags & SB_I_NOUMASK))
+ mode &= ~current_umask();
+ return mode;
+}
+
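A brief usage sketch in a filesystem create path; the function and its surrounding context are hypothetical:

static int my_fs_create(struct mnt_idmap *idmap, struct inode *dir,
                        struct dentry *dentry, umode_t mode, bool excl)
{
        mode = mode_strip_umask(dir, mode);     /* no-op for POSIX-ACL supers */
        /* ... allocate and instantiate the inode with 'mode' ... */
        return 0;
}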
extern int __must_check nd_jump_link(const struct path *path);
static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
@@ -112,7 +137,7 @@ static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
static inline bool
retry_estale(const long error, const unsigned int flags)
{
- return error == -ESTALE && !(flags & LOOKUP_REVAL);
+ return unlikely(error == -ESTALE && !(flags & LOOKUP_REVAL));
}
#endif /* _LINUX_NAMEI_H */
diff --git a/include/linux/net.h b/include/linux/net.h
index b73ad8e3c212..c9b4a63791a4 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -43,6 +43,7 @@ struct net;
#define SOCK_PASSSEC 4
#define SOCK_SUPPORT_ZC 5
#define SOCK_CUSTOM_SOCKOPT 6
+#define SOCK_PASSPIDFD 7
#ifndef ARCH_HAS_SOCKET_TYPES
/**
@@ -122,7 +123,7 @@ struct socket {
struct file *file;
struct sock *sk;
- const struct proto_ops *ops;
+ const struct proto_ops *ops; /* Might change with IPV6_ADDRFORM or MPTCP. */
struct socket_wq wq;
};
@@ -206,10 +207,9 @@ struct proto_ops {
size_t total_len, int flags);
int (*mmap) (struct file *file, struct socket *sock,
struct vm_area_struct * vma);
- ssize_t (*sendpage) (struct socket *sock, struct page *page,
- int offset, size_t size, int flags);
ssize_t (*splice_read)(struct socket *sock, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len, unsigned int flags);
+ void (*splice_eof)(struct socket *sock);
int (*set_peek_off)(struct sock *sk, int val);
int (*peek_len)(struct socket *sock);
@@ -220,8 +220,6 @@ struct proto_ops {
sk_read_actor_t recv_actor);
/* This is different from read_sock(), it reads an entire skb at a time. */
int (*read_skb)(struct sock *sk, skb_read_actor_t recv_actor);
- int (*sendpage_locked)(struct sock *sk, struct page *page,
- int offset, size_t size, int flags);
int (*sendmsg_locked)(struct sock *sk, struct msghdr *msg,
size_t size);
int (*set_rcvlowat)(struct sock *sk, int val);
@@ -339,10 +337,6 @@ int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
int flags);
int kernel_getsockname(struct socket *sock, struct sockaddr *addr);
int kernel_getpeername(struct socket *sock, struct sockaddr *addr);
-int kernel_sendpage(struct socket *sock, struct page *page, int offset,
- size_t size, int flags);
-int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset,
- size_t size, int flags);
int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how);
/* Routine returns the IP overhead imposed by a (caller-protected) socket. */
diff --git a/include/linux/net/intel/i40e_client.h b/include/linux/net/intel/i40e_client.h
index ed42bd5f639f..0aa4411528fc 100644
--- a/include/linux/net/intel/i40e_client.h
+++ b/include/linux/net/intel/i40e_client.h
@@ -45,7 +45,7 @@ struct i40e_qv_info {
struct i40e_qvlist_info {
u32 num_vectors;
- struct i40e_qv_info qv_info[];
+ struct i40e_qv_info qv_info[] __counted_by(num_vectors);
};
diff --git a/include/linux/net_tstamp.h b/include/linux/net_tstamp.h
index fd67f3cc0c4b..eb01c37e71e0 100644
--- a/include/linux/net_tstamp.h
+++ b/include/linux/net_tstamp.h
@@ -5,12 +5,23 @@
#include <uapi/linux/net_tstamp.h>
+enum hwtstamp_source {
+ HWTSTAMP_SOURCE_NETDEV,
+ HWTSTAMP_SOURCE_PHYLIB,
+};
+
/**
* struct kernel_hwtstamp_config - Kernel copy of struct hwtstamp_config
*
* @flags: see struct hwtstamp_config
* @tx_type: see struct hwtstamp_config
* @rx_filter: see struct hwtstamp_config
+ * @ifr: pointer to ifreq structure from the original ioctl request, to pass to
+ * a legacy implementation of a lower driver
+ * @copied_to_user: request was passed to a legacy implementation which already
+ * copied the ioctl request back to user space
+ * @source: indication whether timestamps should come from the netdev or from
+ * an attached phylib PHY
*
* Prefer using this structure for in-kernel processing of hardware
* timestamping configuration, over the inextensible struct hwtstamp_config
@@ -20,6 +31,9 @@ struct kernel_hwtstamp_config {
int flags;
int tx_type;
int rx_filter;
+ struct ifreq *ifr;
+ bool copied_to_user;
+ enum hwtstamp_source source;
};
static inline void hwtstamp_config_to_kernel(struct kernel_hwtstamp_config *kernel_cfg,
@@ -30,4 +44,20 @@ static inline void hwtstamp_config_to_kernel(struct kernel_hwtstamp_config *kern
kernel_cfg->rx_filter = cfg->rx_filter;
}
+static inline void hwtstamp_config_from_kernel(struct hwtstamp_config *cfg,
+ const struct kernel_hwtstamp_config *kernel_cfg)
+{
+ cfg->flags = kernel_cfg->flags;
+ cfg->tx_type = kernel_cfg->tx_type;
+ cfg->rx_filter = kernel_cfg->rx_filter;
+}
+
+static inline bool kernel_hwtstamp_config_changed(const struct kernel_hwtstamp_config *a,
+ const struct kernel_hwtstamp_config *b)
+{
+ return a->flags != b->flags ||
+ a->tx_type != b->tx_type ||
+ a->rx_filter != b->rx_filter;
+}
+
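A hedged sketch of how a driver's ndo_hwtstamp_set() might use kernel_hwtstamp_config_changed() to skip redundant hardware reprogramming; struct my_priv and its tstamp_cfg member are hypothetical:

static int my_hwtstamp_set(struct net_device *dev,
                           struct kernel_hwtstamp_config *cfg,
                           struct netlink_ext_ack *extack)
{
        struct my_priv *priv = netdev_priv(dev);

        /* Only touch the hardware when something actually changed. */
        if (!kernel_hwtstamp_config_changed(cfg, &priv->tstamp_cfg))
                return 0;

        /* ... program the timestamping registers from *cfg ... */
        priv->tstamp_cfg = *cfg;
        return 0;
}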
#endif /* _LINUX_NET_TIMESTAMPING_H_ */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index c2f0c6002a84..78a09af89e39 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -40,7 +40,6 @@
#include <net/dcbnl.h>
#endif
#include <net/netprio_cgroup.h>
-#include <net/xdp.h>
#include <linux/netdev_features.h>
#include <linux/neighbour.h>
@@ -57,6 +56,7 @@
struct netpoll_info;
struct device;
struct ethtool_ops;
+struct kernel_hwtstamp_config;
struct phy_device;
struct dsa_port;
struct ip_tunnel_parm;
@@ -76,8 +76,12 @@ struct udp_tunnel_nic_info;
struct udp_tunnel_nic;
struct bpf_prog;
struct xdp_buff;
+struct xdp_frame;
+struct xdp_metadata_ops;
struct xdp_md;
+typedef u32 xdp_features_t;
+
void synchronize_net(void);
void netdev_set_default_ethtool_ops(struct net_device *dev,
const struct ethtool_ops *ops);
@@ -376,6 +380,7 @@ struct napi_struct {
/* control-path-only fields follow */
struct list_head dev_list;
struct hlist_node napi_hash_node;
+ int irq;
};
enum {
@@ -476,6 +481,29 @@ static inline bool napi_prefer_busy_poll(struct napi_struct *n)
return test_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state);
}
+/**
+ * napi_is_scheduled - test if NAPI is scheduled
+ * @n: NAPI context
+ *
+ * This check is "best-effort". Without any locking, a NAPI can be
+ * scheduled or terminated right after this check, so the result may
+ * be imprecise.
+ *
+ * NAPI_STATE_SCHED is an internal state; napi_is_scheduled() should
+ * normally not be used, and napi_schedule() should be used instead.
+ *
+ * Use it only if the driver really needs to check whether a NAPI is
+ * scheduled, for example in the context of a delayed timer that can
+ * be skipped if a NAPI is already scheduled.
+ *
+ * Return: true if NAPI is scheduled, false otherwise.
+ */
+static inline bool napi_is_scheduled(struct napi_struct *n)
+{
+ return test_bit(NAPI_STATE_SCHED, &n->state);
+}
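The delayed-timer case mentioned above could look like this sketch; struct my_priv, embedding the napi and timer, is hypothetical:

static void my_poll_timer(struct timer_list *t)
{
        struct my_priv *priv = from_timer(priv, t, timer);

        /* Skip the kick if a poll is already pending anyway. */
        if (!napi_is_scheduled(&priv->napi))
                napi_schedule(&priv->napi);
}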
+
bool napi_schedule_prep(struct napi_struct *n);
/**
@@ -484,11 +512,18 @@ bool napi_schedule_prep(struct napi_struct *n);
*
* Schedule NAPI poll routine to be called if it is not already
* running.
+ * Return true if the NAPI was scheduled, false otherwise.
+ * Refer to napi_schedule_prep() for the reasons why a NAPI
+ * might not be scheduled.
*/
-static inline void napi_schedule(struct napi_struct *n)
+static inline bool napi_schedule(struct napi_struct *n)
{
- if (napi_schedule_prep(n))
+ if (napi_schedule_prep(n)) {
__napi_schedule(n);
+ return true;
+ }
+
+ return false;
}
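With the bool return, an interrupt handler can mask device IRQs only when a poll was actually scheduled; a hedged sketch where struct my_priv and my_mask_irqs() are hypothetical:

static irqreturn_t my_isr(int irq, void *data)
{
        struct my_priv *priv = data;

        if (napi_schedule(&priv->napi))
                my_mask_irqs(priv);     /* re-enabled from the poll routine */

        return IRQ_HANDLED;
}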
/**
@@ -503,16 +538,6 @@ static inline void napi_schedule_irqoff(struct napi_struct *n)
__napi_schedule_irqoff(n);
}
-/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
-static inline bool napi_reschedule(struct napi_struct *napi)
-{
- if (napi_schedule_prep(napi)) {
- __napi_schedule(napi);
- return true;
- }
- return false;
-}
-
/**
* napi_complete_done - NAPI processing complete
* @n: NAPI context
@@ -639,6 +664,10 @@ struct netdev_queue {
#ifdef CONFIG_XDP_SOCKETS
struct xsk_buff_pool *pool;
#endif
+ /* NAPI instance for the queue
+ * Readers and writers must hold RTNL
+ */
+ struct napi_struct *napi;
/*
* write-mostly part
*/
@@ -782,32 +811,6 @@ bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
#endif
#endif /* CONFIG_RPS */
-/* This structure contains an instance of an RX queue. */
-struct netdev_rx_queue {
- struct xdp_rxq_info xdp_rxq;
-#ifdef CONFIG_RPS
- struct rps_map __rcu *rps_map;
- struct rps_dev_flow_table __rcu *rps_flow_table;
-#endif
- struct kobject kobj;
- struct net_device *dev;
- netdevice_tracker dev_tracker;
-
-#ifdef CONFIG_XDP_SOCKETS
- struct xsk_buff_pool *pool;
-#endif
-} ____cacheline_aligned_in_smp;
-
-/*
- * RX queue sysfs structures and functions.
- */
-struct rx_queue_attribute {
- struct attribute attr;
- ssize_t (*show)(struct netdev_rx_queue *queue, char *buf);
- ssize_t (*store)(struct netdev_rx_queue *queue,
- const char *buf, size_t len);
-};
-
/* XPS map type and offset of the xps map within net_device->xps_maps[]. */
enum xps_map_type {
XPS_CPUS = 0,
@@ -939,6 +942,7 @@ struct net_device_path {
u8 queue;
u16 wcid;
u8 bss;
+ u8 amsdu;
} mtk_wdma;
};
};
@@ -1309,9 +1313,7 @@ struct netdev_net_notifier {
* struct net_device *dev,
* const unsigned char *addr, u16 vid)
* Deletes the FDB entry from dev coresponding to addr.
- * int (*ndo_fdb_del_bulk)(struct ndmsg *ndm, struct nlattr *tb[],
- * struct net_device *dev,
- * u16 vid,
+ * int (*ndo_fdb_del_bulk)(struct nlmsghdr *nlh, struct net_device *dev,
* struct netlink_ext_ack *extack);
* int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
* struct net_device *dev, struct net_device *filter_dev,
@@ -1325,6 +1327,9 @@ struct netdev_net_notifier {
* int (*ndo_mdb_del)(struct net_device *dev, struct nlattr *tb[],
* struct netlink_ext_ack *extack);
* Deletes the MDB entry from dev.
+ * int (*ndo_mdb_del_bulk)(struct net_device *dev, struct nlattr *tb[],
+ * struct netlink_ext_ack *extack);
+ * Bulk deletes MDB entries from dev.
* int (*ndo_mdb_dump)(struct net_device *dev, struct sk_buff *skb,
* struct netlink_callback *cb);
* Dumps MDB entries from dev. The first argument (marker) in the netlink
@@ -1418,6 +1423,16 @@ struct netdev_net_notifier {
* Get hardware timestamp based on normal/adjustable time or free running
* cycle counter. This function is required if physical clock supports a
* free running cycle counter.
+ *
+ * int (*ndo_hwtstamp_get)(struct net_device *dev,
+ * struct kernel_hwtstamp_config *kernel_config);
+ * Get the currently configured hardware timestamping parameters for the
+ * NIC device.
+ *
+ * int (*ndo_hwtstamp_set)(struct net_device *dev,
+ * struct kernel_hwtstamp_config *kernel_config,
+ * struct netlink_ext_ack *extack);
+ * Change the hardware timestamping parameters for NIC device.
*/
struct net_device_ops {
int (*ndo_init)(struct net_device *dev);
@@ -1576,10 +1591,8 @@ struct net_device_ops {
struct net_device *dev,
const unsigned char *addr,
u16 vid, struct netlink_ext_ack *extack);
- int (*ndo_fdb_del_bulk)(struct ndmsg *ndm,
- struct nlattr *tb[],
+ int (*ndo_fdb_del_bulk)(struct nlmsghdr *nlh,
struct net_device *dev,
- u16 vid,
struct netlink_ext_ack *extack);
int (*ndo_fdb_dump)(struct sk_buff *skb,
struct netlink_callback *cb,
@@ -1599,9 +1612,16 @@ struct net_device_ops {
int (*ndo_mdb_del)(struct net_device *dev,
struct nlattr *tb[],
struct netlink_ext_ack *extack);
+ int (*ndo_mdb_del_bulk)(struct net_device *dev,
+ struct nlattr *tb[],
+ struct netlink_ext_ack *extack);
int (*ndo_mdb_dump)(struct net_device *dev,
struct sk_buff *skb,
struct netlink_callback *cb);
+ int (*ndo_mdb_get)(struct net_device *dev,
+ struct nlattr *tb[], u32 portid,
+ u32 seq,
+ struct netlink_ext_ack *extack);
int (*ndo_bridge_setlink)(struct net_device *dev,
struct nlmsghdr *nlh,
u16 flags,
@@ -1652,12 +1672,11 @@ struct net_device_ops {
ktime_t (*ndo_get_tstamp)(struct net_device *dev,
const struct skb_shared_hwtstamps *hwtstamps,
bool cycles);
-};
-
-struct xdp_metadata_ops {
- int (*xmo_rx_timestamp)(const struct xdp_md *ctx, u64 *timestamp);
- int (*xmo_rx_hash)(const struct xdp_md *ctx, u32 *hash,
- enum xdp_rss_hash_type *rss_type);
+ int (*ndo_hwtstamp_get)(struct net_device *dev,
+ struct kernel_hwtstamp_config *kernel_config);
+ int (*ndo_hwtstamp_set)(struct net_device *dev,
+ struct kernel_hwtstamp_config *kernel_config,
+ struct netlink_ext_ack *extack);
};
/**
@@ -1708,6 +1727,9 @@ struct xdp_metadata_ops {
* @IFF_TX_SKB_NO_LINEAR: device/driver is capable of xmitting frames with
* skb_headlen(skb) == 0 (data starts from frag0)
* @IFF_CHANGE_PROTO_DOWN: device supports setting carrier via IFLA_PROTO_DOWN
+ * @IFF_SEE_ALL_HWTSTAMP_REQUESTS: device wants to see calls to
+ * ndo_hwtstamp_set() for all timestamp requests regardless of source,
+ * even if those aren't HWTSTAMP_SOURCE_NETDEV.
*/
enum netdev_priv_flags {
IFF_802_1Q_VLAN = 1<<0,
@@ -1743,6 +1765,7 @@ enum netdev_priv_flags {
IFF_NO_ADDRCONF = BIT_ULL(30),
IFF_TX_SKB_NO_LINEAR = BIT_ULL(31),
IFF_CHANGE_PROTO_DOWN = BIT_ULL(32),
+ IFF_SEE_ALL_HWTSTAMP_REQUESTS = BIT_ULL(33),
};
#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
@@ -1783,6 +1806,13 @@ enum netdev_ml_priv_type {
ML_PRIV_CAN,
};
+enum netdev_stat_type {
+ NETDEV_PCPU_STAT_NONE,
+ NETDEV_PCPU_STAT_LSTATS, /* struct pcpu_lstats */
+ NETDEV_PCPU_STAT_TSTATS, /* struct pcpu_sw_netstats */
+ NETDEV_PCPU_STAT_DSTATS, /* struct pcpu_dstats */
+};
+
/**
* struct net_device - The DEVICE structure.
*
@@ -1844,6 +1874,7 @@ enum netdev_ml_priv_type {
* @netdev_ops: Includes several pointers to callbacks,
* if one wants to override the ndo_*() functions
* @xdp_metadata_ops: Includes pointers to XDP metadata callbacks.
+ * @xsk_tx_metadata_ops: Includes pointers to AF_XDP TX metadata callbacks.
* @ethtool_ops: Management operations
* @l3mdev_ops: Layer 3 master device operations
* @ndisc_ops: Includes callbacks for different IPv6 neighbour
@@ -1930,8 +1961,7 @@ enum netdev_ml_priv_type {
*
* @rx_handler: handler for received packets
* @rx_handler_data: XXX: need comments on this one
- * @miniq_ingress: ingress/clsact qdisc specific data for
- * ingress processing
+ * @tcx_ingress: BPF & clsact qdisc specific data for ingress processing
* @ingress_queue: XXX: need comments on this one
* @nf_hooks_ingress: netfilter hooks executed for ingress packets
* @broadcast: hw bcast address
@@ -1952,8 +1982,7 @@ enum netdev_ml_priv_type {
* @xps_maps: all CPUs/RXQs maps for XPS device
*
* @xps_maps: XXX: need comments on this one
- * @miniq_egress: clsact qdisc specific data for
- * egress processing
+ * @tcx_egress: BPF & clsact qdisc specific data for egress processing
* @nf_hooks_egress: netfilter hooks executed for egress packets
* @qdisc_hash: qdisc hash table
* @watchdog_timeo: Represents the timeout that is used by
@@ -1979,10 +2008,14 @@ enum netdev_ml_priv_type {
*
* @ml_priv: Mid-layer private
* @ml_priv_type: Mid-layer private type
- * @lstats: Loopback statistics
- * @tstats: Tunnel statistics
- * @dstats: Dummy statistics
- * @vstats: Virtual ethernet statistics
+ *
+ * @pcpu_stat_type: Type of device statistics which the core should
+ *                  allocate/free: none, lstats, tstats, dstats. None
+ *                  means the driver handles statistics allocation and
+ *                  freeing internally.
+ * @lstats: Loopback statistics: packets, bytes
+ * @tstats: Tunnel statistics: RX/TX packets, RX/TX bytes
+ * @dstats: Dummy statistics: RX/TX/drop packets, RX/TX bytes
*
* @garp_port: GARP
* @mrp_port: MRP
@@ -2045,6 +2078,8 @@ enum netdev_ml_priv_type {
* receive offload (GRO)
* @gro_ipv4_max_size: Maximum size of aggregated packet in generic
* receive offload (GRO), for IPv4.
+ * @xdp_zc_max_segs: Maximum number of segments supported by AF_XDP
+ * zero copy driver
*
* @dev_addr_shadow: Copy of @dev_addr to catch direct writes.
* @linkwatch_dev_tracker: refcount tracker used by linkwatch.
@@ -2058,11 +2093,85 @@ enum netdev_ml_priv_type {
* SET_NETDEV_DEVLINK_PORT macro. This pointer is static
* during the time netdevice is registered.
*
+ * @dpll_pin: Pointer to the SyncE source pin of a DPLL subsystem,
+ * where the clock is recovered.
+ *
* FIXME: cleanup struct net_device such that network protocol info
* moves out.
*/
struct net_device {
+ /* Cacheline organization can be found documented in
+ * Documentation/networking/net_cachelines/net_device.rst.
+ * Please update the document when adding new fields.
+ */
+
+ /* TX read-mostly hotpath */
+ __cacheline_group_begin(net_device_read_tx);
+ unsigned long long priv_flags;
+ const struct net_device_ops *netdev_ops;
+ const struct header_ops *header_ops;
+ struct netdev_queue *_tx;
+ netdev_features_t gso_partial_features;
+ unsigned int real_num_tx_queues;
+ unsigned int gso_max_size;
+ unsigned int gso_ipv4_max_size;
+ u16 gso_max_segs;
+ s16 num_tc;
+ /* Note : dev->mtu is often read without holding a lock.
+ * Writers usually hold RTNL.
+ * It is recommended to use READ_ONCE() to annotate the reads,
+ * and to use WRITE_ONCE() to annotate the writes.
+ */
+ unsigned int mtu;
+ unsigned short needed_headroom;
+ struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
+#ifdef CONFIG_XPS
+ struct xps_dev_maps __rcu *xps_maps[XPS_MAPS_MAX];
+#endif
+#ifdef CONFIG_NETFILTER_EGRESS
+ struct nf_hook_entries __rcu *nf_hooks_egress;
+#endif
+#ifdef CONFIG_NET_XGRESS
+ struct bpf_mprog_entry __rcu *tcx_egress;
+#endif
+ __cacheline_group_end(net_device_read_tx);
+
+ /* TXRX read-mostly hotpath */
+ __cacheline_group_begin(net_device_read_txrx);
+ union {
+ struct pcpu_lstats __percpu *lstats;
+ struct pcpu_sw_netstats __percpu *tstats;
+ struct pcpu_dstats __percpu *dstats;
+ };
+ unsigned int flags;
+ unsigned short hard_header_len;
+ netdev_features_t features;
+ struct inet6_dev __rcu *ip6_ptr;
+ __cacheline_group_end(net_device_read_txrx);
+
+ /* RX read-mostly hotpath */
+ __cacheline_group_begin(net_device_read_rx);
+ struct bpf_prog __rcu *xdp_prog;
+ struct list_head ptype_specific;
+ int ifindex;
+ unsigned int real_num_rx_queues;
+ struct netdev_rx_queue *_rx;
+ unsigned long gro_flush_timeout;
+ int napi_defer_hard_irqs;
+ unsigned int gro_max_size;
+ unsigned int gro_ipv4_max_size;
+ rx_handler_func_t __rcu *rx_handler;
+ void __rcu *rx_handler_data;
+ possible_net_t nd_net;
+#ifdef CONFIG_NETPOLL
+ struct netpoll_info __rcu *npinfo;
+#endif
+#ifdef CONFIG_NET_XGRESS
+ struct bpf_mprog_entry __rcu *tcx_ingress;
+#endif
+ __cacheline_group_end(net_device_read_rx);
+
char name[IFNAMSIZ];
struct netdev_name_node *name_node;
struct dev_ifalias __rcu *ifalias;
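
The read_tx/read_txrx/read_rx groupings above use the zero-size markers from linux/cache.h; they do not change the layout by themselves, but they let assertions and tooling verify that hot members stay packed together. A hedged sketch of the pattern in isolation (my_hot_struct is illustrative):

struct my_hot_struct {
	__cacheline_group_begin(my_read_mostly);
	unsigned long	flags;		/* read on every packet */
	const void	*ops;		/* read on every packet */
	__cacheline_group_end(my_read_mostly);
	spinlock_t	lock;		/* write-hot, kept out of the group */
};
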
@@ -2087,7 +2196,6 @@ struct net_device {
struct list_head unreg_list;
struct list_head close_list;
struct list_head ptype_all;
- struct list_head ptype_specific;
struct {
struct list_head upper;
@@ -2095,31 +2203,18 @@ struct net_device {
} adj_list;
/* Read-mostly cache-line for fast-path access */
- unsigned int flags;
xdp_features_t xdp_features;
- unsigned long long priv_flags;
- const struct net_device_ops *netdev_ops;
const struct xdp_metadata_ops *xdp_metadata_ops;
- int ifindex;
+ const struct xsk_tx_metadata_ops *xsk_tx_metadata_ops;
unsigned short gflags;
- unsigned short hard_header_len;
- /* Note : dev->mtu is often read without holding a lock.
- * Writers usually hold RTNL.
- * It is recommended to use READ_ONCE() to annotate the reads,
- * and to use WRITE_ONCE() to annotate the writes.
- */
- unsigned int mtu;
- unsigned short needed_headroom;
unsigned short needed_tailroom;
- netdev_features_t features;
netdev_features_t hw_features;
netdev_features_t wanted_features;
netdev_features_t vlan_features;
netdev_features_t hw_enc_features;
netdev_features_t mpls_features;
- netdev_features_t gso_partial_features;
unsigned int min_mtu;
unsigned int max_mtu;
@@ -2157,8 +2252,6 @@ struct net_device {
const struct tlsdev_ops *tlsdev_ops;
#endif
- const struct header_ops *header_ops;
-
unsigned char operstate;
unsigned char link_mode;
@@ -2199,9 +2292,7 @@ struct net_device {
/* Protocol-specific pointers */
-
struct in_device __rcu *ip_ptr;
- struct inet6_dev __rcu *ip6_ptr;
#if IS_ENABLED(CONFIG_VLAN_8021Q)
struct vlan_info __rcu *vlan_info;
#endif
@@ -2236,26 +2327,13 @@ struct net_device {
/* Interface address info used in eth_type_trans() */
const unsigned char *dev_addr;
- struct netdev_rx_queue *_rx;
unsigned int num_rx_queues;
- unsigned int real_num_rx_queues;
-
- struct bpf_prog __rcu *xdp_prog;
- unsigned long gro_flush_timeout;
- int napi_defer_hard_irqs;
#define GRO_LEGACY_MAX_SIZE 65536u
/* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE),
* and shinfo->gso_segs is a 16bit field.
*/
#define GRO_MAX_SIZE (8 * 65535u)
- unsigned int gro_max_size;
- unsigned int gro_ipv4_max_size;
- rx_handler_func_t __rcu *rx_handler;
- void __rcu *rx_handler_data;
-
-#ifdef CONFIG_NET_CLS_ACT
- struct mini_Qdisc __rcu *miniq_ingress;
-#endif
+ unsigned int xdp_zc_max_segs;
struct netdev_queue __rcu *ingress_queue;
#ifdef CONFIG_NETFILTER_INGRESS
struct nf_hook_entries __rcu *nf_hooks_ingress;
@@ -2270,25 +2348,13 @@ struct net_device {
/*
* Cache lines mostly used on transmit path
*/
- struct netdev_queue *_tx ____cacheline_aligned_in_smp;
unsigned int num_tx_queues;
- unsigned int real_num_tx_queues;
struct Qdisc __rcu *qdisc;
unsigned int tx_queue_len;
spinlock_t tx_global_lock;
struct xdp_dev_bulk_queue __percpu *xdp_bulkq;
-#ifdef CONFIG_XPS
- struct xps_dev_maps __rcu *xps_maps[XPS_MAPS_MAX];
-#endif
-#ifdef CONFIG_NET_CLS_ACT
- struct mini_Qdisc __rcu *miniq_egress;
-#endif
-#ifdef CONFIG_NETFILTER_EGRESS
- struct nf_hook_entries __rcu *nf_hooks_egress;
-#endif
-
#ifdef CONFIG_NET_SCHED
DECLARE_HASHTABLE (qdisc_hash, 4);
#endif
@@ -2327,21 +2393,11 @@ struct net_device {
bool needs_free_netdev;
void (*priv_destructor)(struct net_device *dev);
-#ifdef CONFIG_NETPOLL
- struct netpoll_info __rcu *npinfo;
-#endif
-
- possible_net_t nd_net;
-
/* mid-layer private */
void *ml_priv;
enum netdev_ml_priv_type ml_priv_type;
- union {
- struct pcpu_lstats __percpu *lstats;
- struct pcpu_sw_netstats __percpu *tstats;
- struct pcpu_dstats __percpu *dstats;
- };
+ enum netdev_stat_type pcpu_stat_type:8;
#if IS_ENABLED(CONFIG_GARP)
struct garp_port __rcu *garp_port;
@@ -2366,20 +2422,15 @@ struct net_device {
*/
#define GSO_MAX_SIZE (8 * GSO_MAX_SEGS)
- unsigned int gso_max_size;
#define TSO_LEGACY_MAX_SIZE 65536
#define TSO_MAX_SIZE UINT_MAX
unsigned int tso_max_size;
- u16 gso_max_segs;
#define TSO_MAX_SEGS U16_MAX
u16 tso_max_segs;
- unsigned int gso_ipv4_max_size;
#ifdef CONFIG_DCB
const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
- s16 num_tc;
- struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
u8 prio_tc_map[TC_BITMASK + 1];
#if IS_ENABLED(CONFIG_FCOE)
@@ -2414,6 +2465,14 @@ struct net_device {
struct rtnl_hw_stats64 *offload_xstats_l3;
struct devlink_port *devlink_port;
+
+#if IS_ENABLED(CONFIG_DPLL)
+ struct dpll_pin __rcu *dpll_pin;
+#endif
+#if IS_ENABLED(CONFIG_PAGE_POOL)
+ /** @page_pools: page pools created for this netdevice */
+ struct hlist_head page_pools;
+#endif
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
@@ -2618,6 +2677,15 @@ static inline void *netdev_priv(const struct net_device *dev)
*/
#define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
+void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index,
+ enum netdev_queue_type type,
+ struct napi_struct *napi);
+
+static inline void netif_napi_set_irq(struct napi_struct *napi, int irq)
+{
+ napi->irq = irq;
+}
+
/* Default NAPI poll() weight
* Device drivers are strongly advised to not use bigger value
*/
@@ -2734,6 +2802,16 @@ struct pcpu_sw_netstats {
struct u64_stats_sync syncp;
} __aligned(4 * sizeof(u64));
+struct pcpu_dstats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_drops;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_drops;
+ struct u64_stats_sync syncp;
+} __aligned(8 * sizeof(u64));
+
struct pcpu_lstats {
u64_stats_t packets;
u64_stats_t bytes;
@@ -3016,6 +3094,9 @@ extern rwlock_t dev_base_lock; /* Device list lock */
if (netdev_master_upper_dev_get_rcu(slave) == (bond))
#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
+#define for_each_netdev_dump(net, d, ifindex) \
+ xa_for_each_start(&(net)->dev_by_index, (ifindex), (d), (ifindex))
+
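
for_each_netdev_dump() walks net->dev_by_index and is resumable, which suits interrupted netlink dumps. A minimal sketch, assuming cb->args[0] carries the resume ifindex (my_dump and my_fill_dev are hypothetical):

static int my_fill_dev(struct sk_buff *skb, struct net_device *dev)
{
	return 0;	/* hypothetical: emit one message per device */
}

static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	unsigned long ifindex = cb->args[0];
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	int err = 0;

	rtnl_lock();
	for_each_netdev_dump(net, dev, ifindex) {
		err = my_fill_dev(skb, dev);
		if (err)
			break;
	}
	rtnl_unlock();
	cb->args[0] = ifindex;	/* resume point for the next invocation */
	return err ?: skb->len;
}
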
static inline struct net_device *next_net_device(struct net_device *dev)
{
struct list_head *lh;
@@ -3124,10 +3205,12 @@ struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev,
struct sock *sk);
struct net_device *dev_get_by_index(struct net *net, int ifindex);
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
+struct net_device *netdev_get_by_index(struct net *net, int ifindex,
+ netdevice_tracker *tracker, gfp_t gfp);
+struct net_device *netdev_get_by_name(struct net *net, const char *name,
+ netdevice_tracker *tracker, gfp_t gfp);
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
struct net_device *dev_get_by_napi_id(unsigned int napi_id);
-int dev_restart(struct net_device *dev);
-
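
netdev_get_by_index() and netdev_get_by_name() are the tracker-aware counterparts of dev_get_by_index()/dev_get_by_name(); the reference they take must be dropped with netdev_put() using the same tracker, so leaks can be attributed. A hedged sketch (my_attach is illustrative):

static int my_attach(struct net *net, int ifindex, netdevice_tracker *tracker)
{
	struct net_device *dev;

	dev = netdev_get_by_index(net, ifindex, tracker, GFP_KERNEL);
	if (!dev)
		return -ENODEV;
	/* ... use dev; the tracker ties this hold to its release ... */
	netdev_put(dev, tracker);
	return 0;
}
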
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
@@ -3414,6 +3497,16 @@ static inline void netdev_queue_set_dql_min_limit(struct netdev_queue *dev_queue
#endif
}
+static inline int netdev_queue_dql_avail(const struct netdev_queue *txq)
+{
+#ifdef CONFIG_BQL
+ /* Non-BQL migrated drivers will return 0, too. */
+ return dql_avail(&txq->dql);
+#else
+ return 0;
+#endif
+}
+
/**
* netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
* @dev_queue: pointer to transmit queue
@@ -3826,24 +3919,6 @@ static inline int netif_set_real_num_rx_queues(struct net_device *dev,
int netif_set_real_num_queues(struct net_device *dev,
unsigned int txq, unsigned int rxq);
-static inline struct netdev_rx_queue *
-__netif_get_rx_queue(struct net_device *dev, unsigned int rxq)
-{
- return dev->_rx + rxq;
-}
-
-#ifdef CONFIG_SYSFS
-static inline unsigned int get_netdev_rx_queue_index(
- struct netdev_rx_queue *queue)
-{
- struct net_device *dev = queue->dev;
- int index = queue - dev->_rx;
-
- BUG_ON(index >= dev->num_rx_queues);
- return index;
-}
-#endif
-
int netif_get_num_default_rss_queues(void);
void dev_kfree_skb_irq_reason(struct sk_buff *skb, enum skb_drop_reason reason);
@@ -3929,6 +4004,14 @@ int put_user_ifreq(struct ifreq *ifr, void __user *arg);
int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
void __user *data, bool *need_copyout);
int dev_ifconf(struct net *net, struct ifconf __user *ifc);
+int generic_hwtstamp_get_lower(struct net_device *dev,
+ struct kernel_hwtstamp_config *kernel_cfg);
+int generic_hwtstamp_set_lower(struct net_device *dev,
+ struct kernel_hwtstamp_config *kernel_cfg,
+ struct netlink_ext_ack *extack);
+int dev_set_hwtstamp_phylib(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack);
int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *userdata);
unsigned int dev_get_flags(const struct net_device *);
int __dev_change_flags(struct net_device *dev, unsigned int flags,
@@ -3957,6 +4040,7 @@ int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name);
int dev_get_port_parent_id(struct net_device *dev,
struct netdev_phys_item_id *ppid, bool recurse);
bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b);
+
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, int *ret);
@@ -3997,32 +4081,19 @@ static __always_inline bool __is_skb_forwardable(const struct net_device *dev,
return false;
}
-struct net_device_core_stats __percpu *netdev_core_stats_alloc(struct net_device *dev);
-
-static inline struct net_device_core_stats __percpu *dev_core_stats(struct net_device *dev)
-{
- /* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */
- struct net_device_core_stats __percpu *p = READ_ONCE(dev->core_stats);
-
- if (likely(p))
- return p;
-
- return netdev_core_stats_alloc(dev);
-}
+void netdev_core_stats_inc(struct net_device *dev, u32 offset);
#define DEV_CORE_STATS_INC(FIELD) \
static inline void dev_core_stats_##FIELD##_inc(struct net_device *dev) \
{ \
- struct net_device_core_stats __percpu *p; \
- \
- p = dev_core_stats(dev); \
- if (p) \
- this_cpu_inc(p->FIELD); \
+ netdev_core_stats_inc(dev, \
+ offsetof(struct net_device_core_stats, FIELD)); \
}
DEV_CORE_STATS_INC(rx_dropped)
DEV_CORE_STATS_INC(tx_dropped)
DEV_CORE_STATS_INC(rx_nohandler)
DEV_CORE_STATS_INC(rx_otherhost_dropped)
+#undef DEV_CORE_STATS_INC
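
The generated dev_core_stats_*_inc() wrappers now funnel into a single out-of-line netdev_core_stats_inc(), shrinking inlined callers. Usage at call sites is unchanged; a sketch of a hypothetical drop path:

static void my_drop(struct net_device *dev, struct sk_buff *skb)
{
	dev_core_stats_rx_dropped_inc(dev);	/* lazily allocated per-CPU */
	kfree_skb_reason(skb, SKB_DROP_REASON_NOMEM);
}
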
static __always_inline int ____dev_forward_skb(struct net_device *dev,
struct sk_buff *skb,
@@ -4162,6 +4233,15 @@ static inline void netdev_ref_replace(struct net_device *odev,
void linkwatch_fire_event(struct net_device *dev);
/**
+ * linkwatch_sync_dev - sync linkwatch for the given device
+ * @dev: network device to sync linkwatch for
+ *
+ * Sync linkwatch for the given device, removing it from the
+ * pending work list (if queued).
+ */
+void linkwatch_sync_dev(struct net_device *dev);
+
+/**
* netif_carrier_ok - test if carrier present
* @dev: network device
*
@@ -4827,13 +4907,6 @@ int skb_crc32c_csum_help(struct sk_buff *skb);
int skb_csum_hwoffload_help(struct sk_buff *skb,
const netdev_features_t features);
-struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
- netdev_features_t features, bool tx_path);
-struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb,
- netdev_features_t features, __be16 type);
-struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
- netdev_features_t features);
-
struct netdev_bonding_info {
ifslave slave;
ifbond master;
@@ -4856,11 +4929,6 @@ static inline void ethtool_notify(struct net_device *dev, unsigned int cmd,
}
#endif
-static inline
-struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
-{
- return __skb_gso_segment(skb, features, true);
-}
__be16 skb_network_protocol(struct sk_buff *skb, int *depth);
static inline bool can_checksum_protocol(netdev_features_t features,
@@ -4987,6 +5055,7 @@ netdev_features_t passthru_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
netdev_features_t netif_skb_features(struct sk_buff *skb);
+void skb_warn_bad_offload(const struct sk_buff *skb);
static inline bool net_gso_ok(netdev_features_t features, int gso_type)
{
@@ -5035,19 +5104,6 @@ void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs);
void netif_inherit_tso_max(struct net_device *to,
const struct net_device *from);
-static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
- int pulled_hlen, u16 mac_offset,
- int mac_len)
-{
- skb->protocol = protocol;
- skb->encapsulation = 1;
- skb_push(skb, pulled_hlen);
- skb_reset_transport_header(skb);
- skb->mac_header = mac_offset;
- skb->network_header = skb->mac_header + mac_len;
- skb->mac_len = mac_len;
-}
-
static inline bool netif_is_macsec(const struct net_device *dev)
{
return dev->priv_flags & IFF_MACSEC;
@@ -5093,6 +5149,15 @@ static inline bool netif_is_l3_slave(const struct net_device *dev)
return dev->priv_flags & IFF_L3MDEV_SLAVE;
}
+static inline int dev_sdif(const struct net_device *dev)
+{
+#ifdef CONFIG_NET_L3_MASTER_DEV
+ if (netif_is_l3_slave(dev))
+ return dev->ifindex;
+#endif
+ return 0;
+}
+
static inline bool netif_is_bridge_master(const struct net_device *dev)
{
return dev->priv_flags & IFF_EBRIDGE;
@@ -5113,6 +5178,11 @@ static inline bool netif_is_ovs_port(const struct net_device *dev)
return dev->priv_flags & IFF_OVS_DATAPATH;
}
+static inline bool netif_is_any_bridge_master(const struct net_device *dev)
+{
+ return netif_is_bridge_master(dev) || netif_is_ovs_master(dev);
+}
+
static inline bool netif_is_any_bridge_port(const struct net_device *dev)
{
return netif_is_bridge_port(dev) || netif_is_ovs_port(dev);
@@ -5241,5 +5311,6 @@ extern struct net_device *blackhole_netdev;
#define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD)
#define DEV_STATS_ADD(DEV, FIELD, VAL) \
atomic_long_add((VAL), &(DEV)->stats.__##FIELD)
+#define DEV_STATS_READ(DEV, FIELD) atomic_long_read(&(DEV)->stats.__##FIELD)
#endif /* _LINUX_NETDEVICE_H */
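
DEV_STATS_READ() completes the INC/ADD pair for the atomic_long_t-backed dev->stats fields. A minimal sketch of it feeding an ndo_get_stats64() implementation (my_get_stats64 is illustrative):

static void my_get_stats64(struct net_device *dev,
			   struct rtnl_link_stats64 *s)
{
	s->rx_errors = DEV_STATS_READ(dev, rx_errors);
	s->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
}
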
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 0762444e3767..ce660d51549b 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -11,6 +11,7 @@
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/static_key.h>
+#include <linux/module.h>
#include <linux/netfilter_defs.h>
#include <linux/netdevice.h>
#include <linux/sockptr.h>
@@ -21,6 +22,16 @@ static inline int NF_DROP_GETERR(int verdict)
return -(verdict >> NF_VERDICT_QBITS);
}
+static __always_inline int
+NF_DROP_REASON(struct sk_buff *skb, enum skb_drop_reason reason, u32 err)
+{
+ BUILD_BUG_ON(err > 0xffff);
+
+ kfree_skb_reason(skb, reason);
+
+ return ((err << 16) | NF_STOLEN);
+}
+
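
NF_DROP_REASON() frees the skb with a precise drop reason and encodes the errno in the upper verdict bits while reporting NF_STOLEN, so the core does not free the skb a second time. A hedged sketch of a hook using it (my_hook is illustrative):

static unsigned int my_hook(void *priv, struct sk_buff *skb,
			    const struct nf_hook_state *state)
{
	if (skb->len < sizeof(struct iphdr))
		return NF_DROP_REASON(skb, SKB_DROP_REASON_PKT_TOO_SMALL,
				      EINVAL);
	return NF_ACCEPT;
}
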
static inline int nf_inet_addr_cmp(const union nf_inet_addr *a1,
const union nf_inet_addr *a2)
{
@@ -463,6 +474,7 @@ struct nf_ct_hook {
const struct sk_buff *);
void (*attach)(struct sk_buff *nskb, const struct sk_buff *skb);
void (*set_closing)(struct nf_conntrack *nfct);
+ int (*confirm)(struct sk_buff *skb);
};
extern const struct nf_ct_hook __rcu *nf_ct_hook;
@@ -481,7 +493,16 @@ struct nfnl_ct_hook {
};
extern const struct nfnl_ct_hook __rcu *nfnl_ct_hook;
-/**
+struct nf_defrag_hook {
+ struct module *owner;
+ int (*enable)(struct net *net);
+ void (*disable)(struct net *net);
+};
+
+extern const struct nf_defrag_hook __rcu *nf_defrag_v4_hook;
+extern const struct nf_defrag_hook __rcu *nf_defrag_v6_hook;
+
+/*
* nf_skb_duplicated - TEE target has sent a packet
*
 * When an xtables target sends a packet, the OUTPUT and POSTROUTING
@@ -492,7 +513,7 @@ extern const struct nfnl_ct_hook __rcu *nfnl_ct_hook;
*/
DECLARE_PER_CPU(bool, nf_skb_duplicated);
-/**
+/*
* Contains bitmask of ctnetlink event subscribers, if any.
* Can't be pernet due to NETLINK_LISTEN_ALL_NSID setsockopt flag.
*/
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index e8c350a3ade1..e9f4f845d760 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -186,6 +186,8 @@ struct ip_set_type_variant {
/* Return true if "b" set is the same as "a"
* according to the create set parameters */
bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
+	/* Cancel ongoing garbage collectors before destroying the set */
+ void (*cancel_gc)(struct ip_set *set);
/* Region-locking is used */
bool region_lock;
};
@@ -242,6 +244,8 @@ extern void ip_set_type_unregister(struct ip_set_type *set_type);
/* A generic IP set */
struct ip_set {
+	/* For call_rcu in destroy */
+ struct rcu_head rcu;
/* The name of the set */
char name[IPSET_MAXNAMELEN];
/* Lock protecting the set data */
diff --git a/include/linux/netfilter/nf_conntrack_h323.h b/include/linux/netfilter/nf_conntrack_h323.h
index 9e937f64a1ad..81286c499325 100644
--- a/include/linux/netfilter/nf_conntrack_h323.h
+++ b/include/linux/netfilter/nf_conntrack_h323.h
@@ -34,10 +34,6 @@ struct nf_ct_h323_master {
int get_h225_addr(struct nf_conn *ct, unsigned char *data,
TransportAddress *taddr, union nf_inet_addr *addr,
__be16 *port);
-void nf_conntrack_h245_expect(struct nf_conn *new,
- struct nf_conntrack_expect *this);
-void nf_conntrack_q931_expect(struct nf_conn *new,
- struct nf_conntrack_expect *this);
struct nfct_h323_nat_hooks {
int (*set_h245_addr)(struct sk_buff *skb, unsigned int protoff,
diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h
index f33aa6021364..34ce5d2f37a2 100644
--- a/include/linux/netfilter/nf_conntrack_proto_gre.h
+++ b/include/linux/netfilter/nf_conntrack_proto_gre.h
@@ -25,7 +25,6 @@ struct nf_ct_gre_keymap {
int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
struct nf_conntrack_tuple *t);
-void nf_ct_gre_keymap_flush(struct net *net);
/* delete keymap entries */
void nf_ct_gre_keymap_destroy(struct nf_conn *ct);
diff --git a/include/linux/netfilter/nf_conntrack_sctp.h b/include/linux/netfilter/nf_conntrack_sctp.h
index 625f491b95de..fb31312825ae 100644
--- a/include/linux/netfilter/nf_conntrack_sctp.h
+++ b/include/linux/netfilter/nf_conntrack_sctp.h
@@ -9,6 +9,7 @@ struct ip_ct_sctp {
enum sctp_conntrack state;
__be32 vtag[IP_CT_DIR_MAX];
+ u8 init[IP_CT_DIR_MAX];
u8 last_dir;
u8 flags;
};
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index f980edfdd278..743475ca7e9d 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -42,7 +42,7 @@ static inline int nf_bridge_get_physinif(const struct sk_buff *skb)
if (!nf_bridge)
return 0;
- return nf_bridge->physindev ? nf_bridge->physindev->ifindex : 0;
+ return nf_bridge->physinif;
}
static inline int nf_bridge_get_physoutif(const struct sk_buff *skb)
@@ -56,11 +56,11 @@ static inline int nf_bridge_get_physoutif(const struct sk_buff *skb)
}
static inline struct net_device *
-nf_bridge_get_physindev(const struct sk_buff *skb)
+nf_bridge_get_physindev(const struct sk_buff *skb, struct net *net)
{
const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
- return nf_bridge ? nf_bridge->physindev : NULL;
+ return nf_bridge ? dev_get_by_index_rcu(net, nf_bridge->physinif) : NULL;
}
static inline struct net_device *
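
Since the helper now resolves the stored physinif on demand, callers must run under rcu_read_lock() and cope with a NULL result if the bridge port device has gone away. A minimal sketch under those assumptions (my_physin_mtu is illustrative):

static unsigned int my_physin_mtu(const struct sk_buff *skb, struct net *net)
{
	struct net_device *physindev;
	unsigned int mtu = 0;

	rcu_read_lock();
	physindev = nf_bridge_get_physindev(skb, net);
	if (physindev)
		mtu = READ_ONCE(physindev->mtu);	/* lockless read */
	rcu_read_unlock();
	return mtu;
}
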
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 7834c0be2831..61aa48f46dd7 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -51,7 +51,7 @@ struct nf_ipv6_ops {
u32 (*cookie_init_sequence)(const struct ipv6hdr *iph,
const struct tcphdr *th, u16 *mssp);
int (*cookie_v6_check)(const struct ipv6hdr *iph,
- const struct tcphdr *th, __u32 cookie);
+ const struct tcphdr *th);
#endif
void (*route_input)(struct sk_buff *skb);
int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb,
@@ -179,16 +179,16 @@ static inline u32 nf_ipv6_cookie_init_sequence(const struct ipv6hdr *iph,
}
static inline int nf_cookie_v6_check(const struct ipv6hdr *iph,
- const struct tcphdr *th, __u32 cookie)
+ const struct tcphdr *th)
{
#if IS_ENABLED(CONFIG_SYN_COOKIES)
#if IS_MODULE(CONFIG_IPV6)
const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();
if (v6_ops)
- return v6_ops->cookie_v6_check(iph, th, cookie);
+ return v6_ops->cookie_v6_check(iph, th);
#elif IS_BUILTIN(CONFIG_IPV6)
- return __cookie_v6_check(iph, th, cookie);
+ return __cookie_v6_check(iph, th);
#endif
#endif
return 0;
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index a1f3522daa69..100cbb261269 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -109,11 +109,18 @@ static inline int wait_on_page_fscache_killable(struct page *page)
return folio_wait_private_2_killable(page_folio(page));
}
+/* Marks used on xarray-based buffers */
+#define NETFS_BUF_PUT_MARK XA_MARK_0 /* - Page needs putting */
+#define NETFS_BUF_PAGECACHE_MARK XA_MARK_1 /* - Page needs wb/dirty flag wrangling */
+
enum netfs_io_source {
NETFS_FILL_WITH_ZEROES,
NETFS_DOWNLOAD_FROM_SERVER,
NETFS_READ_FROM_CACHE,
NETFS_INVALID_READ,
+ NETFS_UPLOAD_TO_SERVER,
+ NETFS_WRITE_TO_CACHE,
+ NETFS_INVALID_WRITE,
} __mode(byte);
typedef void (*netfs_io_terminated_t)(void *priv, ssize_t transferred_or_error,
@@ -129,9 +136,57 @@ struct netfs_inode {
struct fscache_cookie *cache;
#endif
loff_t remote_i_size; /* Size of the remote file */
+ loff_t zero_point; /* Size after which we assume there's no data
+ * on the server */
+ unsigned long flags;
+#define NETFS_ICTX_ODIRECT 0 /* The file has DIO in progress */
+#define NETFS_ICTX_UNBUFFERED 1 /* I/O should not use the pagecache */
+#define NETFS_ICTX_WRITETHROUGH 2 /* Write-through caching */
+#define NETFS_ICTX_NO_WRITE_STREAMING 3 /* Don't engage in write-streaming */
+};
+
+/*
+ * A netfs group - for instance a ceph snap. This is marked on dirty pages and
+ * pages marked with a group must be flushed before they can be written under
+ * the domain of another group.
+ */
+struct netfs_group {
+ refcount_t ref;
+ void (*free)(struct netfs_group *netfs_group);
};
/*
+ * Information about a dirty page (attached only if necessary).
+ * folio->private
+ */
+struct netfs_folio {
+ struct netfs_group *netfs_group; /* Filesystem's grouping marker (or NULL). */
+ unsigned int dirty_offset; /* Write-streaming dirty data offset */
+ unsigned int dirty_len; /* Write-streaming dirty data length */
+};
+#define NETFS_FOLIO_INFO 0x1UL /* OR'd with folio->private. */
+
+static inline struct netfs_folio *netfs_folio_info(struct folio *folio)
+{
+ void *priv = folio_get_private(folio);
+
+ if ((unsigned long)priv & NETFS_FOLIO_INFO)
+ return (struct netfs_folio *)((unsigned long)priv & ~NETFS_FOLIO_INFO);
+ return NULL;
+}
+
+static inline struct netfs_group *netfs_folio_group(struct folio *folio)
+{
+ struct netfs_folio *finfo;
+ void *priv = folio_get_private(folio);
+
+ finfo = netfs_folio_info(folio);
+ if (finfo)
+ return finfo->netfs_group;
+ return priv;
+}
+
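
The helpers above decode a pointer-tagging convention: bit 0 of folio->private distinguishes a tagged struct netfs_folio from a bare struct netfs_group. Attaching an info record would then look roughly like this sketch (my_attach_finfo is hypothetical):

static void my_attach_finfo(struct folio *folio, struct netfs_folio *finfo)
{
	/* Tag the pointer so netfs_folio_info() can recognise it. */
	folio_attach_private(folio,
			     (void *)((unsigned long)finfo | NETFS_FOLIO_INFO));
}
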
+/*
* Resources required to do operations on a cache.
*/
struct netfs_cache_resources {
@@ -143,17 +198,24 @@ struct netfs_cache_resources {
};
/*
- * Descriptor for a single component subrequest.
+ * Descriptor for a single component subrequest. Each operation represents an
+ * individual read/write from/to a server, a cache, a journal, etc.
+ *
+ * The buffer iterator is persistent for the life of the subrequest struct and
+ * the pages it points to can be relied on to exist for the duration.
*/
struct netfs_io_subrequest {
struct netfs_io_request *rreq; /* Supervising I/O request */
+ struct work_struct work;
struct list_head rreq_link; /* Link in rreq->subrequests */
+ struct iov_iter io_iter; /* Iterator for this subrequest */
loff_t start; /* Where to start the I/O */
size_t len; /* Size of the I/O */
size_t transferred; /* Amount of data transferred */
refcount_t ref;
short error; /* 0 or error that occurred */
unsigned short debug_index; /* Index in list (for debugging output) */
+ unsigned int max_nr_segs; /* 0 or max number of segments in an iterator */
enum netfs_io_source source; /* Where to read from/write to */
unsigned long flags;
#define NETFS_SREQ_COPY_TO_CACHE 0 /* Set if should copy the data to the cache */
@@ -168,6 +230,13 @@ enum netfs_io_origin {
NETFS_READAHEAD, /* This read was triggered by readahead */
NETFS_READPAGE, /* This read is a synchronous read */
NETFS_READ_FOR_WRITE, /* This read is to prepare a write */
+ NETFS_WRITEBACK, /* This write was triggered by writepages */
+ NETFS_WRITETHROUGH, /* This write was made by netfs_perform_write() */
+ NETFS_LAUNDER_WRITE, /* This is triggered by ->launder_folio() */
+ NETFS_UNBUFFERED_WRITE, /* This is an unbuffered write */
+ NETFS_DIO_READ, /* This is a direct I/O read */
+ NETFS_DIO_WRITE, /* This is a direct I/O write */
+ nr__netfs_io_origin
} __mode(byte);
/*
@@ -175,19 +244,34 @@ enum netfs_io_origin {
* operations to a variety of data stores and then stitch the result together.
*/
struct netfs_io_request {
- struct work_struct work;
+ union {
+ struct work_struct work;
+ struct rcu_head rcu;
+ };
struct inode *inode; /* The file being accessed */
struct address_space *mapping; /* The mapping being accessed */
+ struct kiocb *iocb; /* AIO completion vector */
struct netfs_cache_resources cache_resources;
+ struct list_head proc_link; /* Link in netfs_iorequests */
struct list_head subrequests; /* Contributory I/O operations */
+ struct iov_iter iter; /* Unencrypted-side iterator */
+ struct iov_iter io_iter; /* I/O (Encrypted-side) iterator */
void *netfs_priv; /* Private data for the netfs */
+ struct bio_vec *direct_bv; /* DIO buffer list (when handling iovec-iter) */
+ unsigned int direct_bv_count; /* Number of elements in direct_bv[] */
unsigned int debug_id;
+ unsigned int rsize; /* Maximum read size (0 for none) */
+ unsigned int wsize; /* Maximum write size (0 for none) */
+ unsigned int subreq_counter; /* Next subreq->debug_index */
atomic_t nr_outstanding; /* Number of ops in progress */
atomic_t nr_copy_ops; /* Number of copy-to-cache ops in progress */
size_t submitted; /* Amount submitted for I/O so far */
size_t len; /* Length of the request */
+ size_t upper_len; /* Length can be extended to here */
+ size_t transferred; /* Amount to be indicated as transferred */
short error; /* 0 or error that occurred */
enum netfs_io_origin origin; /* Origin of the request */
+ bool direct_bv_unpin; /* T if direct_bv[] must be unpinned */
loff_t i_size; /* Size of the file */
loff_t start; /* Start position */
pgoff_t no_unlock_folio; /* Don't unlock this folio after read */
@@ -199,17 +283,25 @@ struct netfs_io_request {
#define NETFS_RREQ_DONT_UNLOCK_FOLIOS 3 /* Don't unlock the folios on completion */
#define NETFS_RREQ_FAILED 4 /* The request failed */
#define NETFS_RREQ_IN_PROGRESS 5 /* Unlocked when the request completes */
+#define NETFS_RREQ_WRITE_TO_CACHE 7 /* Need to write to the cache */
+#define NETFS_RREQ_UPLOAD_TO_SERVER 8 /* Need to write to the server */
+#define NETFS_RREQ_NONBLOCK 9 /* Don't block if possible (O_NONBLOCK) */
+#define NETFS_RREQ_BLOCKED 10 /* We blocked */
const struct netfs_request_ops *netfs_ops;
+ void (*cleanup)(struct netfs_io_request *req);
};
/*
* Operations the network filesystem can/must provide to the helpers.
*/
struct netfs_request_ops {
+ unsigned int io_request_size; /* Alloc size for netfs_io_request struct */
+ unsigned int io_subrequest_size; /* Alloc size for netfs_io_subrequest struct */
int (*init_request)(struct netfs_io_request *rreq, struct file *file);
void (*free_request)(struct netfs_io_request *rreq);
- int (*begin_cache_operation)(struct netfs_io_request *rreq);
+ void (*free_subrequest)(struct netfs_io_subrequest *rreq);
+ /* Read request handling */
void (*expand_readahead)(struct netfs_io_request *rreq);
bool (*clamp_length)(struct netfs_io_subrequest *subreq);
void (*issue_read)(struct netfs_io_subrequest *subreq);
@@ -217,6 +309,14 @@ struct netfs_request_ops {
int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
struct folio **foliop, void **_fsdata);
void (*done)(struct netfs_io_request *rreq);
+
+ /* Modification handling */
+ void (*update_i_size)(struct inode *inode, loff_t i_size);
+
+ /* Write request handling */
+ void (*create_write_requests)(struct netfs_io_request *wreq,
+ loff_t start, size_t len);
+ void (*invalidate_cache)(struct netfs_io_request *wreq);
};
/*
@@ -229,8 +329,7 @@ enum netfs_read_from_hole {
};
/*
- * Table of operations for access to a cache. This is obtained by
- * rreq->ops->begin_cache_operation().
+ * Table of operations for access to a cache.
*/
struct netfs_cache_ops {
/* End an operation */
@@ -265,8 +364,8 @@ struct netfs_cache_ops {
* actually do.
*/
int (*prepare_write)(struct netfs_cache_resources *cres,
- loff_t *_start, size_t *_len, loff_t i_size,
- bool no_space_allocated_yet);
+ loff_t *_start, size_t *_len, size_t upper_len,
+ loff_t i_size, bool no_space_allocated_yet);
/* Prepare an on-demand read operation, shortening it to a cached/uncached
* boundary as appropriate.
@@ -284,26 +383,62 @@ struct netfs_cache_ops {
loff_t *_data_start, size_t *_data_len);
};
+/* High-level read API. */
+ssize_t netfs_unbuffered_read_iter(struct kiocb *iocb, struct iov_iter *iter);
+ssize_t netfs_buffered_read_iter(struct kiocb *iocb, struct iov_iter *iter);
+ssize_t netfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter);
+
+/* High-level write API */
+ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
+ struct netfs_group *netfs_group);
+ssize_t netfs_buffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *from,
+ struct netfs_group *netfs_group);
+ssize_t netfs_unbuffered_write_iter(struct kiocb *iocb, struct iov_iter *from);
+ssize_t netfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from);
+
+/* Address operations API */
struct readahead_control;
void netfs_readahead(struct readahead_control *);
int netfs_read_folio(struct file *, struct folio *);
int netfs_write_begin(struct netfs_inode *, struct file *,
- struct address_space *, loff_t pos, unsigned int len,
- struct folio **, void **fsdata);
-
+ struct address_space *, loff_t pos, unsigned int len,
+ struct folio **, void **fsdata);
+int netfs_writepages(struct address_space *mapping,
+ struct writeback_control *wbc);
+bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio);
+int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc);
+void netfs_clear_inode_writeback(struct inode *inode, const void *aux);
+void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
+bool netfs_release_folio(struct folio *folio, gfp_t gfp);
+int netfs_launder_folio(struct folio *folio);
+
+/* VMA operations API. */
+vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group);
+
+/* (Sub)request management API. */
void netfs_subreq_terminated(struct netfs_io_subrequest *, ssize_t, bool);
void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
enum netfs_sreq_ref_trace what);
void netfs_put_subrequest(struct netfs_io_subrequest *subreq,
bool was_async, enum netfs_sreq_ref_trace what);
-void netfs_stats_show(struct seq_file *);
ssize_t netfs_extract_user_iter(struct iov_iter *orig, size_t orig_len,
struct iov_iter *new,
iov_iter_extraction_t extraction_flags);
-struct sg_table;
-ssize_t netfs_extract_iter_to_sg(struct iov_iter *iter, size_t len,
- struct sg_table *sgtable, unsigned int sg_max,
- iov_iter_extraction_t extraction_flags);
+size_t netfs_limit_iter(const struct iov_iter *iter, size_t start_offset,
+ size_t max_size, size_t max_segs);
+struct netfs_io_subrequest *netfs_create_write_request(
+ struct netfs_io_request *wreq, enum netfs_io_source dest,
+ loff_t start, size_t len, work_func_t worker);
+void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
+ bool was_async);
+void netfs_queue_write_request(struct netfs_io_subrequest *subreq);
+
+int netfs_start_io_read(struct inode *inode);
+void netfs_end_io_read(struct inode *inode);
+int netfs_start_io_write(struct inode *inode);
+void netfs_end_io_write(struct inode *inode);
+int netfs_start_io_direct(struct inode *inode);
+void netfs_end_io_direct(struct inode *inode);
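
With the high-level read/write entry points above, a network filesystem can hand most of its file I/O to netfslib. A hedged sketch of the wiring (the myfs names are illustrative):

static const struct file_operations myfs_file_ops = {
	.llseek		= generic_file_llseek,
	.read_iter	= netfs_file_read_iter,
	.write_iter	= netfs_file_write_iter,
	.mmap		= generic_file_mmap,
};
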
/**
* netfs_inode - Get the netfs inode context from the inode
@@ -321,30 +456,44 @@ static inline struct netfs_inode *netfs_inode(struct inode *inode)
* netfs_inode_init - Initialise a netfslib inode context
* @ctx: The netfs inode to initialise
* @ops: The netfs's operations list
+ * @use_zero_point: True to use the zero_point read optimisation
*
* Initialise the netfs library context struct. This is expected to follow on
* directly from the VFS inode struct.
*/
static inline void netfs_inode_init(struct netfs_inode *ctx,
- const struct netfs_request_ops *ops)
+ const struct netfs_request_ops *ops,
+ bool use_zero_point)
{
ctx->ops = ops;
ctx->remote_i_size = i_size_read(&ctx->inode);
+ ctx->zero_point = LLONG_MAX;
+ ctx->flags = 0;
#if IS_ENABLED(CONFIG_FSCACHE)
ctx->cache = NULL;
#endif
+ /* ->releasepage() drives zero_point */
+ if (use_zero_point) {
+ ctx->zero_point = ctx->remote_i_size;
+ mapping_set_release_always(ctx->inode.i_mapping);
+ }
}
/**
* netfs_resize_file - Note that a file got resized
* @ctx: The netfs inode being resized
* @new_i_size: The new file size
+ * @changed_on_server: The change was applied to the server
*
* Inform the netfs lib that a file got resized so that it can adjust its state.
*/
-static inline void netfs_resize_file(struct netfs_inode *ctx, loff_t new_i_size)
+static inline void netfs_resize_file(struct netfs_inode *ctx, loff_t new_i_size,
+ bool changed_on_server)
{
- ctx->remote_i_size = new_i_size;
+ if (changed_on_server)
+ ctx->remote_i_size = new_i_size;
+ if (new_i_size < ctx->zero_point)
+ ctx->zero_point = new_i_size;
}
/**
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 19c0791ed9d5..1a4445bf2ab9 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -50,6 +50,7 @@ struct netlink_kernel_cfg {
struct mutex *cb_mutex;
int (*bind)(struct net *net, int group);
void (*unbind)(struct net *net, int group);
+ void (*release) (struct sock *sk, unsigned long *groups);
};
struct sock *__netlink_kernel_create(struct net *net, int unit,
@@ -227,6 +228,13 @@ bool netlink_strict_get_check(struct sk_buff *skb);
int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock);
int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid,
__u32 group, gfp_t allocation);
+
+typedef int (*netlink_filter_fn)(struct sock *dsk, struct sk_buff *skb, void *data);
+
+int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb,
+ __u32 portid, __u32 group, gfp_t allocation,
+ netlink_filter_fn filter,
+ void *filter_data);
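
A netlink_filter_fn is consulted per candidate socket; judging by the historical uevent filter, a non-zero return suppresses delivery to that socket. A hedged sketch that keeps a broadcast inside one network namespace (my_ns_filter is illustrative, with data assumed to carry the intended struct net pointer):

static int my_ns_filter(struct sock *dsk, struct sk_buff *skb, void *data)
{
	struct net *net = data;

	return sock_net(dsk) != net;	/* non-zero: skip this socket */
}
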
int netlink_set_err(struct sock *ssk, __u32 portid, __u32 group, int code);
int netlink_register_notifier(struct notifier_block *nb);
int netlink_unregister_notifier(struct notifier_block *nb);
@@ -311,6 +319,7 @@ struct netlink_dump_control {
int (*start)(struct netlink_callback *);
int (*dump)(struct sk_buff *skb, struct netlink_callback *);
int (*done)(struct netlink_callback *);
+ struct netlink_ext_ack *extack;
void *data;
struct module *module;
u32 min_dump_alloc;
@@ -344,5 +353,6 @@ bool netlink_ns_capable(const struct sk_buff *skb,
struct user_namespace *ns, int cap);
bool netlink_capable(const struct sk_buff *skb, int cap);
bool netlink_net_capable(const struct sk_buff *skb, int cap);
+struct sk_buff *netlink_alloc_large_skb(unsigned int size, int broadcast);
#endif /* __LINUX_NETLINK_H */
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 730003c4f4af..ef8d2d618d5b 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -150,7 +150,7 @@ enum nfs_opnum4 {
OP_WRITE_SAME = 70,
OP_CLONE = 71,
- /* xattr support (RFC8726) */
+ /* xattr support (RFC8276) */
OP_GETXATTR = 72,
OP_SETXATTR = 73,
OP_LISTXATTRS = 74,
@@ -389,79 +389,203 @@ enum lock_type4 {
NFS4_WRITEW_LT = 4
};
+/*
+ * Symbol names and values are from RFC 7531 Section 2.
+ * "XDR Description of NFSv4.0"
+ */
+enum {
+ FATTR4_SUPPORTED_ATTRS = 0,
+ FATTR4_TYPE = 1,
+ FATTR4_FH_EXPIRE_TYPE = 2,
+ FATTR4_CHANGE = 3,
+ FATTR4_SIZE = 4,
+ FATTR4_LINK_SUPPORT = 5,
+ FATTR4_SYMLINK_SUPPORT = 6,
+ FATTR4_NAMED_ATTR = 7,
+ FATTR4_FSID = 8,
+ FATTR4_UNIQUE_HANDLES = 9,
+ FATTR4_LEASE_TIME = 10,
+ FATTR4_RDATTR_ERROR = 11,
+ FATTR4_ACL = 12,
+ FATTR4_ACLSUPPORT = 13,
+ FATTR4_ARCHIVE = 14,
+ FATTR4_CANSETTIME = 15,
+ FATTR4_CASE_INSENSITIVE = 16,
+ FATTR4_CASE_PRESERVING = 17,
+ FATTR4_CHOWN_RESTRICTED = 18,
+ FATTR4_FILEHANDLE = 19,
+ FATTR4_FILEID = 20,
+ FATTR4_FILES_AVAIL = 21,
+ FATTR4_FILES_FREE = 22,
+ FATTR4_FILES_TOTAL = 23,
+ FATTR4_FS_LOCATIONS = 24,
+ FATTR4_HIDDEN = 25,
+ FATTR4_HOMOGENEOUS = 26,
+ FATTR4_MAXFILESIZE = 27,
+ FATTR4_MAXLINK = 28,
+ FATTR4_MAXNAME = 29,
+ FATTR4_MAXREAD = 30,
+ FATTR4_MAXWRITE = 31,
+ FATTR4_MIMETYPE = 32,
+ FATTR4_MODE = 33,
+ FATTR4_NO_TRUNC = 34,
+ FATTR4_NUMLINKS = 35,
+ FATTR4_OWNER = 36,
+ FATTR4_OWNER_GROUP = 37,
+ FATTR4_QUOTA_AVAIL_HARD = 38,
+ FATTR4_QUOTA_AVAIL_SOFT = 39,
+ FATTR4_QUOTA_USED = 40,
+ FATTR4_RAWDEV = 41,
+ FATTR4_SPACE_AVAIL = 42,
+ FATTR4_SPACE_FREE = 43,
+ FATTR4_SPACE_TOTAL = 44,
+ FATTR4_SPACE_USED = 45,
+ FATTR4_SYSTEM = 46,
+ FATTR4_TIME_ACCESS = 47,
+ FATTR4_TIME_ACCESS_SET = 48,
+ FATTR4_TIME_BACKUP = 49,
+ FATTR4_TIME_CREATE = 50,
+ FATTR4_TIME_DELTA = 51,
+ FATTR4_TIME_METADATA = 52,
+ FATTR4_TIME_MODIFY = 53,
+ FATTR4_TIME_MODIFY_SET = 54,
+ FATTR4_MOUNTED_ON_FILEID = 55,
+};
+
+/*
+ * Symbol names and values are from RFC 5662 Section 2.
+ * "XDR Description of NFSv4.1"
+ */
+enum {
+ FATTR4_DIR_NOTIF_DELAY = 56,
+ FATTR4_DIRENT_NOTIF_DELAY = 57,
+ FATTR4_DACL = 58,
+ FATTR4_SACL = 59,
+ FATTR4_CHANGE_POLICY = 60,
+ FATTR4_FS_STATUS = 61,
+ FATTR4_FS_LAYOUT_TYPES = 62,
+ FATTR4_LAYOUT_HINT = 63,
+ FATTR4_LAYOUT_TYPES = 64,
+ FATTR4_LAYOUT_BLKSIZE = 65,
+ FATTR4_LAYOUT_ALIGNMENT = 66,
+ FATTR4_FS_LOCATIONS_INFO = 67,
+ FATTR4_MDSTHRESHOLD = 68,
+ FATTR4_RETENTION_GET = 69,
+ FATTR4_RETENTION_SET = 70,
+ FATTR4_RETENTEVT_GET = 71,
+ FATTR4_RETENTEVT_SET = 72,
+ FATTR4_RETENTION_HOLD = 73,
+ FATTR4_MODE_SET_MASKED = 74,
+ FATTR4_SUPPATTR_EXCLCREAT = 75,
+ FATTR4_FS_CHARSET_CAP = 76,
+};
+
+/*
+ * Symbol names and values are from RFC 7863 Section 2.
+ * "XDR Description of NFSv4.2"
+ */
+enum {
+ FATTR4_CLONE_BLKSIZE = 77,
+ FATTR4_SPACE_FREED = 78,
+ FATTR4_CHANGE_ATTR_TYPE = 79,
+ FATTR4_SEC_LABEL = 80,
+};
+
+/*
+ * Symbol names and values are from RFC 8275 Section 5.
+ * "The mode_umask Attribute"
+ */
+enum {
+ FATTR4_MODE_UMASK = 81,
+};
+
+/*
+ * Symbol names and values are from RFC 8276 Section 8.6.
+ * "Numeric Values Assigned to Protocol Extensions"
+ */
+enum {
+ FATTR4_XATTR_SUPPORT = 82,
+};
+
+/*
+ * The following internal definitions enable processing the above
+ * attribute bits within 32-bit word boundaries.
+ */
/* Mandatory Attributes */
-#define FATTR4_WORD0_SUPPORTED_ATTRS (1UL << 0)
-#define FATTR4_WORD0_TYPE (1UL << 1)
-#define FATTR4_WORD0_FH_EXPIRE_TYPE (1UL << 2)
-#define FATTR4_WORD0_CHANGE (1UL << 3)
-#define FATTR4_WORD0_SIZE (1UL << 4)
-#define FATTR4_WORD0_LINK_SUPPORT (1UL << 5)
-#define FATTR4_WORD0_SYMLINK_SUPPORT (1UL << 6)
-#define FATTR4_WORD0_NAMED_ATTR (1UL << 7)
-#define FATTR4_WORD0_FSID (1UL << 8)
-#define FATTR4_WORD0_UNIQUE_HANDLES (1UL << 9)
-#define FATTR4_WORD0_LEASE_TIME (1UL << 10)
-#define FATTR4_WORD0_RDATTR_ERROR (1UL << 11)
+#define FATTR4_WORD0_SUPPORTED_ATTRS BIT(FATTR4_SUPPORTED_ATTRS)
+#define FATTR4_WORD0_TYPE BIT(FATTR4_TYPE)
+#define FATTR4_WORD0_FH_EXPIRE_TYPE BIT(FATTR4_FH_EXPIRE_TYPE)
+#define FATTR4_WORD0_CHANGE BIT(FATTR4_CHANGE)
+#define FATTR4_WORD0_SIZE BIT(FATTR4_SIZE)
+#define FATTR4_WORD0_LINK_SUPPORT BIT(FATTR4_LINK_SUPPORT)
+#define FATTR4_WORD0_SYMLINK_SUPPORT BIT(FATTR4_SYMLINK_SUPPORT)
+#define FATTR4_WORD0_NAMED_ATTR BIT(FATTR4_NAMED_ATTR)
+#define FATTR4_WORD0_FSID BIT(FATTR4_FSID)
+#define FATTR4_WORD0_UNIQUE_HANDLES BIT(FATTR4_UNIQUE_HANDLES)
+#define FATTR4_WORD0_LEASE_TIME BIT(FATTR4_LEASE_TIME)
+#define FATTR4_WORD0_RDATTR_ERROR BIT(FATTR4_RDATTR_ERROR)
/* Mandatory in NFSv4.1 */
-#define FATTR4_WORD2_SUPPATTR_EXCLCREAT (1UL << 11)
+#define FATTR4_WORD2_SUPPATTR_EXCLCREAT BIT(FATTR4_SUPPATTR_EXCLCREAT - 64)
/* Recommended Attributes */
-#define FATTR4_WORD0_ACL (1UL << 12)
-#define FATTR4_WORD0_ACLSUPPORT (1UL << 13)
-#define FATTR4_WORD0_ARCHIVE (1UL << 14)
-#define FATTR4_WORD0_CANSETTIME (1UL << 15)
-#define FATTR4_WORD0_CASE_INSENSITIVE (1UL << 16)
-#define FATTR4_WORD0_CASE_PRESERVING (1UL << 17)
-#define FATTR4_WORD0_CHOWN_RESTRICTED (1UL << 18)
-#define FATTR4_WORD0_FILEHANDLE (1UL << 19)
-#define FATTR4_WORD0_FILEID (1UL << 20)
-#define FATTR4_WORD0_FILES_AVAIL (1UL << 21)
-#define FATTR4_WORD0_FILES_FREE (1UL << 22)
-#define FATTR4_WORD0_FILES_TOTAL (1UL << 23)
-#define FATTR4_WORD0_FS_LOCATIONS (1UL << 24)
-#define FATTR4_WORD0_HIDDEN (1UL << 25)
-#define FATTR4_WORD0_HOMOGENEOUS (1UL << 26)
-#define FATTR4_WORD0_MAXFILESIZE (1UL << 27)
-#define FATTR4_WORD0_MAXLINK (1UL << 28)
-#define FATTR4_WORD0_MAXNAME (1UL << 29)
-#define FATTR4_WORD0_MAXREAD (1UL << 30)
-#define FATTR4_WORD0_MAXWRITE (1UL << 31)
-#define FATTR4_WORD1_MIMETYPE (1UL << 0)
-#define FATTR4_WORD1_MODE (1UL << 1)
-#define FATTR4_WORD1_NO_TRUNC (1UL << 2)
-#define FATTR4_WORD1_NUMLINKS (1UL << 3)
-#define FATTR4_WORD1_OWNER (1UL << 4)
-#define FATTR4_WORD1_OWNER_GROUP (1UL << 5)
-#define FATTR4_WORD1_QUOTA_HARD (1UL << 6)
-#define FATTR4_WORD1_QUOTA_SOFT (1UL << 7)
-#define FATTR4_WORD1_QUOTA_USED (1UL << 8)
-#define FATTR4_WORD1_RAWDEV (1UL << 9)
-#define FATTR4_WORD1_SPACE_AVAIL (1UL << 10)
-#define FATTR4_WORD1_SPACE_FREE (1UL << 11)
-#define FATTR4_WORD1_SPACE_TOTAL (1UL << 12)
-#define FATTR4_WORD1_SPACE_USED (1UL << 13)
-#define FATTR4_WORD1_SYSTEM (1UL << 14)
-#define FATTR4_WORD1_TIME_ACCESS (1UL << 15)
-#define FATTR4_WORD1_TIME_ACCESS_SET (1UL << 16)
-#define FATTR4_WORD1_TIME_BACKUP (1UL << 17)
-#define FATTR4_WORD1_TIME_CREATE (1UL << 18)
-#define FATTR4_WORD1_TIME_DELTA (1UL << 19)
-#define FATTR4_WORD1_TIME_METADATA (1UL << 20)
-#define FATTR4_WORD1_TIME_MODIFY (1UL << 21)
-#define FATTR4_WORD1_TIME_MODIFY_SET (1UL << 22)
-#define FATTR4_WORD1_MOUNTED_ON_FILEID (1UL << 23)
-#define FATTR4_WORD1_DACL (1UL << 26)
-#define FATTR4_WORD1_SACL (1UL << 27)
-#define FATTR4_WORD1_FS_LAYOUT_TYPES (1UL << 30)
-#define FATTR4_WORD2_LAYOUT_TYPES (1UL << 0)
-#define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1)
-#define FATTR4_WORD2_MDSTHRESHOLD (1UL << 4)
-#define FATTR4_WORD2_CLONE_BLKSIZE (1UL << 13)
-#define FATTR4_WORD2_CHANGE_ATTR_TYPE (1UL << 15)
-#define FATTR4_WORD2_SECURITY_LABEL (1UL << 16)
-#define FATTR4_WORD2_MODE_UMASK (1UL << 17)
-#define FATTR4_WORD2_XATTR_SUPPORT (1UL << 18)
+#define FATTR4_WORD0_ACL BIT(FATTR4_ACL)
+#define FATTR4_WORD0_ACLSUPPORT BIT(FATTR4_ACLSUPPORT)
+#define FATTR4_WORD0_ARCHIVE BIT(FATTR4_ARCHIVE)
+#define FATTR4_WORD0_CANSETTIME BIT(FATTR4_CANSETTIME)
+#define FATTR4_WORD0_CASE_INSENSITIVE BIT(FATTR4_CASE_INSENSITIVE)
+#define FATTR4_WORD0_CASE_PRESERVING BIT(FATTR4_CASE_PRESERVING)
+#define FATTR4_WORD0_CHOWN_RESTRICTED BIT(FATTR4_CHOWN_RESTRICTED)
+#define FATTR4_WORD0_FILEHANDLE BIT(FATTR4_FILEHANDLE)
+#define FATTR4_WORD0_FILEID BIT(FATTR4_FILEID)
+#define FATTR4_WORD0_FILES_AVAIL BIT(FATTR4_FILES_AVAIL)
+#define FATTR4_WORD0_FILES_FREE BIT(FATTR4_FILES_FREE)
+#define FATTR4_WORD0_FILES_TOTAL BIT(FATTR4_FILES_TOTAL)
+#define FATTR4_WORD0_FS_LOCATIONS BIT(FATTR4_FS_LOCATIONS)
+#define FATTR4_WORD0_HIDDEN BIT(FATTR4_HIDDEN)
+#define FATTR4_WORD0_HOMOGENEOUS BIT(FATTR4_HOMOGENEOUS)
+#define FATTR4_WORD0_MAXFILESIZE BIT(FATTR4_MAXFILESIZE)
+#define FATTR4_WORD0_MAXLINK BIT(FATTR4_MAXLINK)
+#define FATTR4_WORD0_MAXNAME BIT(FATTR4_MAXNAME)
+#define FATTR4_WORD0_MAXREAD BIT(FATTR4_MAXREAD)
+#define FATTR4_WORD0_MAXWRITE BIT(FATTR4_MAXWRITE)
+
+#define FATTR4_WORD1_MIMETYPE BIT(FATTR4_MIMETYPE - 32)
+#define FATTR4_WORD1_MODE BIT(FATTR4_MODE - 32)
+#define FATTR4_WORD1_NO_TRUNC BIT(FATTR4_NO_TRUNC - 32)
+#define FATTR4_WORD1_NUMLINKS BIT(FATTR4_NUMLINKS - 32)
+#define FATTR4_WORD1_OWNER BIT(FATTR4_OWNER - 32)
+#define FATTR4_WORD1_OWNER_GROUP BIT(FATTR4_OWNER_GROUP - 32)
+#define FATTR4_WORD1_QUOTA_HARD BIT(FATTR4_QUOTA_AVAIL_HARD - 32)
+#define FATTR4_WORD1_QUOTA_SOFT BIT(FATTR4_QUOTA_AVAIL_SOFT - 32)
+#define FATTR4_WORD1_QUOTA_USED BIT(FATTR4_QUOTA_USED - 32)
+#define FATTR4_WORD1_RAWDEV BIT(FATTR4_RAWDEV - 32)
+#define FATTR4_WORD1_SPACE_AVAIL BIT(FATTR4_SPACE_AVAIL - 32)
+#define FATTR4_WORD1_SPACE_FREE BIT(FATTR4_SPACE_FREE - 32)
+#define FATTR4_WORD1_SPACE_TOTAL BIT(FATTR4_SPACE_TOTAL - 32)
+#define FATTR4_WORD1_SPACE_USED BIT(FATTR4_SPACE_USED - 32)
+#define FATTR4_WORD1_SYSTEM BIT(FATTR4_SYSTEM - 32)
+#define FATTR4_WORD1_TIME_ACCESS BIT(FATTR4_TIME_ACCESS - 32)
+#define FATTR4_WORD1_TIME_ACCESS_SET BIT(FATTR4_TIME_ACCESS_SET - 32)
+#define FATTR4_WORD1_TIME_BACKUP BIT(FATTR4_TIME_BACKUP - 32)
+#define FATTR4_WORD1_TIME_CREATE BIT(FATTR4_TIME_CREATE - 32)
+#define FATTR4_WORD1_TIME_DELTA BIT(FATTR4_TIME_DELTA - 32)
+#define FATTR4_WORD1_TIME_METADATA BIT(FATTR4_TIME_METADATA - 32)
+#define FATTR4_WORD1_TIME_MODIFY BIT(FATTR4_TIME_MODIFY - 32)
+#define FATTR4_WORD1_TIME_MODIFY_SET BIT(FATTR4_TIME_MODIFY_SET - 32)
+#define FATTR4_WORD1_MOUNTED_ON_FILEID BIT(FATTR4_MOUNTED_ON_FILEID - 32)
+#define FATTR4_WORD1_DACL BIT(FATTR4_DACL - 32)
+#define FATTR4_WORD1_SACL BIT(FATTR4_SACL - 32)
+#define FATTR4_WORD1_FS_LAYOUT_TYPES BIT(FATTR4_FS_LAYOUT_TYPES - 32)
+
+#define FATTR4_WORD2_LAYOUT_TYPES BIT(FATTR4_LAYOUT_TYPES - 64)
+#define FATTR4_WORD2_LAYOUT_BLKSIZE BIT(FATTR4_LAYOUT_BLKSIZE - 64)
+#define FATTR4_WORD2_MDSTHRESHOLD BIT(FATTR4_MDSTHRESHOLD - 64)
+#define FATTR4_WORD2_CLONE_BLKSIZE BIT(FATTR4_CLONE_BLKSIZE - 64)
+#define FATTR4_WORD2_CHANGE_ATTR_TYPE BIT(FATTR4_CHANGE_ATTR_TYPE - 64)
+#define FATTR4_WORD2_SECURITY_LABEL BIT(FATTR4_SEC_LABEL - 64)
+#define FATTR4_WORD2_MODE_UMASK BIT(FATTR4_MODE_UMASK - 64)
+#define FATTR4_WORD2_XATTR_SUPPORT BIT(FATTR4_XATTR_SUPPORT - 64)
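
The word/bit split behind these defines is simply attr / 32 and attr % 32. As a worked example, FATTR4_TIME_MODIFY = 53 lands in word 1, bit 21, matching the previous literal (1UL << 21). A generic test could be sketched as below (attr_is_set is illustrative):

static inline bool attr_is_set(const u32 *bitmap, unsigned int attr)
{
	return bitmap[attr / 32] & BIT(attr % 32);
}
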
/* MDS threshold bitmap bits */
#define THRESHOLD_RD (1UL << 0)
@@ -745,4 +869,26 @@ enum {
RCA4_TYPE_MASK_OTHER_LAYOUT_MAX = 15,
};
+enum nfs_cb_opnum4 {
+ OP_CB_GETATTR = 3,
+ OP_CB_RECALL = 4,
+
+ /* Callback operations new to NFSv4.1 */
+ OP_CB_LAYOUTRECALL = 5,
+ OP_CB_NOTIFY = 6,
+ OP_CB_PUSH_DELEG = 7,
+ OP_CB_RECALL_ANY = 8,
+ OP_CB_RECALLABLE_OBJ_AVAIL = 9,
+ OP_CB_RECALL_SLOT = 10,
+ OP_CB_SEQUENCE = 11,
+ OP_CB_WANTS_CANCELLED = 12,
+ OP_CB_NOTIFY_LOCK = 13,
+ OP_CB_NOTIFY_DEVICEID = 14,
+
+ /* Callback operations new to NFSv4.2 */
+ OP_CB_OFFLOAD = 15,
+
+ OP_CB_ILLEGAL = 10044,
+};
+
#endif
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 279262057a92..f5ce7b101146 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -595,7 +595,6 @@ extern void nfs_complete_unlink(struct dentry *dentry, struct inode *);
* linux/fs/nfs/write.c
*/
extern int nfs_congestion_kb;
-extern int nfs_writepage(struct page *page, struct writeback_control *wbc);
extern int nfs_writepages(struct address_space *, struct writeback_control *);
extern int nfs_flush_incompatible(struct file *file, struct folio *folio);
extern int nfs_update_folio(struct file *file, struct folio *folio,
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index ea2f7e6b1b0b..92de074e63b9 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -48,6 +48,7 @@ struct nfs_client {
#define NFS_CS_NOPING 6 /* - don't ping on connect */
#define NFS_CS_DS 7 /* - Server is a DS */
#define NFS_CS_REUSEPORT 8 /* - reuse src port on reconnect */
+#define NFS_CS_PNFS 9 /* - Server used for pnfs */
struct sockaddr_storage cl_addr; /* server identifier */
size_t cl_addrlen;
char * cl_hostname; /* hostname of server */
@@ -63,7 +64,8 @@ struct nfs_client {
u32 cl_minorversion;/* NFSv4 minorversion */
unsigned int cl_nconnect; /* Number of connections */
unsigned int cl_max_connect; /* max number of xprts allowed */
- const char * cl_principal; /* used for machine cred */
+ const char * cl_principal; /* used for machine cred */
+ struct xprtsec_parms cl_xprtsec; /* xprt security policy */
#if IS_ENABLED(CONFIG_NFS_V4)
struct list_head cl_ds_clients; /* auth flavor data servers */
@@ -122,6 +124,7 @@ struct nfs_client {
char cl_ipaddr[48];
struct net *cl_net;
struct list_head pending_cb_stateids;
+ struct rcu_head rcu;
};
/*
@@ -153,6 +156,7 @@ struct nfs_server {
#define NFS_MOUNT_WRITE_EAGER 0x01000000
#define NFS_MOUNT_WRITE_WAIT 0x02000000
#define NFS_MOUNT_TRUNK_DISCOVERY 0x04000000
+#define NFS_MOUNT_SHUTDOWN 0x08000000
unsigned int fattr_valid; /* Valid attributes */
unsigned int caps; /* server capabilities */
@@ -183,6 +187,7 @@ struct nfs_server {
change_attr_type;/* Description of change attribute */
struct nfs_fsid fsid;
+ int s_sysfs_id; /* sysfs dentry index */
__u64 maxfilesize; /* maximum file size */
struct timespec64 time_delta; /* smallest time granularity */
unsigned long mount_time; /* when this fs was mounted */
@@ -235,6 +240,7 @@ struct nfs_server {
struct list_head delegations;
struct list_head ss_copies;
+ unsigned long delegation_gen;
unsigned long mig_gen;
unsigned long mig_status;
#define NFS_MIG_IN_TRANSITION (1)
@@ -259,6 +265,8 @@ struct nfs_server {
/* User namespace info */
const struct cred *cred;
bool has_sec_mnt_opts;
+ struct kobject kobj;
+ struct rcu_head rcu;
};
/* Server capabilities */
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index aa9f4c6ebe26..1c315f854ea8 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -157,7 +157,9 @@ extern void nfs_unlock_request(struct nfs_page *req);
extern void nfs_unlock_and_release_request(struct nfs_page *);
extern struct nfs_page *nfs_page_group_lock_head(struct nfs_page *req);
extern int nfs_page_group_lock_subrequests(struct nfs_page *head);
-extern void nfs_join_page_group(struct nfs_page *head, struct inode *inode);
+extern void nfs_join_page_group(struct nfs_page *head,
+ struct nfs_commit_info *cinfo,
+ struct inode *inode);
extern int nfs_page_group_lock(struct nfs_page *);
extern void nfs_page_group_unlock(struct nfs_page *);
extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 29a1b39794bf..539b57fbf3ce 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1528,6 +1528,7 @@ struct nfs42_seek_res {
struct nfs42_setxattrargs {
struct nfs4_sequence_args seq_args;
struct nfs_fh *fh;
+ const u32 *bitmask;
const char *xattr_name;
u32 xattr_flags;
size_t xattr_len;
@@ -1537,6 +1538,8 @@ struct nfs42_setxattrargs {
struct nfs42_setxattrres {
struct nfs4_sequence_res seq_res;
struct nfs4_change_info cinfo;
+ struct nfs_fattr *fattr;
+ const struct nfs_server *server;
};
struct nfs42_getxattrargs {
@@ -1769,7 +1772,7 @@ struct nfs_rpc_ops {
void (*rename_rpc_prepare)(struct rpc_task *task, struct nfs_renamedata *);
int (*rename_done) (struct rpc_task *task, struct inode *old_dir, struct inode *new_dir);
int (*link) (struct inode *, struct inode *, const struct qstr *);
- int (*symlink) (struct inode *, struct dentry *, struct page *,
+ int (*symlink) (struct inode *, struct dentry *, struct folio *,
unsigned int, struct iattr *);
int (*mkdir) (struct inode *, struct dentry *, struct iattr *);
int (*rmdir) (struct inode *, const struct qstr *);
diff --git a/include/linux/nls.h b/include/linux/nls.h
index 499e486b3722..e0bf8367b274 100644
--- a/include/linux/nls.h
+++ b/include/linux/nls.h
@@ -47,7 +47,7 @@ enum utf16_endian {
/* nls_base.c */
extern int __register_nls(struct nls_table *, struct module *);
extern int unregister_nls(struct nls_table *);
-extern struct nls_table *load_nls(char *);
+extern struct nls_table *load_nls(const char *charset);
extern void unload_nls(struct nls_table *);
extern struct nls_table *load_nls_default(void);
#define register_nls(nls) __register_nls((nls), THIS_MODULE)
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 048c0b9aa623..e92e378df000 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -7,19 +7,19 @@
#include <linux/sched.h>
#include <asm/irq.h>
-#if defined(CONFIG_HAVE_NMI_WATCHDOG)
+
+/* Arch specific watchdogs might need to share extra watchdog-related APIs. */
+#if defined(CONFIG_HARDLOCKUP_DETECTOR_ARCH) || defined(CONFIG_HARDLOCKUP_DETECTOR_SPARC64)
#include <asm/nmi.h>
#endif
#ifdef CONFIG_LOCKUP_DETECTOR
void lockup_detector_init(void);
+void lockup_detector_retry_init(void);
void lockup_detector_soft_poweroff(void);
void lockup_detector_cleanup(void);
-bool is_hardlockup(void);
extern int watchdog_user_enabled;
-extern int nmi_watchdog_user_enabled;
-extern int soft_watchdog_user_enabled;
extern int watchdog_thresh;
extern unsigned long watchdog_enabled;
@@ -35,6 +35,7 @@ extern int sysctl_hardlockup_all_cpu_backtrace;
#else /* CONFIG_LOCKUP_DETECTOR */
static inline void lockup_detector_init(void) { }
+static inline void lockup_detector_retry_init(void) { }
static inline void lockup_detector_soft_poweroff(void) { }
static inline void lockup_detector_cleanup(void) { }
#endif /* !CONFIG_LOCKUP_DETECTOR */
@@ -69,17 +70,17 @@ static inline void reset_hung_task_detector(void) { }
* 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
* bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
*
- * 'watchdog_user_enabled', 'nmi_watchdog_user_enabled' and
- * 'soft_watchdog_user_enabled' are variables that are only used as an
+ * 'watchdog_user_enabled', 'watchdog_hardlockup_user_enabled' and
+ * 'watchdog_softlockup_user_enabled' are variables that are only used as an
* 'interface' between the parameters in /proc/sys/kernel and the internal
* state bits in 'watchdog_enabled'. The 'watchdog_thresh' variable is
* handled differently because its value is not boolean, and the lockup
 * detectors are 'suspended' while 'watchdog_thresh' is equal to zero.
*/
-#define NMI_WATCHDOG_ENABLED_BIT 0
-#define SOFT_WATCHDOG_ENABLED_BIT 1
-#define NMI_WATCHDOG_ENABLED (1 << NMI_WATCHDOG_ENABLED_BIT)
-#define SOFT_WATCHDOG_ENABLED (1 << SOFT_WATCHDOG_ENABLED_BIT)
+#define WATCHDOG_HARDLOCKUP_ENABLED_BIT 0
+#define WATCHDOG_SOFTOCKUP_ENABLED_BIT 1
+#define WATCHDOG_HARDLOCKUP_ENABLED (1 << WATCHDOG_HARDLOCKUP_ENABLED_BIT)
+#define WATCHDOG_SOFTOCKUP_ENABLED (1 << WATCHDOG_SOFTOCKUP_ENABLED_BIT)
#if defined(CONFIG_HARDLOCKUP_DETECTOR)
extern void hardlockup_detector_disable(void);
@@ -88,52 +89,63 @@ extern unsigned int hardlockup_panic;
static inline void hardlockup_detector_disable(void) {}
#endif
-#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
-# define NMI_WATCHDOG_SYSCTL_PERM 0644
+/* Sparc64 has a special implementation that is always enabled. */
+#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HARDLOCKUP_DETECTOR_SPARC64)
+void arch_touch_nmi_watchdog(void);
#else
-# define NMI_WATCHDOG_SYSCTL_PERM 0444
+static inline void arch_touch_nmi_watchdog(void) { }
+#endif
+
+#if defined(CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER)
+void watchdog_hardlockup_touch_cpu(unsigned int cpu);
+void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs);
#endif
#if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
-extern void arch_touch_nmi_watchdog(void);
extern void hardlockup_detector_perf_stop(void);
extern void hardlockup_detector_perf_restart(void);
-extern void hardlockup_detector_perf_disable(void);
-extern void hardlockup_detector_perf_enable(void);
extern void hardlockup_detector_perf_cleanup(void);
-extern int hardlockup_detector_perf_init(void);
#else
static inline void hardlockup_detector_perf_stop(void) { }
static inline void hardlockup_detector_perf_restart(void) { }
-static inline void hardlockup_detector_perf_disable(void) { }
-static inline void hardlockup_detector_perf_enable(void) { }
static inline void hardlockup_detector_perf_cleanup(void) { }
-# if !defined(CONFIG_HAVE_NMI_WATCHDOG)
-static inline int hardlockup_detector_perf_init(void) { return -ENODEV; }
-static inline void arch_touch_nmi_watchdog(void) {}
-# else
-static inline int hardlockup_detector_perf_init(void) { return 0; }
-# endif
#endif
-void watchdog_nmi_stop(void);
-void watchdog_nmi_start(void);
-int watchdog_nmi_probe(void);
-int watchdog_nmi_enable(unsigned int cpu);
-void watchdog_nmi_disable(unsigned int cpu);
+void watchdog_hardlockup_stop(void);
+void watchdog_hardlockup_start(void);
+int watchdog_hardlockup_probe(void);
+void watchdog_hardlockup_enable(unsigned int cpu);
+void watchdog_hardlockup_disable(unsigned int cpu);
void lockup_detector_reconfigure(void);
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_BUDDY
+void watchdog_buddy_check_hardlockup(int hrtimer_interrupts);
+#else
+static inline void watchdog_buddy_check_hardlockup(int hrtimer_interrupts) {}
+#endif
+
/**
- * touch_nmi_watchdog - restart NMI watchdog timeout.
+ * touch_nmi_watchdog - manually reset the hardlockup watchdog timeout.
*
- * If the architecture supports the NMI watchdog, touch_nmi_watchdog()
- * may be used to reset the timeout - for code which intentionally
- * disables interrupts for a long time. This call is stateless.
+ * If we support detecting hardlockups, touch_nmi_watchdog() may be
+ * used to pet the watchdog (reset the timeout) - for code which
+ * intentionally disables interrupts for a long time. This call is stateless.
+ *
+ * Though this function has "nmi" in the name, the hardlockup watchdog might
+ * not be backed by NMIs. This function will likely be renamed to
+ * touch_hardlockup_watchdog() in the future.
*/
static inline void touch_nmi_watchdog(void)
{
+ /*
+ * Pass on to the hardlockup detector selected via CONFIG_. Note that
+	 * the hardlockup detector may be neither arch-specific nor NMI-based,
+	 * and the arch_touch_nmi_watchdog() function will likely be renamed
+ * in the future.
+ */
arch_touch_nmi_watchdog();
+
touch_softlockup_watchdog();
}
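
For illustration only, a minimal sketch of the intended use, assuming <linux/nmi.h> and <linux/io.h> are included; the polling helper and status_reg are hypothetical:

	/* hypothetical example: a long poll with interrupts disabled */
	static void poll_device_quietly(void __iomem *status_reg)
	{
		while (!(readl(status_reg) & 0x1)) {
			touch_nmi_watchdog();	/* also pets the softlockup watchdog */
			cpu_relax();
		}
	}
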
@@ -145,31 +157,31 @@ static inline void touch_nmi_watchdog(void)
#ifdef arch_trigger_cpumask_backtrace
static inline bool trigger_all_cpu_backtrace(void)
{
- arch_trigger_cpumask_backtrace(cpu_online_mask, false);
+ arch_trigger_cpumask_backtrace(cpu_online_mask, -1);
return true;
}
-static inline bool trigger_allbutself_cpu_backtrace(void)
+static inline bool trigger_allbutcpu_cpu_backtrace(int exclude_cpu)
{
- arch_trigger_cpumask_backtrace(cpu_online_mask, true);
+ arch_trigger_cpumask_backtrace(cpu_online_mask, exclude_cpu);
return true;
}
static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
- arch_trigger_cpumask_backtrace(mask, false);
+ arch_trigger_cpumask_backtrace(mask, -1);
return true;
}
static inline bool trigger_single_cpu_backtrace(int cpu)
{
- arch_trigger_cpumask_backtrace(cpumask_of(cpu), false);
+ arch_trigger_cpumask_backtrace(cpumask_of(cpu), -1);
return true;
}
/* generic implementation */
void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
- bool exclude_self,
+ int exclude_cpu,
void (*raise)(cpumask_t *mask));
bool nmi_cpu_backtrace(struct pt_regs *regs);
@@ -178,7 +190,7 @@ static inline bool trigger_all_cpu_backtrace(void)
{
return false;
}
-static inline bool trigger_allbutself_cpu_backtrace(void)
+static inline bool trigger_allbutcpu_cpu_backtrace(int exclude_cpu)
{
return false;
}
@@ -194,10 +206,11 @@ static inline bool trigger_single_cpu_backtrace(int cpu)
#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
u64 hw_nmi_get_sample_period(int watchdog_thresh);
+bool arch_perf_nmi_is_available(void);
#endif
#if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \
- defined(CONFIG_HARDLOCKUP_DETECTOR)
+ defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
void watchdog_update_hrtimer_threshold(u64 period);
#else
static inline void watchdog_update_hrtimer_threshold(u64 period) { }
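
A sketch of how a caller converts to the new exclude_cpu convention, assuming preemption is disabled around smp_processor_id():

	/* before: the bool could only exclude the calling CPU */
	trigger_allbutself_cpu_backtrace();

	/* after: the caller names the CPU to exclude, or passes -1 for none */
	trigger_allbutcpu_cpu_backtrace(smp_processor_id());
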
diff --git a/include/linux/node.h b/include/linux/node.h
index 427a5975cf40..25b66d705ee2 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -20,14 +20,14 @@
#include <linux/list.h>
/**
- * struct node_hmem_attrs - heterogeneous memory performance attributes
+ * struct access_coordinate - generic performance coordinates container
*
* @read_bandwidth: Read bandwidth in MB/s
* @write_bandwidth: Write bandwidth in MB/s
* @read_latency: Read latency in nanoseconds
* @write_latency: Write latency in nanoseconds
*/
-struct node_hmem_attrs {
+struct access_coordinate {
unsigned int read_bandwidth;
unsigned int write_bandwidth;
unsigned int read_latency;
@@ -65,7 +65,7 @@ struct node_cache_attrs {
#ifdef CONFIG_HMEM_REPORTING
void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs);
-void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs,
+void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord,
unsigned access);
#else
static inline void node_add_cache(unsigned int nid,
@@ -74,7 +74,7 @@ static inline void node_add_cache(unsigned int nid,
}
static inline void node_set_perf_attrs(unsigned int nid,
- struct node_hmem_attrs *hmem_attrs,
+ struct access_coordinate *coord,
unsigned access)
{
}
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index bb0ee80526b2..b61438313a73 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -93,10 +93,10 @@
#include <linux/threads.h>
#include <linux/bitmap.h>
#include <linux/minmax.h>
+#include <linux/nodemask_types.h>
#include <linux/numa.h>
#include <linux/random.h>
-typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t;
extern nodemask_t _unused_nodemask_arg_;
/**
@@ -385,7 +385,7 @@ static inline void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp,
#if MAX_NUMNODES > 1
#define for_each_node_mask(node, mask) \
for ((node) = first_node(mask); \
- (node >= 0) && (node) < MAX_NUMNODES; \
+ (node) < MAX_NUMNODES; \
(node) = next_node((node), (mask)))
#else /* MAX_NUMNODES == 1 */
#define for_each_node_mask(node, mask) \
diff --git a/include/linux/nodemask_types.h b/include/linux/nodemask_types.h
new file mode 100644
index 000000000000..6b28d97ea6ed
--- /dev/null
+++ b/include/linux/nodemask_types.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_NODEMASK_TYPES_H
+#define __LINUX_NODEMASK_TYPES_H
+
+#include <linux/bitops.h>
+#include <linux/numa.h>
+
+typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t;
+
+#endif /* __LINUX_NODEMASK_TYPES_H */
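
The split lets low-level headers embed a nodemask_t without pulling in all of nodemask.h; a minimal sketch, with a hypothetical struct name:

	#include <linux/nodemask_types.h>

	struct mempolicy_sketch {	/* hypothetical */
		unsigned short mode;
		nodemask_t nodes;	/* only the type is needed here */
	};
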
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 86544707236a..45702bdcbceb 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -73,9 +73,7 @@ struct raw_notifier_head {
struct srcu_notifier_head {
struct mutex mutex;
-#ifdef CONFIG_TREE_SRCU
struct srcu_usage srcuu;
-#endif
struct srcu_struct srcu;
struct notifier_block __rcu *head;
};
@@ -106,7 +104,6 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
#define RAW_NOTIFIER_INIT(name) { \
.head = NULL }
-#ifdef CONFIG_TREE_SRCU
#define SRCU_NOTIFIER_INIT(name, pcpu) \
{ \
.mutex = __MUTEX_INITIALIZER(name.mutex), \
@@ -114,14 +111,6 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
.srcuu = __SRCU_USAGE_INIT(name.srcuu), \
.srcu = __SRCU_STRUCT_INIT(name.srcu, name.srcuu, pcpu), \
}
-#else
-#define SRCU_NOTIFIER_INIT(name, pcpu) \
- { \
- .mutex = __MUTEX_INITIALIZER(name.mutex), \
- .head = NULL, \
- .srcu = __SRCU_STRUCT_INIT(name.srcu, name.srcuu, pcpu), \
- }
-#endif
#define ATOMIC_NOTIFIER_HEAD(name) \
struct atomic_notifier_head name = \
diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h
index fee881cded01..5601d14e2886 100644
--- a/include/linux/nsproxy.h
+++ b/include/linux/nsproxy.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_NSPROXY_H
#define _LINUX_NSPROXY_H
+#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
@@ -29,7 +30,7 @@ struct fs_struct;
* nsproxy is copied.
*/
struct nsproxy {
- atomic_t count;
+ refcount_t count;
struct uts_namespace *uts_ns;
struct ipc_namespace *ipc_ns;
struct mnt_namespace *mnt_ns;
@@ -102,14 +103,13 @@ int __init nsproxy_cache_init(void);
static inline void put_nsproxy(struct nsproxy *ns)
{
- if (atomic_dec_and_test(&ns->count)) {
+ if (refcount_dec_and_test(&ns->count))
free_nsproxy(ns);
- }
}
static inline void get_nsproxy(struct nsproxy *ns)
{
- atomic_inc(&ns->count);
+ refcount_inc(&ns->count);
}
#endif
diff --git a/include/linux/nubus.h b/include/linux/nubus.h
index 392fc6c53e96..4d103ac8f5c7 100644
--- a/include/linux/nubus.h
+++ b/include/linux/nubus.h
@@ -89,10 +89,9 @@ struct nubus_driver {
void (*remove)(struct nubus_board *board);
};
-extern struct bus_type nubus_bus_type;
-
/* Generic NuBus interface functions, modelled after the PCI interface */
#ifdef CONFIG_PROC_FS
+extern bool nubus_populate_procfs;
void nubus_proc_init(void);
struct proc_dir_entry *nubus_proc_add_board(struct nubus_board *board);
struct proc_dir_entry *nubus_proc_add_rsrc_dir(struct proc_dir_entry *procdir,
diff --git a/include/linux/numa.h b/include/linux/numa.h
index 59df211d051f..915033a75731 100644
--- a/include/linux/numa.h
+++ b/include/linux/numa.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_NUMA_H
#define _LINUX_NUMA_H
+#include <linux/init.h>
#include <linux/types.h>
#ifdef CONFIG_NODES_SHIFT
@@ -12,6 +13,7 @@
#define MAX_NUMNODES (1 << NODES_SHIFT)
#define NUMA_NO_NODE (-1)
+#define NUMA_NO_MEMBLK (-1)
/* optionally keep NUMA memory info available post init */
#ifdef CONFIG_NUMA_KEEP_MEMINFO
@@ -21,33 +23,32 @@
#endif
#ifdef CONFIG_NUMA
-#include <linux/printk.h>
#include <asm/sparsemem.h>
/* Generic implementation available */
-int numa_map_to_online_node(int node);
+int numa_nearest_node(int node, unsigned int state);
#ifndef memory_add_physaddr_to_nid
-static inline int memory_add_physaddr_to_nid(u64 start)
-{
- pr_info_once("Unknown online node for memory at 0x%llx, assuming node 0\n",
- start);
- return 0;
-}
+int memory_add_physaddr_to_nid(u64 start);
#endif
+
#ifndef phys_to_target_node
-static inline int phys_to_target_node(u64 start)
+int phys_to_target_node(u64 start);
+#endif
+
+#ifndef numa_fill_memblks
+static inline int __init numa_fill_memblks(u64 start, u64 end)
{
- pr_info_once("Unknown target node for memory at 0x%llx, assuming node 0\n",
- start);
- return 0;
+ return NUMA_NO_MEMBLK;
}
#endif
+
#else /* !CONFIG_NUMA */
-static inline int numa_map_to_online_node(int node)
+static inline int numa_nearest_node(int node, unsigned int state)
{
return NUMA_NO_NODE;
}
+
static inline int memory_add_physaddr_to_nid(u64 start)
{
return 0;
@@ -58,6 +59,8 @@ static inline int phys_to_target_node(u64 start)
}
#endif
+#define numa_map_to_online_node(node) numa_nearest_node(node, N_ONLINE)
+
#ifdef CONFIG_HAVE_ARCH_NODE_DEV_GROUP
extern const struct attribute_group arch_node_dev_group;
#endif
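
A hedged sketch of the renamed helper in use, allocating near a device's node; dev and size are assumed from context, and N_MEMORY restricts the search to nodes that have memory:

	int nid = numa_nearest_node(dev_to_node(dev), N_MEMORY);
	void *buf = kzalloc_node(size, GFP_KERNEL, nid);
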
diff --git a/include/linux/nvme-auth.h b/include/linux/nvme-auth.h
index dcb8030062dd..c1d0bc5d9624 100644
--- a/include/linux/nvme-auth.h
+++ b/include/linux/nvme-auth.h
@@ -9,9 +9,9 @@
#include <crypto/kpp.h>
struct nvme_dhchap_key {
- u8 *key;
size_t len;
u8 hash;
+ u8 key[];
};
u32 nvme_auth_get_seqnum(void);
@@ -24,10 +24,13 @@ const char *nvme_auth_digest_name(u8 hmac_id);
size_t nvme_auth_hmac_hash_len(u8 hmac_id);
u8 nvme_auth_hmac_id(const char *hmac_name);
+u32 nvme_auth_key_struct_size(u32 key_len);
struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret,
u8 key_hash);
void nvme_auth_free_key(struct nvme_dhchap_key *key);
-u8 *nvme_auth_transform_key(struct nvme_dhchap_key *key, char *nqn);
+struct nvme_dhchap_key *nvme_auth_alloc_key(u32 len, u8 hash);
+struct nvme_dhchap_key *nvme_auth_transform_key(
+ struct nvme_dhchap_key *key, char *nqn);
int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key);
int nvme_auth_augmented_challenge(u8 hmac_id, u8 *skey, size_t skey_len,
u8 *challenge, u8 *aug, size_t hlen);
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
index fa092b9be2fd..4109f1bd6128 100644
--- a/include/linux/nvme-fc-driver.h
+++ b/include/linux/nvme-fc-driver.h
@@ -185,7 +185,6 @@ enum nvmefc_fcp_datadir {
* @first_sgl: memory for 1st scatter/gather list segment for payload data
* @sg_cnt: number of elements in the scatter/gather list
* @io_dir: direction of the FCP request (see NVMEFC_FCP_xxx)
- * @sqid: The nvme SQID the command is being issued on
* @done: The callback routine the LLDD is to invoke upon completion of
* the FCP operation. req argument is the pointer to the original
* FCP IO operation.
@@ -194,12 +193,13 @@ enum nvmefc_fcp_datadir {
* while processing the operation. The length of the buffer
* corresponds to the fcprqst_priv_sz value specified in the
* nvme_fc_port_template supplied by the LLDD.
+ * @sqid: The nvme SQID the command is being issued on
*
* Values set by the LLDD indicating completion status of the FCP operation.
* Must be set prior to calling the done() callback.
+ * @rcv_rsplen: length, in bytes, of the FCP RSP IU received.
* @transferred_length: amount of payload data, in bytes, that were
* transferred. Should equal payload_length on success.
- * @rcv_rsplen: length, in bytes, of the FCP RSP IU received.
* @status: Completion status of the FCP operation. must be 0 upon success,
* negative errno value upon failure (ex: -EIO). Note: this is
* NOT a reflection of the NVME CQE completion status. Only the
@@ -219,14 +219,14 @@ struct nvmefc_fcp_req {
int sg_cnt;
enum nvmefc_fcp_datadir io_dir;
- __le16 sqid;
-
void (*done)(struct nvmefc_fcp_req *req);
void *private;
- u32 transferred_length;
+ __le16 sqid;
+
u16 rcv_rsplen;
+ u32 transferred_length;
u32 status;
} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
diff --git a/include/linux/nvme-keyring.h b/include/linux/nvme-keyring.h
new file mode 100644
index 000000000000..e10333d78dbb
--- /dev/null
+++ b/include/linux/nvme-keyring.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023 Hannes Reinecke, SUSE Labs
+ */
+
+#ifndef _NVME_KEYRING_H
+#define _NVME_KEYRING_H
+
+#if IS_ENABLED(CONFIG_NVME_KEYRING)
+
+key_serial_t nvme_tls_psk_default(struct key *keyring,
+ const char *hostnqn, const char *subnqn);
+
+key_serial_t nvme_keyring_id(void);
+
+#else
+
+static inline key_serial_t nvme_tls_psk_default(struct key *keyring,
+ const char *hostnqn, const char *subnqn)
+{
+ return 0;
+}
+static inline key_serial_t nvme_keyring_id(void)
+{
+ return 0;
+}
+#endif /* !CONFIG_NVME_KEYRING */
+#endif /* _NVME_KEYRING_H */
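
For illustration, a sketch of a TLS PSK lookup with the new helpers; keyring, hostnqn and subsysnqn are assumed to come from the controller options:

	key_serial_t psk;

	psk = nvme_tls_psk_default(keyring, hostnqn, subsysnqn);
	if (!psk)
		pr_debug("no TLS PSK found, continuing without TLS\n");
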
diff --git a/include/linux/nvme-tcp.h b/include/linux/nvme-tcp.h
index 57ebe1267f7f..e07e8978d691 100644
--- a/include/linux/nvme-tcp.h
+++ b/include/linux/nvme-tcp.h
@@ -18,6 +18,12 @@ enum nvme_tcp_pfv {
NVME_TCP_PFV_1_0 = 0x0,
};
+enum nvme_tcp_tls_cipher {
+ NVME_TCP_TLS_CIPHER_INVALID = 0,
+ NVME_TCP_TLS_CIPHER_SHA256 = 1,
+ NVME_TCP_TLS_CIPHER_SHA384 = 2,
+};
+
enum nvme_tcp_fatal_error_status {
NVME_TCP_FES_INVALID_PDU_HDR = 0x01,
NVME_TCP_FES_PDU_SEQ_ERR = 0x02,
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 779507ac750b..3ef4053ea950 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -20,7 +20,6 @@
#define NVMF_TRSVCID_SIZE 32
#define NVMF_TRADDR_SIZE 256
#define NVMF_TSAS_SIZE 256
-#define NVMF_AUTH_HASH_LEN 64
#define NVME_DISC_SUBSYS_NAME "nqn.2014-08.org.nvmexpress.discovery"
@@ -108,6 +107,13 @@ enum {
NVMF_RDMA_CMS_RDMA_CM = 1, /* Sockets based endpoint addressing */
};
+/* TSAS SECTYPE for TCP transport */
+enum {
+ NVMF_TCP_SECTYPE_NONE = 0, /* No Security */
+ NVMF_TCP_SECTYPE_TLS12 = 1, /* TLSv1.2, NVMe-oF 1.1 and NVMe-TCP 3.6.1.1 */
+ NVMF_TCP_SECTYPE_TLS13 = 2, /* TLSv1.3, NVMe-oF 1.1 and NVMe-TCP 3.6.1.1 */
+};
+
#define NVME_AQ_DEPTH 32
#define NVME_NR_AEN_COMMANDS 1
#define NVME_AQ_BLK_MQ_DEPTH (NVME_AQ_DEPTH - NVME_NR_AEN_COMMANDS)
@@ -473,7 +479,7 @@ struct nvme_id_ns_nvm {
};
enum {
- NVME_ID_NS_NVM_STS_MASK = 0x3f,
+ NVME_ID_NS_NVM_STS_MASK = 0x7f,
NVME_ID_NS_NVM_GUARD_SHIFT = 7,
NVME_ID_NS_NVM_GUARD_MASK = 0x3,
};
@@ -640,6 +646,7 @@ enum {
NVME_CMD_EFFECTS_NCC = 1 << 2,
NVME_CMD_EFFECTS_NIC = 1 << 3,
NVME_CMD_EFFECTS_CCC = 1 << 4,
+ NVME_CMD_EFFECTS_CSER_MASK = GENMASK(15, 14),
NVME_CMD_EFFECTS_CSE_MASK = GENMASK(18, 16),
NVME_CMD_EFFECTS_UUID_SEL = 1 << 19,
NVME_CMD_EFFECTS_SCOPE_MASK = GENMASK(31, 20),
@@ -759,26 +766,55 @@ enum {
NVME_LBART_ATTRIB_HIDE = 1 << 1,
};
+enum nvme_pr_type {
+ NVME_PR_WRITE_EXCLUSIVE = 1,
+ NVME_PR_EXCLUSIVE_ACCESS = 2,
+ NVME_PR_WRITE_EXCLUSIVE_REG_ONLY = 3,
+ NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY = 4,
+ NVME_PR_WRITE_EXCLUSIVE_ALL_REGS = 5,
+ NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS = 6,
+};
+
+enum nvme_eds {
+ NVME_EXTENDED_DATA_STRUCT = 0x1,
+};
+
+struct nvme_registered_ctrl {
+ __le16 cntlid;
+ __u8 rcsts;
+ __u8 rsvd3[5];
+ __le64 hostid;
+ __le64 rkey;
+};
+
struct nvme_reservation_status {
__le32 gen;
__u8 rtype;
__u8 regctl[2];
__u8 resv5[2];
__u8 ptpls;
- __u8 resv10[13];
- struct {
- __le16 cntlid;
- __u8 rcsts;
- __u8 resv3[5];
- __le64 hostid;
- __le64 rkey;
- } regctl_ds[];
+ __u8 resv10[14];
+ struct nvme_registered_ctrl regctl_ds[];
+};
+
+struct nvme_registered_ctrl_ext {
+ __le16 cntlid;
+ __u8 rcsts;
+ __u8 rsvd3[5];
+ __le64 rkey;
+ __u8 hostid[16];
+ __u8 rsvd32[32];
};
-enum nvme_async_event_type {
- NVME_AER_TYPE_ERROR = 0,
- NVME_AER_TYPE_SMART = 1,
- NVME_AER_TYPE_NOTICE = 2,
+struct nvme_reservation_status_ext {
+ __le32 gen;
+ __u8 rtype;
+ __u8 regctl[2];
+ __u8 resv5[2];
+ __u8 ptpls;
+ __u8 resv10[14];
+ __u8 rsvd24[40];
+ struct nvme_registered_ctrl_ext regctl_eds[];
};
/* I/O commands */
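
A sketch of sizing a Reservation Report buffer for the new extended (EDS) format, using the flexible-array helper added elsewhere in this series; nr_ctrls is a placeholder:

	size_t len = struct_size_t(struct nvme_reservation_status_ext,
				   regctl_eds, nr_ctrls);
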
@@ -1458,6 +1494,9 @@ struct nvmf_disc_rsp_page_entry {
__u16 pkey;
__u8 resv10[246];
} rdma;
+ struct tcp {
+ __u8 sectype;
+ } tcp;
} tsas;
};
@@ -1687,7 +1726,7 @@ struct nvmf_auth_dhchap_success1_data {
__u8 rsvd2;
__u8 rvalid;
__u8 rsvd3[7];
- /* 'hl' bytes of response value if 'rvalid' is set */
+ /* 'hl' bytes of response value */
__u8 rval[];
};
@@ -1774,7 +1813,7 @@ struct nvme_command {
};
};
-static inline bool nvme_is_fabrics(struct nvme_command *cmd)
+static inline bool nvme_is_fabrics(const struct nvme_command *cmd)
{
return cmd->common.opcode == nvme_fabrics_command;
}
@@ -1793,7 +1832,7 @@ struct nvme_error_slot {
__u8 resv2[24];
};
-static inline bool nvme_is_write(struct nvme_command *cmd)
+static inline bool nvme_is_write(const struct nvme_command *cmd)
{
/*
* What a mess...
diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h
index fa030d93b768..34c0e58dfa26 100644
--- a/include/linux/nvmem-consumer.h
+++ b/include/linux/nvmem-consumer.h
@@ -43,6 +43,8 @@ enum {
NVMEM_REMOVE,
NVMEM_CELL_ADD,
NVMEM_CELL_REMOVE,
+ NVMEM_LAYOUT_ADD,
+ NVMEM_LAYOUT_REMOVE,
};
#if IS_ENABLED(CONFIG_NVMEM)
@@ -79,6 +81,7 @@ int nvmem_device_cell_write(struct nvmem_device *nvmem,
struct nvmem_cell_info *info, void *buf);
const char *nvmem_dev_name(struct nvmem_device *nvmem);
+size_t nvmem_dev_size(struct nvmem_device *nvmem);
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries,
size_t nentries);
@@ -125,6 +128,12 @@ static inline int nvmem_cell_write(struct nvmem_cell *cell,
return -EOPNOTSUPP;
}
+static inline int nvmem_cell_read_u8(struct device *dev,
+ const char *cell_id, u8 *val)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int nvmem_cell_read_u16(struct device *dev,
const char *cell_id, u16 *val)
{
@@ -239,7 +248,6 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
const char *id);
struct nvmem_device *of_nvmem_device_get(struct device_node *np,
const char *name);
-struct device_node *of_nvmem_layout_get_container(struct nvmem_device *nvmem);
#else
static inline struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
const char *id)
@@ -252,12 +260,6 @@ static inline struct nvmem_device *of_nvmem_device_get(struct device_node *np,
{
return ERR_PTR(-EOPNOTSUPP);
}
-
-static inline struct device_node *
-of_nvmem_layout_get_container(struct nvmem_device *nvmem)
-{
- return ERR_PTR(-EOPNOTSUPP);
-}
#endif /* CONFIG_NVMEM && CONFIG_OF */
#endif /* ifndef _LINUX_NVMEM_CONSUMER_H */
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
index dae26295e6be..f0ba0e03218f 100644
--- a/include/linux/nvmem-provider.h
+++ b/include/linux/nvmem-provider.h
@@ -9,6 +9,7 @@
#ifndef _LINUX_NVMEM_PROVIDER_H
#define _LINUX_NVMEM_PROVIDER_H
+#include <linux/device.h>
#include <linux/device/driver.h>
#include <linux/err.h>
#include <linux/errno.h>
@@ -82,13 +83,15 @@ struct nvmem_cell_info {
* @owner: Pointer to exporter module. Used for refcounting.
* @cells: Optional array of pre-defined NVMEM cells.
* @ncells: Number of elements in cells.
+ * @add_legacy_fixed_of_cells: Read fixed NVMEM cells from old OF syntax.
+ * @fixup_dt_cell_info: Will be called before a cell is added. Can be
+ * used to modify the nvmem_cell_info.
* @keepout: Optional array of keepout ranges (sorted ascending by start).
* @nkeepout: Number of elements in the keepout array.
* @type: Type of the nvmem storage
* @read_only: Device is read-only.
 * @root_only: Device is accessible to root only.
* @of_node: If given, this will be used instead of the parent's of_node.
- * @no_of_node: Device should not use the parent's of_node even if it's !NULL.
* @reg_read: Callback to read data.
* @reg_write: Callback to write data.
* @size: Device size.
@@ -112,6 +115,9 @@ struct nvmem_config {
struct module *owner;
const struct nvmem_cell_info *cells;
int ncells;
+ bool add_legacy_fixed_of_cells;
+ void (*fixup_dt_cell_info)(struct nvmem_device *nvmem,
+ struct nvmem_cell_info *cell);
const struct nvmem_keepout *keepout;
unsigned int nkeepout;
enum nvmem_type type;
@@ -120,7 +126,6 @@ struct nvmem_config {
bool ignore_wp;
struct nvmem_layout *layout;
struct device_node *of_node;
- bool no_of_node;
nvmem_reg_read_t reg_read;
nvmem_reg_write_t reg_write;
int size;
@@ -154,15 +159,11 @@ struct nvmem_cell_table {
/**
* struct nvmem_layout - NVMEM layout definitions
*
- * @name: Layout name.
- * @of_match_table: Open firmware match table.
+ * @dev: Device-model layout device.
+ * @nvmem: The underlying NVMEM device
* @add_cells: Will be called if a nvmem device is found which
* has this layout. The function will add layout
* specific cells with nvmem_add_one_cell().
- * @fixup_cell_info: Will be called before a cell is added. Can be
- * used to modify the nvmem_cell_info.
- * @owner: Pointer to struct module.
- * @node: List node.
*
 * An nvmem device can hold a well-defined structure that can be evaluated
 * at runtime, for example a TLV list or a list of "name=val" pairs. An
 * nvmem layout can parse the nvmem device and add the appropriate cells.
*/
struct nvmem_layout {
- const char *name;
- const struct of_device_id *of_match_table;
- int (*add_cells)(struct device *dev, struct nvmem_device *nvmem,
- struct nvmem_layout *layout);
- void (*fixup_cell_info)(struct nvmem_device *nvmem,
- struct nvmem_layout *layout,
- struct nvmem_cell_info *cell);
-
- /* private */
- struct module *owner;
- struct list_head node;
+ struct device dev;
+ struct nvmem_device *nvmem;
+ int (*add_cells)(struct nvmem_layout *layout);
+};
+
+struct nvmem_layout_driver {
+ struct device_driver driver;
+ int (*probe)(struct nvmem_layout *layout);
+ void (*remove)(struct nvmem_layout *layout);
};
#if IS_ENABLED(CONFIG_NVMEM)
@@ -197,13 +196,14 @@ void nvmem_del_cell_table(struct nvmem_cell_table *table);
int nvmem_add_one_cell(struct nvmem_device *nvmem,
const struct nvmem_cell_info *info);
-int __nvmem_layout_register(struct nvmem_layout *layout, struct module *owner);
-#define nvmem_layout_register(layout) \
- __nvmem_layout_register(layout, THIS_MODULE)
+int nvmem_layout_register(struct nvmem_layout *layout);
void nvmem_layout_unregister(struct nvmem_layout *layout);
-const void *nvmem_layout_get_match_data(struct nvmem_device *nvmem,
- struct nvmem_layout *layout);
+int nvmem_layout_driver_register(struct nvmem_layout_driver *drv);
+void nvmem_layout_driver_unregister(struct nvmem_layout_driver *drv);
+#define module_nvmem_layout_driver(__nvmem_layout_driver) \
+ module_driver(__nvmem_layout_driver, nvmem_layout_driver_register, \
+ nvmem_layout_driver_unregister)
#else
@@ -235,17 +235,27 @@ static inline int nvmem_layout_register(struct nvmem_layout *layout)
static inline void nvmem_layout_unregister(struct nvmem_layout *layout) {}
-static inline const void *
-nvmem_layout_get_match_data(struct nvmem_device *nvmem,
- struct nvmem_layout *layout)
+#endif /* CONFIG_NVMEM */
+
+#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF)
+
+/**
+ * of_nvmem_layout_get_container() - Get OF node of layout container
+ *
+ * @nvmem: nvmem device
+ *
+ * Return: a node pointer with refcount incremented or NULL if no
+ * container exists. Use of_node_put() on it when done.
+ */
+struct device_node *of_nvmem_layout_get_container(struct nvmem_device *nvmem);
+
+#else /* CONFIG_NVMEM && CONFIG_OF */
+
+static inline struct device_node *of_nvmem_layout_get_container(struct nvmem_device *nvmem)
{
return NULL;
}
-#endif /* CONFIG_NVMEM */
-
-#define module_nvmem_layout_driver(__layout_driver) \
- module_driver(__layout_driver, nvmem_layout_register, \
- nvmem_layout_unregister)
+#endif /* CONFIG_NVMEM && CONFIG_OF */
#endif /* ifndef _LINUX_NVMEM_PROVIDER_H */
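
A sketch of the new device-model registration flow for a layout driver; the my_* names are hypothetical and the .of_match_table wiring is omitted:

	static int my_add_cells(struct nvmem_layout *layout)
	{
		/* parse layout->nvmem and call nvmem_add_one_cell() per cell */
		return 0;
	}

	static int my_layout_probe(struct nvmem_layout *layout)
	{
		layout->add_cells = my_add_cells;
		return nvmem_layout_register(layout);
	}

	static void my_layout_remove(struct nvmem_layout *layout)
	{
		nvmem_layout_unregister(layout);
	}

	static struct nvmem_layout_driver my_layout_driver = {
		.driver = { .name = "my-layout" },	/* hypothetical */
		.probe = my_layout_probe,
		.remove = my_layout_remove,
	};
	module_nvmem_layout_driver(my_layout_driver);
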
diff --git a/include/linux/objpool.h b/include/linux/objpool.h
new file mode 100644
index 000000000000..15aff4a17f0c
--- /dev/null
+++ b/include/linux/objpool.h
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_OBJPOOL_H
+#define _LINUX_OBJPOOL_H
+
+#include <linux/types.h>
+#include <linux/refcount.h>
+
+/*
+ * objpool: ring-array based lockless MPMC queue
+ *
+ * Copyright: wuqiang.matt@bytedance.com,mhiramat@kernel.org
+ *
+ * objpool is a scalable, high-performance queue implementation for
+ * object allocation and reclamation, such as kretprobe instances.
+ *
+ * By leveraging a percpu ring-array to mitigate memory-contention hot
+ * spots, it delivers near-linear scalability in highly parallel
+ * scenarios. The objpool is best suited for the following cases:
+ * 1) Memory allocation or reclamation are prohibited or too expensive
+ * 2) Consumers are of different priorities, such as irqs and threads
+ *
+ * Limitations:
+ * 1) Maximum objects (capacity) is fixed after objpool creation
+ * 2) All pre-allocated objects are managed in percpu ring array,
+ * which consumes more memory than linked lists
+ */
+
+/**
+ * struct objpool_slot - percpu ring array of objpool
+ * @head: head sequence of the local ring array (to retrieve at)
+ * @tail: tail sequence of the local ring array (to append at)
+ * @last: the last sequence number marked as ready for retrieve
+ * @mask: bits mask for modulo capacity to compute array indexes
+ * @entries: object entries on this slot
+ *
+ * Represents a cpu-local, array-based ring buffer whose size is specified
+ * during initialization of the object pool. On NUMA systems each percpu
+ * slot is allocated from local memory and kept compact in contiguous
+ * memory: the objects assigned to a CPU are stored just after the body
+ * of the objpool_slot.
+ *
+ * The real size of the ring array is far smaller than the value range of
+ * head and tail, typed as uint32_t: [0, 2^32), so only the lower bits
+ * (mask) of head and tail are used as the actual position in the ring
+ * array. In general the ring array acts like a small sliding window that
+ * always moves forward within [0, 2^32).
+ */
+struct objpool_slot {
+ uint32_t head;
+ uint32_t tail;
+ uint32_t last;
+ uint32_t mask;
+ void *entries[];
+} __packed;
+
+struct objpool_head;
+
+/*
+ * Caller-specified callback for initial object setup; it is called only
+ * once for each object, just after the object's memory is allocated.
+ */
+typedef int (*objpool_init_obj_cb)(void *obj, void *context);
+
+/* caller-specified cleanup callback for objpool destruction */
+typedef int (*objpool_fini_cb)(struct objpool_head *head, void *context);
+
+/**
+ * struct objpool_head - object pooling metadata
+ * @obj_size: object size, aligned to sizeof(void *)
+ * @nr_objs: total objs (to be pre-allocated with objpool)
+ * @nr_cpus: local copy of nr_cpu_ids
+ * @capacity: max objs can be managed by one objpool_slot
+ * @gfp: gfp flags for kmalloc & vmalloc
+ * @ref: refcount of objpool
+ * @flags: flags for objpool management
+ * @cpu_slots: pointer to the array of objpool_slot
+ * @release: resource cleanup callback
+ * @context: caller-provided context
+ */
+struct objpool_head {
+ int obj_size;
+ int nr_objs;
+ int nr_cpus;
+ int capacity;
+ gfp_t gfp;
+ refcount_t ref;
+ unsigned long flags;
+ struct objpool_slot **cpu_slots;
+ objpool_fini_cb release;
+ void *context;
+};
+
+#define OBJPOOL_NR_OBJECT_MAX (1UL << 24) /* maximum total number of objects */
+#define OBJPOOL_OBJECT_SIZE_MAX (1UL << 16) /* maximum size of an object */
+
+/**
+ * objpool_init() - initialize objpool and pre-allocated objects
+ * @pool: the object pool to be initialized, declared by caller
+ * @nr_objs: total objects to be pre-allocated by this object pool
+ * @object_size: size of an object (should be > 0)
+ * @gfp: flags for memory allocation (via kmalloc or vmalloc)
+ * @context: user context for object initialization callback
+ * @objinit: object initialization callback for extra setup
+ * @release: cleanup callback for extra cleanup task
+ *
+ * Return: 0 for success, otherwise an error code
+ *
+ * All pre-allocated objects are to be zeroed after memory allocation.
+ * The caller can do extra initialization in the objinit callback, which
+ * is called just after slot allocation and only once for each object.
+ * After that the objpool never touches the contents of the objects: it
+ * is the caller's duty to reinitialize an object after each pop (object
+ * allocation) or to clear it before each push (object reclamation).
+ */
+int objpool_init(struct objpool_head *pool, int nr_objs, int object_size,
+ gfp_t gfp, void *context, objpool_init_obj_cb objinit,
+ objpool_fini_cb release);
+
+/**
+ * objpool_pop() - allocate an object from objpool
+ * @pool: object pool
+ *
+ * Return: object pointer, or NULL on failure
+ */
+void *objpool_pop(struct objpool_head *pool);
+
+/**
+ * objpool_push() - reclaim the object and return it to the objpool
+ * @obj: object ptr to be pushed to objpool
+ * @pool: object pool
+ *
+ * Return: 0 or an error code (it fails only when the user tries to push
+ * the same object multiple times or to push foreign objects into the pool)
+ */
+int objpool_push(void *obj, struct objpool_head *pool);
+
+/**
+ * objpool_drop() - discard the object and deref objpool
+ * @obj: object ptr to be discarded
+ * @pool: object pool
+ *
+ * Return: 0 if the objpool was released; -EAGAIN if there are still
+ * outstanding objects
+ *
+ * objpool_drop() is normally used to release outstanding objects after
+ * objpool cleanup (objpool_fini()). Consider this example: a kretprobe
+ * is unregistered and objpool_fini() is called to release all remaining
+ * objects, but some objects are still in use by unfinished kretprobes
+ * (e.g. inside a blockable function such as sys_accept). Only when the
+ * last outstanding object is dropped can the whole objpool be released,
+ * by that final call of objpool_drop().
+ */
+int objpool_drop(void *obj, struct objpool_head *pool);
+
+/**
+ * objpool_free() - forcibly release the objpool (all objects are freed)
+ * @pool: object pool to be released
+ */
+void objpool_free(struct objpool_head *pool);
+
+/**
+ * objpool_fini() - deref object pool (also releasing unused objects)
+ * @pool: object pool to be dereferenced
+ *
+ * objpool_fini() tries to release all remaining free objects and then
+ * drops an extra reference of the objpool. If all objects have already
+ * been returned to the objpool (so-called synchronous use cases), the
+ * objpool itself is freed along with them. But if there are still
+ * outstanding objects (so-called asynchronous use cases, such as a
+ * blockable kretprobe), the objpool is not released until all the
+ * outstanding objects are dropped. The caller must ensure there are no
+ * concurrent objpool_push() calls in flight; normally RCU is required
+ * to guarantee that all ongoing objpool_push() calls have finished
+ * before objpool_fini() is called, as test_objpool, kretprobe and
+ * rethook do.
+ */
+void objpool_fini(struct objpool_head *pool);
+
+#endif /* _LINUX_OBJPOOL_H */
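
A minimal usage sketch under the API above, inside a function; struct my_obj and the callbacks are hypothetical:

	struct my_obj { int id; };

	static int my_objinit(void *obj, void *context)
	{
		((struct my_obj *)obj)->id = 0;	/* extra one-time setup */
		return 0;
	}

	static struct objpool_head pool;

	static int demo(void)
	{
		struct my_obj *obj;
		int ret;

		ret = objpool_init(&pool, 128, sizeof(struct my_obj),
				   GFP_KERNEL, NULL, my_objinit, NULL);
		if (ret)
			return ret;

		obj = objpool_pop(&pool);		/* allocate */
		if (obj)
			objpool_push(obj, &pool);	/* reclaim */

		objpool_fini(&pool);	/* drop the pool's own reference */
		return 0;
	}
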
diff --git a/include/linux/objtool.h b/include/linux/objtool.h
index 03f82c2c2ebf..33212e93f4a6 100644
--- a/include/linux/objtool.h
+++ b/include/linux/objtool.h
@@ -48,13 +48,13 @@
#define ANNOTATE_NOENDBR \
"986: \n\t" \
".pushsection .discard.noendbr\n\t" \
- ".long 986b - .\n\t" \
+ ".long 986b\n\t" \
".popsection\n\t"
#define ASM_REACHABLE \
"998:\n\t" \
".pushsection .discard.reachable\n\t" \
- ".long 998b - .\n\t" \
+ ".long 998b\n\t" \
".popsection\n\t"
#else /* __ASSEMBLY__ */
@@ -66,7 +66,7 @@
#define ANNOTATE_INTRA_FUNCTION_CALL \
999: \
.pushsection .discard.intra_function_calls; \
- .long 999b - .; \
+ .long 999b; \
.popsection;
/*
@@ -118,7 +118,7 @@
.macro ANNOTATE_NOENDBR
.Lhere_\@:
.pushsection .discard.noendbr
- .long .Lhere_\@ - .
+ .long .Lhere_\@
.popsection
.endm
@@ -130,7 +130,8 @@
* it will be ignored.
*/
.macro VALIDATE_UNRET_BEGIN
-#if defined(CONFIG_NOINSTR_VALIDATION) && defined(CONFIG_CPU_UNRET_ENTRY)
+#if defined(CONFIG_NOINSTR_VALIDATION) && \
+ (defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO))
.Lhere_\@:
.pushsection .discard.validate_unret
.long .Lhere_\@ - .
@@ -141,7 +142,7 @@
.macro REACHABLE
.Lhere_\@:
.pushsection .discard.reachable
- .long .Lhere_\@ - .
+ .long .Lhere_\@
.popsection
.endm
diff --git a/include/linux/of.h b/include/linux/of.h
index 6ecde0515677..6a9ddf20e79a 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -96,10 +96,12 @@ struct of_reconfig_data {
struct property *old_prop;
};
+extern const struct kobj_type of_node_ktype;
+extern const struct fwnode_operations of_fwnode_ops;
+
/**
* of_node_init - initialize a devicetree node
* @node: Pointer to device node that has been created by kzalloc()
- * @phandle_name: Name of property holding a phandle value
*
* On return the device_node refcount is set to one. Use of_node_put()
* on @node when done to free the memory allocated for it. If the node
@@ -107,9 +109,6 @@ struct of_reconfig_data {
* whether to free the memory will be done by node->release(), which is
* of_node_release().
*/
-/* initialize a node */
-extern const struct kobj_type of_node_ktype;
-extern const struct fwnode_operations of_fwnode_ops;
static inline void of_node_init(struct device_node *node)
{
#if defined(CONFIG_OF_KOBJ)
@@ -1580,6 +1579,29 @@ static inline int of_changeset_update_property(struct of_changeset *ocs,
{
return of_changeset_action(ocs, OF_RECONFIG_UPDATE_PROPERTY, np, prop);
}
+
+struct device_node *of_changeset_create_node(struct of_changeset *ocs,
+ struct device_node *parent,
+ const char *full_name);
+int of_changeset_add_prop_string(struct of_changeset *ocs,
+ struct device_node *np,
+ const char *prop_name, const char *str);
+int of_changeset_add_prop_string_array(struct of_changeset *ocs,
+ struct device_node *np,
+ const char *prop_name,
+ const char **str_array, size_t sz);
+int of_changeset_add_prop_u32_array(struct of_changeset *ocs,
+ struct device_node *np,
+ const char *prop_name,
+ const u32 *array, size_t sz);
+static inline int of_changeset_add_prop_u32(struct of_changeset *ocs,
+ struct device_node *np,
+ const char *prop_name,
+ const u32 val)
+{
+ return of_changeset_add_prop_u32_array(ocs, np, prop_name, &val, 1);
+}
+
#else /* CONFIG_OF_DYNAMIC */
static inline int of_reconfig_notifier_register(struct notifier_block *nb)
{
@@ -1645,7 +1667,7 @@ struct of_overlay_notify_data {
#ifdef CONFIG_OF_OVERLAY
int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size,
- int *ovcs_id);
+ int *ovcs_id, struct device_node *target_base);
int of_overlay_remove(int *ovcs_id);
int of_overlay_remove_all(void);
@@ -1654,8 +1676,8 @@ int of_overlay_notifier_unregister(struct notifier_block *nb);
#else
-static inline int of_overlay_fdt_apply(void *overlay_fdt, u32 overlay_fdt_size,
- int *ovcs_id)
+static inline int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size,
+ int *ovcs_id, struct device_node *target_base)
{
return -ENOTSUPP;
}
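
For illustration, a sketch of the new changeset helpers building a node with one u32 property; parent and error handling are assumed from context, and the node and property names are placeholders:

	struct of_changeset ocs;
	struct device_node *np;

	of_changeset_init(&ocs);
	np = of_changeset_create_node(&ocs, parent, "demo-node");
	if (np) {
		of_changeset_add_prop_u32(&ocs, np, "reg-offset", 0x100);
		of_changeset_apply(&ocs);
	}
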
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index 2c7a3d4bc775..9042bca5bb84 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -2,10 +2,7 @@
#ifndef _LINUX_OF_DEVICE_H
#define _LINUX_OF_DEVICE_H
-#include <linux/platform_device.h>
-#include <linux/of_platform.h> /* temporary until merge */
-
-#include <linux/of.h>
+#include <linux/device/driver.h>
struct device;
struct of_device_id;
@@ -40,6 +37,9 @@ static inline int of_dma_configure(struct device *dev,
{
return of_dma_configure_id(dev, np, force_dma, NULL);
}
+
+void of_device_make_bus_id(struct device *dev);
+
#else /* CONFIG_OF */
static inline int of_driver_match_device(struct device *dev,
@@ -82,6 +82,9 @@ static inline int of_dma_configure(struct device *dev,
{
return 0;
}
+
+static inline void of_device_make_bus_id(struct device *dev) {}
+
#endif /* CONFIG_OF */
#endif /* _LINUX_OF_DEVICE_H */
diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h
index 9a5e6b410dd2..e61cbbe12dac 100644
--- a/include/linux/of_iommu.h
+++ b/include/linux/of_iommu.h
@@ -8,20 +8,19 @@ struct iommu_ops;
#ifdef CONFIG_OF_IOMMU
-extern const struct iommu_ops *of_iommu_configure(struct device *dev,
- struct device_node *master_np,
- const u32 *id);
+extern int of_iommu_configure(struct device *dev, struct device_node *master_np,
+ const u32 *id);
extern void of_iommu_get_resv_regions(struct device *dev,
struct list_head *list);
#else
-static inline const struct iommu_ops *of_iommu_configure(struct device *dev,
- struct device_node *master_np,
- const u32 *id)
+static inline int of_iommu_configure(struct device *dev,
+ struct device_node *master_np,
+ const u32 *id)
{
- return NULL;
+ return -ENODEV;
}
static inline void of_iommu_get_resv_regions(struct device *dev,
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index d8045bcfc35e..a2ff1ad48f7f 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -7,11 +7,11 @@
*/
#include <linux/mod_devicetable.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
struct device;
+struct device_node;
struct of_device_id;
+struct platform_device;
/**
* struct of_dev_auxdata - lookup table entry for device names & platform_data
@@ -127,10 +127,4 @@ static inline int devm_of_platform_populate(struct device *dev)
static inline void devm_of_platform_depopulate(struct device *dev) { }
#endif
-#if defined(CONFIG_OF_DYNAMIC) && defined(CONFIG_OF_ADDRESS)
-extern void of_platform_register_reconfig_notifier(void);
-#else
-static inline void of_platform_register_reconfig_notifier(void) { }
-#endif
-
#endif /* _LINUX_OF_PLATFORM_H */
diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h
index 0f4a8903922a..3921fbed0b28 100644
--- a/include/linux/oid_registry.h
+++ b/include/linux/oid_registry.h
@@ -17,12 +17,10 @@
* build_OID_registry.pl to generate the data for look_up_OID().
*/
enum OID {
- OID_id_dsa_with_sha1, /* 1.2.840.10030.4.3 */
OID_id_dsa, /* 1.2.840.10040.4.1 */
OID_id_ecPublicKey, /* 1.2.840.10045.2.1 */
OID_id_prime192v1, /* 1.2.840.10045.3.1.1 */
OID_id_prime256v1, /* 1.2.840.10045.3.1.7 */
- OID_id_ecdsa_with_sha1, /* 1.2.840.10045.4.1 */
OID_id_ecdsa_with_sha224, /* 1.2.840.10045.4.3.1 */
OID_id_ecdsa_with_sha256, /* 1.2.840.10045.4.3.2 */
OID_id_ecdsa_with_sha384, /* 1.2.840.10045.4.3.3 */
@@ -30,10 +28,6 @@ enum OID {
/* PKCS#1 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-1(1)} */
OID_rsaEncryption, /* 1.2.840.113549.1.1.1 */
- OID_md2WithRSAEncryption, /* 1.2.840.113549.1.1.2 */
- OID_md3WithRSAEncryption, /* 1.2.840.113549.1.1.3 */
- OID_md4WithRSAEncryption, /* 1.2.840.113549.1.1.4 */
- OID_sha1WithRSAEncryption, /* 1.2.840.113549.1.1.5 */
OID_sha256WithRSAEncryption, /* 1.2.840.113549.1.1.11 */
OID_sha384WithRSAEncryption, /* 1.2.840.113549.1.1.12 */
OID_sha512WithRSAEncryption, /* 1.2.840.113549.1.1.13 */
@@ -49,11 +43,6 @@ enum OID {
OID_smimeCapabilites, /* 1.2.840.113549.1.9.15 */
OID_smimeAuthenticatedAttrs, /* 1.2.840.113549.1.9.16.2.11 */
- /* {iso(1) member-body(2) us(840) rsadsi(113549) digestAlgorithm(2)} */
- OID_md2, /* 1.2.840.113549.2.2 */
- OID_md4, /* 1.2.840.113549.2.4 */
- OID_md5, /* 1.2.840.113549.2.5 */
-
OID_mskrb5, /* 1.2.840.48018.1.2.2 */
OID_krb5, /* 1.2.840.113554.1.2.2 */
OID_krb5u2u, /* 1.2.840.113554.1.2.2.3 */
@@ -67,6 +56,7 @@ enum OID {
OID_msOutlookExpress, /* 1.3.6.1.4.1.311.16.4 */
OID_ntlmssp, /* 1.3.6.1.4.1.311.2.2.10 */
+ OID_negoex, /* 1.3.6.1.4.1.311.2.2.30 */
OID_spnego, /* 1.3.6.1.5.5.2 */
@@ -74,7 +64,6 @@ enum OID {
OID_PKU2U, /* 1.3.5.1.5.2.7 */
OID_Scram, /* 1.3.6.1.5.5.14 */
OID_certAuthInfoAccess, /* 1.3.6.1.5.5.7.1.1 */
- OID_sha1, /* 1.3.14.3.2.26 */
OID_id_ansip384r1, /* 1.3.132.0.34 */
OID_sha256, /* 2.16.840.1.101.3.4.2.1 */
OID_sha384, /* 2.16.840.1.101.3.4.2.2 */
@@ -140,6 +129,17 @@ enum OID {
OID_TPMImportableKey, /* 2.23.133.10.1.4 */
OID_TPMSealedData, /* 2.23.133.10.1.5 */
+ /* CSOR FIPS-202 SHA-3 */
+ OID_sha3_256, /* 2.16.840.1.101.3.4.2.8 */
+ OID_sha3_384, /* 2.16.840.1.101.3.4.2.9 */
+ OID_sha3_512, /* 2.16.840.1.101.3.4.2.10 */
+ OID_id_ecdsa_with_sha3_256, /* 2.16.840.1.101.3.4.3.10 */
+ OID_id_ecdsa_with_sha3_384, /* 2.16.840.1.101.3.4.3.11 */
+ OID_id_ecdsa_with_sha3_512, /* 2.16.840.1.101.3.4.3.12 */
+ OID_id_rsassa_pkcs1_v1_5_with_sha3_256, /* 2.16.840.1.101.3.4.3.14 */
+ OID_id_rsassa_pkcs1_v1_5_with_sha3_384, /* 2.16.840.1.101.3.4.3.15 */
+ OID_id_rsassa_pkcs1_v1_5_with_sha3_512, /* 2.16.840.1.101.3.4.3.16 */
+
OID__NR
};
diff --git a/include/linux/olpc-ec.h b/include/linux/olpc-ec.h
index c4602364e909..3c2891d85c41 100644
--- a/include/linux/olpc-ec.h
+++ b/include/linux/olpc-ec.h
@@ -56,6 +56,8 @@ extern int olpc_ec_sci_query(u16 *sci_value);
extern bool olpc_ec_wakeup_available(void);
+asmlinkage int xo1_do_sleep(u8 sleep_state);
+
#else
static inline int olpc_ec_cmd(u8 cmd, u8 *inbuf, size_t inlen, u8 *outbuf,
diff --git a/include/linux/osq_lock.h b/include/linux/osq_lock.h
index 5581dbd3bd34..ea8fb31379e3 100644
--- a/include/linux/osq_lock.h
+++ b/include/linux/osq_lock.h
@@ -6,11 +6,6 @@
* An MCS like lock especially tailored for optimistic spinning for sleeping
* lock implementations (mutex, rwsem, etc).
*/
-struct optimistic_spin_node {
- struct optimistic_spin_node *next, *prev;
- int locked; /* 1 if lock acquired */
- int cpu; /* encoded CPU # + 1 value */
-};
struct optimistic_spin_queue {
/*
diff --git a/include/linux/overflow.h b/include/linux/overflow.h
index 0e33b5cbdb9f..7b5cf4a5cd19 100644
--- a/include/linux/overflow.h
+++ b/include/linux/overflow.h
@@ -283,7 +283,7 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
* @member: Name of the array member.
* @count: Number of elements in the array.
*
- * Calculates size of memory needed for structure @p followed by an
+ * Calculates size of memory needed for structure of @p followed by an
* array of @count number of @member elements.
*
* Return: number of bytes needed or SIZE_MAX on overflow.
@@ -293,4 +293,55 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
sizeof(*(p)) + flex_array_size(p, member, count), \
size_add(sizeof(*(p)), flex_array_size(p, member, count)))
+/**
+ * struct_size_t() - Calculate size of structure with trailing flexible array
+ * @type: structure type name.
+ * @member: Name of the array member.
+ * @count: Number of elements in the array.
+ *
+ * Calculates size of memory needed for structure @type followed by an
+ * array of @count number of @member elements. Prefer using struct_size()
+ * when possible instead, to keep calculations associated with a specific
+ * instance variable of type @type.
+ *
+ * Return: number of bytes needed or SIZE_MAX on overflow.
+ */
+#define struct_size_t(type, member, count) \
+ struct_size((type *)NULL, member, count)
+
+/**
+ * _DEFINE_FLEX() - helper macro for DEFINE_FLEX() family.
+ * Enables a caller macro to pass a (different) initializer.
+ *
+ * @type: structure type name, including "struct" keyword.
+ * @name: Name for a variable to define.
+ * @member: Name of the array member.
+ * @count: Number of elements in the array; must be compile-time const.
+ * @initializer: initializer expression (could be empty for no init).
+ */
+#define _DEFINE_FLEX(type, name, member, count, initializer) \
+ _Static_assert(__builtin_constant_p(count), \
+ "onstack flex array members require compile-time const count"); \
+ union { \
+ u8 bytes[struct_size_t(type, member, count)]; \
+ type obj; \
+ } name##_u initializer; \
+ type *name = (type *)&name##_u
+
+/**
+ * DEFINE_FLEX() - Define an on-stack instance of structure with a trailing
+ * flexible array member.
+ *
+ * @type: structure type name, including "struct" keyword.
+ * @name: Name for a variable to define.
+ * @member: Name of the array member.
+ * @count: Number of elements in the array; must be compile-time const.
+ *
+ * Define a zeroed, on-stack instance of the @type structure with a trailing
+ * flexible array member.
+ * Use __struct_size(@name) to get compile-time size of it afterwards.
+ */
+#define DEFINE_FLEX(type, name, member, count) \
+ _DEFINE_FLEX(type, name, member, count, = {})
+
#endif /* __LINUX_OVERFLOW_H */
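
A sketch of DEFINE_FLEX() in use inside a function; struct demo_hdr is hypothetical:

	struct demo_hdr {
		u32 count;
		u32 vals[];
	};

	DEFINE_FLEX(struct demo_hdr, hdr, vals, 4);	/* zeroed, room for 4 vals */
	hdr->count = 4;
	/* __struct_size(hdr) == struct_size_t(struct demo_hdr, vals, 4) */
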
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 92a2063a0a23..735cddc13d20 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -99,13 +99,15 @@
*/
enum pageflags {
PG_locked, /* Page is locked. Don't touch. */
+ PG_writeback, /* Page is under writeback */
PG_referenced,
PG_uptodate,
PG_dirty,
PG_lru,
+ PG_head, /* Must be in bit 6 */
+ PG_waiters, /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
PG_active,
PG_workingset,
- PG_waiters, /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
PG_error,
PG_slab,
PG_owner_priv_1, /* Owner use. If pagecache, fs may use*/
@@ -113,8 +115,6 @@ enum pageflags {
PG_reserved,
PG_private, /* If pagecache, has fs-private data */
PG_private_2, /* If pagecache, has fs aux data */
- PG_writeback, /* Page is under writeback */
- PG_head, /* A head page */
PG_mappedtodisk, /* Has blocks allocated on-disk */
PG_reclaim, /* To be reclaimed asap */
PG_swapbacked, /* Page is backed by RAM/swap */
@@ -171,15 +171,6 @@ enum pageflags {
/* Remapped by swiotlb-xen. */
PG_xen_remapped = PG_owner_priv_1,
-#ifdef CONFIG_MEMORY_FAILURE
- /*
- * Compound pages. Stored in first tail page's flags.
- * Indicates that at least one subpage is hwpoisoned in the
- * THP.
- */
- PG_has_hwpoisoned = PG_error,
-#endif
-
/* non-lru isolated movable page */
PG_isolated = PG_reclaim,
@@ -190,6 +181,17 @@ enum pageflags {
/* For self-hosted memmap pages */
PG_vmemmap_self_hosted = PG_owner_priv_1,
#endif
+
+ /*
+ * Flags only valid for compound pages. Stored in first tail page's
+ * flags word. Cannot use the first 8 flags or any flag marked as
+ * PF_ANY.
+ */
+
+ /* At least one page in this folio has the hwpoison flag set */
+ PG_has_hwpoisoned = PG_error,
+ PG_hugetlb = PG_active,
+ PG_large_rmappable = PG_workingset, /* anon or file-backed */
};
#define PAGEFLAGS_MASK ((1UL << NR_PAGEFLAGS) - 1)
@@ -691,6 +693,25 @@ TESTPAGEFLAG_FALSE(Ksm, ksm)
u64 stable_page_flags(struct page *page);
/**
+ * folio_xor_flags_has_waiters - Change some folio flags.
+ * @folio: The folio.
+ * @mask: Bits set in this word will be changed.
+ *
+ * This must only be used for flags which are changed with the folio
+ * lock held. For example, it is unsafe to use for PG_dirty as that
+ * can be set without the folio lock held. It can also only be used
+ * on flags which are in the range 0-6 as some of the implementations
+ * only affect those bits.
+ *
+ * Return: Whether there are tasks waiting on the folio.
+ */
+static inline bool folio_xor_flags_has_waiters(struct folio *folio,
+ unsigned long mask)
+{
+ return xor_unlock_is_negative_byte(mask, folio_flags(folio, 0));
+}
+
+/**
* folio_test_uptodate - Is this folio up to date?
* @folio: The folio.
*
@@ -751,19 +772,14 @@ static __always_inline void SetPageUptodate(struct page *page)
CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)
-bool __folio_start_writeback(struct folio *folio, bool keep_write);
-bool set_page_writeback(struct page *page);
+void __folio_start_writeback(struct folio *folio, bool keep_write);
+void set_page_writeback(struct page *page);
#define folio_start_writeback(folio) \
__folio_start_writeback(folio, false)
#define folio_start_writeback_keepwrite(folio) \
__folio_start_writeback(folio, true)
-static inline bool test_set_page_writeback(struct page *page)
-{
- return set_page_writeback(page);
-}
-
static __always_inline bool folio_test_head(struct folio *folio)
{
return test_bit(PG_head, folio_flags(folio, FOLIO_PF_ANY));
@@ -806,13 +822,32 @@ static inline void ClearPageCompound(struct page *page)
BUG_ON(!PageHead(page));
ClearPageHead(page);
}
+PAGEFLAG(LargeRmappable, large_rmappable, PF_SECOND)
+#else
+TESTPAGEFLAG_FALSE(LargeRmappable, large_rmappable)
#endif
#define PG_head_mask ((1UL << PG_head))
#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
-bool folio_test_hugetlb(struct folio *folio);
+SETPAGEFLAG(HugeTLB, hugetlb, PF_SECOND)
+CLEARPAGEFLAG(HugeTLB, hugetlb, PF_SECOND)
+
+/**
+ * folio_test_hugetlb - Determine if the folio belongs to hugetlbfs
+ * @folio: The folio to test.
+ *
+ * Context: Any context. Caller should have a reference on the folio to
+ * prevent it from being turned into a tail page.
+ * Return: True for hugetlbfs folios, false for anon folios or folios
+ * belonging to other filesystems.
+ */
+static inline bool folio_test_hugetlb(struct folio *folio)
+{
+ return folio_test_large(folio) &&
+ test_bit(PG_hugetlb, folio_flags(folio, 1));
+}
#else
TESTPAGEFLAG_FALSE(Huge, hugetlb)
#endif
@@ -832,11 +867,6 @@ static inline int PageTransHuge(struct page *page)
return PageHead(page);
}
-static inline bool folio_test_transhuge(struct folio *folio)
-{
- return folio_test_head(folio);
-}
-
/*
* PageTransCompound returns true for both transparent huge pages
* and hugetlbfs pages, so it should only be called when it's known
@@ -908,6 +938,8 @@ static inline bool is_page_hwpoison(struct page *page)
#define PageType(page, flag) \
((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
+#define folio_test_type(folio, flag) \
+ ((folio->page.page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
static inline int page_type_has_type(unsigned int page_type)
{
@@ -919,27 +951,41 @@ static inline int page_has_type(struct page *page)
return page_type_has_type(page->page_type);
}
-#define PAGE_TYPE_OPS(uname, lname) \
-static __always_inline int Page##uname(struct page *page) \
+#define PAGE_TYPE_OPS(uname, lname, fname) \
+static __always_inline int Page##uname(const struct page *page) \
{ \
return PageType(page, PG_##lname); \
} \
+static __always_inline int folio_test_##fname(const struct folio *folio)\
+{ \
+ return folio_test_type(folio, PG_##lname); \
+} \
static __always_inline void __SetPage##uname(struct page *page) \
{ \
VM_BUG_ON_PAGE(!PageType(page, 0), page); \
page->page_type &= ~PG_##lname; \
} \
+static __always_inline void __folio_set_##fname(struct folio *folio) \
+{ \
+ VM_BUG_ON_FOLIO(!folio_test_type(folio, 0), folio); \
+ folio->page.page_type &= ~PG_##lname; \
+} \
static __always_inline void __ClearPage##uname(struct page *page) \
{ \
VM_BUG_ON_PAGE(!Page##uname(page), page); \
page->page_type |= PG_##lname; \
-}
+} \
+static __always_inline void __folio_clear_##fname(struct folio *folio) \
+{ \
+ VM_BUG_ON_FOLIO(!folio_test_##fname(folio), folio); \
+ folio->page.page_type |= PG_##lname; \
+} \
/*
* PageBuddy() indicates that the page is free and in the buddy system
* (see mm/page_alloc.c).
*/
-PAGE_TYPE_OPS(Buddy, buddy)
+PAGE_TYPE_OPS(Buddy, buddy, buddy)
/*
* PageOffline() indicates that the page is logically offline although the
@@ -963,7 +1009,7 @@ PAGE_TYPE_OPS(Buddy, buddy)
* pages should check PageOffline() and synchronize with such drivers using
* page_offline_freeze()/page_offline_thaw().
*/
-PAGE_TYPE_OPS(Offline, offline)
+PAGE_TYPE_OPS(Offline, offline, offline)
extern void page_offline_freeze(void);
extern void page_offline_thaw(void);
@@ -973,12 +1019,12 @@ extern void page_offline_end(void);
/*
* Marks pages in use as page tables.
*/
-PAGE_TYPE_OPS(Table, table)
+PAGE_TYPE_OPS(Table, table, pgtable)
/*
* Marks guardpages used with debug_pagealloc.
*/
-PAGE_TYPE_OPS(Guard, guard)
+PAGE_TYPE_OPS(Guard, guard, guard)
extern bool is_free_buddy_page(struct page *page);
@@ -1040,6 +1086,14 @@ static __always_inline void __ClearPageAnonExclusive(struct page *page)
#define PAGE_FLAGS_CHECK_AT_PREP \
((PAGEFLAGS_MASK & ~__PG_HWPOISON) | LRU_GEN_MASK | LRU_REFS_MASK)
+/*
+ * Flags stored in the second page of a compound page. They may overlap
+ * the CHECK_AT_FREE flags above, so need to be cleared.
+ */
+#define PAGE_FLAGS_SECOND \
+ (0xffUL /* order */ | 1UL << PG_has_hwpoisoned | \
+ 1UL << PG_hugetlb | 1UL << PG_large_rmappable)
+
#define PAGE_FLAGS_PRIVATE \
(1UL << PG_private | 1UL << PG_private_2)
/**
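
To make the extra fname parameter concrete, a sketch of the accessors that PAGE_TYPE_OPS(Table, table, pgtable) now generates alongside the page variants (shown as declarations only):

	int PageTable(const struct page *page);
	int folio_test_pgtable(const struct folio *folio);
	void __folio_set_pgtable(struct folio *folio);
	void __folio_clear_pgtable(struct folio *folio);
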
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 5456b7be38ae..4ac34392823a 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -37,27 +37,12 @@ void set_pageblock_migratetype(struct page *page, int migratetype);
int move_freepages_block(struct zone *zone, struct page *page,
int migratetype, int *num_movable);
-/*
- * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
- */
-int
-start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
- int migratetype, int flags, gfp_t gfp_flags);
+int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
+ int migratetype, int flags, gfp_t gfp_flags);
-/*
- * Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE.
- * target range is [start_pfn, end_pfn)
- */
-void
-undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
- int migratetype);
+void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
+ int migratetype);
-/*
- * Test all pages in [start_pfn, end_pfn) are isolated or not.
- */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
int isol_flags);
-
-struct page *alloc_migrate_target(struct page *page, unsigned long private);
-
#endif
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index 67314f648aeb..be98564191e6 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -8,6 +8,7 @@
struct pglist_data;
+#ifdef CONFIG_PAGE_EXTENSION
/**
* struct page_ext_operations - per page_ext client operations
* @offset: Offset to the client's data within page_ext. Offset is returned to
@@ -29,8 +30,6 @@ struct page_ext_operations {
bool need_shared_flags;
};
-#ifdef CONFIG_PAGE_EXTENSION
-
/*
* The page_ext_flags users must set need_shared_flags to true.
*/
@@ -82,6 +81,12 @@ static inline void page_ext_init(void)
extern struct page_ext *page_ext_get(struct page *page);
extern void page_ext_put(struct page_ext *page_ext);
+static inline void *page_ext_data(struct page_ext *page_ext,
+ struct page_ext_operations *ops)
+{
+ return (void *)(page_ext) + ops->offset;
+}
+
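A hedged sketch of a client of the new page_ext_data() helper; struct my_data and my_ops are hypothetical stand-ins for a registered page_ext_operations and its per-page payload:

struct my_data {				/* hypothetical client payload */
	unsigned long stamp;
};
extern struct page_ext_operations my_ops;	/* hypothetical; .size == sizeof(struct my_data) */

static inline struct my_data *get_my_data(struct page_ext *page_ext)
{
	/* ops->offset was assigned at page_ext init; no open-coded offsets */
	return page_ext_data(page_ext, &my_ops);
}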
static inline struct page_ext *page_ext_next(struct page_ext *curr)
{
void *next = curr;
diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h
index 5cb7bd2078ec..d8f344840643 100644
--- a/include/linux/page_idle.h
+++ b/include/linux/page_idle.h
@@ -144,9 +144,4 @@ static inline void set_page_idle(struct page *page)
{
folio_set_idle(page_folio(page));
}
-
-static inline void clear_page_idle(struct page *page)
-{
- folio_clear_idle(page_folio(page));
-}
#endif /* _LINUX_MM_PAGE_IDLE_H */
diff --git a/include/linux/page_table_check.h b/include/linux/page_table_check.h
index 01e16c7696ec..6722941c7cb8 100644
--- a/include/linux/page_table_check.h
+++ b/include/linux/page_table_check.h
@@ -14,18 +14,13 @@ extern struct static_key_true page_table_check_disabled;
extern struct page_ext_operations page_table_check_ops;
void __page_table_check_zero(struct page *page, unsigned int order);
-void __page_table_check_pte_clear(struct mm_struct *mm, unsigned long addr,
- pte_t pte);
-void __page_table_check_pmd_clear(struct mm_struct *mm, unsigned long addr,
- pmd_t pmd);
-void __page_table_check_pud_clear(struct mm_struct *mm, unsigned long addr,
- pud_t pud);
-void __page_table_check_pte_set(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte);
-void __page_table_check_pmd_set(struct mm_struct *mm, unsigned long addr,
- pmd_t *pmdp, pmd_t pmd);
-void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
- pud_t *pudp, pud_t pud);
+void __page_table_check_pte_clear(struct mm_struct *mm, pte_t pte);
+void __page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd);
+void __page_table_check_pud_clear(struct mm_struct *mm, pud_t pud);
+void __page_table_check_ptes_set(struct mm_struct *mm, pte_t *ptep, pte_t pte,
+ unsigned int nr);
+void __page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd);
+void __page_table_check_pud_set(struct mm_struct *mm, pud_t *pudp, pud_t pud);
void __page_table_check_pte_clear_range(struct mm_struct *mm,
unsigned long addr,
pmd_t pmd);
@@ -46,61 +41,55 @@ static inline void page_table_check_free(struct page *page, unsigned int order)
__page_table_check_zero(page, order);
}
-static inline void page_table_check_pte_clear(struct mm_struct *mm,
- unsigned long addr, pte_t pte)
+static inline void page_table_check_pte_clear(struct mm_struct *mm, pte_t pte)
{
if (static_branch_likely(&page_table_check_disabled))
return;
- __page_table_check_pte_clear(mm, addr, pte);
+ __page_table_check_pte_clear(mm, pte);
}
-static inline void page_table_check_pmd_clear(struct mm_struct *mm,
- unsigned long addr, pmd_t pmd)
+static inline void page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd)
{
if (static_branch_likely(&page_table_check_disabled))
return;
- __page_table_check_pmd_clear(mm, addr, pmd);
+ __page_table_check_pmd_clear(mm, pmd);
}
-static inline void page_table_check_pud_clear(struct mm_struct *mm,
- unsigned long addr, pud_t pud)
+static inline void page_table_check_pud_clear(struct mm_struct *mm, pud_t pud)
{
if (static_branch_likely(&page_table_check_disabled))
return;
- __page_table_check_pud_clear(mm, addr, pud);
+ __page_table_check_pud_clear(mm, pud);
}
-static inline void page_table_check_pte_set(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep,
- pte_t pte)
+static inline void page_table_check_ptes_set(struct mm_struct *mm,
+ pte_t *ptep, pte_t pte, unsigned int nr)
{
if (static_branch_likely(&page_table_check_disabled))
return;
- __page_table_check_pte_set(mm, addr, ptep, pte);
+ __page_table_check_ptes_set(mm, ptep, pte, nr);
}
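The set-side hook is now batched and address-free: one call validates @nr consecutive PTEs. A sketch of the call-site shape, modelled loosely on a generic set_ptes() implementation:

static inline void example_set_ptes(struct mm_struct *mm, pte_t *ptep,
				    pte_t pte, unsigned int nr)
{
	/* check all nr entries starting at ptep in a single call ... */
	page_table_check_ptes_set(mm, ptep, pte, nr);
	/* ... then perform the arch-specific stores for ptep[0..nr-1] */
}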
-static inline void page_table_check_pmd_set(struct mm_struct *mm,
- unsigned long addr, pmd_t *pmdp,
+static inline void page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp,
pmd_t pmd)
{
if (static_branch_likely(&page_table_check_disabled))
return;
- __page_table_check_pmd_set(mm, addr, pmdp, pmd);
+ __page_table_check_pmd_set(mm, pmdp, pmd);
}
-static inline void page_table_check_pud_set(struct mm_struct *mm,
- unsigned long addr, pud_t *pudp,
+static inline void page_table_check_pud_set(struct mm_struct *mm, pud_t *pudp,
pud_t pud)
{
if (static_branch_likely(&page_table_check_disabled))
return;
- __page_table_check_pud_set(mm, addr, pudp, pud);
+ __page_table_check_pud_set(mm, pudp, pud);
}
static inline void page_table_check_pte_clear_range(struct mm_struct *mm,
@@ -123,35 +112,29 @@ static inline void page_table_check_free(struct page *page, unsigned int order)
{
}
-static inline void page_table_check_pte_clear(struct mm_struct *mm,
- unsigned long addr, pte_t pte)
+static inline void page_table_check_pte_clear(struct mm_struct *mm, pte_t pte)
{
}
-static inline void page_table_check_pmd_clear(struct mm_struct *mm,
- unsigned long addr, pmd_t pmd)
+static inline void page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd)
{
}
-static inline void page_table_check_pud_clear(struct mm_struct *mm,
- unsigned long addr, pud_t pud)
+static inline void page_table_check_pud_clear(struct mm_struct *mm, pud_t pud)
{
}
-static inline void page_table_check_pte_set(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep,
- pte_t pte)
+static inline void page_table_check_ptes_set(struct mm_struct *mm,
+ pte_t *ptep, pte_t pte, unsigned int nr)
{
}
-static inline void page_table_check_pmd_set(struct mm_struct *mm,
- unsigned long addr, pmd_t *pmdp,
+static inline void page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp,
pmd_t pmd)
{
}
-static inline void page_table_check_pud_set(struct mm_struct *mm,
- unsigned long addr, pud_t *pudp,
+static inline void page_table_check_pud_set(struct mm_struct *mm, pud_t *pudp,
pud_t pud)
{
}
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
index e83c4c095041..3f2409b968ec 100644
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -41,14 +41,14 @@ extern unsigned int pageblock_order;
* Huge pages are a constant size, but don't exceed the maximum allocation
* granularity.
*/
-#define pageblock_order min_t(unsigned int, HUGETLB_PAGE_ORDER, MAX_ORDER)
+#define pageblock_order min_t(unsigned int, HUGETLB_PAGE_ORDER, MAX_PAGE_ORDER)
#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
#else /* CONFIG_HUGETLB_PAGE */
/* If huge pages are not used, group by MAX_ORDER_NR_PAGES */
-#define pageblock_order MAX_ORDER
+#define pageblock_order MAX_PAGE_ORDER
#endif /* CONFIG_HUGETLB_PAGE */
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index a56308a9d1a4..2df35e65557d 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -30,6 +30,9 @@ static inline void invalidate_remote_inode(struct inode *inode)
int invalidate_inode_pages2(struct address_space *mapping);
int invalidate_inode_pages2_range(struct address_space *mapping,
pgoff_t start, pgoff_t end);
+int kiocb_invalidate_pages(struct kiocb *iocb, size_t count);
+void kiocb_invalidate_post_direct_write(struct kiocb *iocb, size_t count);
+
int write_inode_now(struct inode *, int sync);
int filemap_fdatawrite(struct address_space *);
int filemap_flush(struct address_space *);
@@ -54,6 +57,7 @@ int filemap_check_errors(struct address_space *mapping);
void __filemap_set_wb_err(struct address_space *mapping, int err);
int filemap_fdatawrite_wbc(struct address_space *mapping,
struct writeback_control *wbc);
+int kiocb_write_and_wait(struct kiocb *iocb, size_t count);
static inline int filemap_write_and_wait(struct address_space *mapping)
{
@@ -199,6 +203,10 @@ enum mapping_flags {
/* writeback related tags are not used */
AS_NO_WRITEBACK_TAGS = 5,
AS_LARGE_FOLIO_SUPPORT = 6,
+ AS_RELEASE_ALWAYS, /* Call ->release_folio(), even if no private data */
+ AS_STABLE_WRITES, /* must wait for writeback before modifying
+ folio contents */
+ AS_UNMOVABLE, /* The mapping cannot be moved, ever */
};
/**
@@ -269,6 +277,52 @@ static inline int mapping_use_writeback_tags(struct address_space *mapping)
return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}
+static inline bool mapping_release_always(const struct address_space *mapping)
+{
+ return test_bit(AS_RELEASE_ALWAYS, &mapping->flags);
+}
+
+static inline void mapping_set_release_always(struct address_space *mapping)
+{
+ set_bit(AS_RELEASE_ALWAYS, &mapping->flags);
+}
+
+static inline void mapping_clear_release_always(struct address_space *mapping)
+{
+ clear_bit(AS_RELEASE_ALWAYS, &mapping->flags);
+}
+
+static inline bool mapping_stable_writes(const struct address_space *mapping)
+{
+ return test_bit(AS_STABLE_WRITES, &mapping->flags);
+}
+
+static inline void mapping_set_stable_writes(struct address_space *mapping)
+{
+ set_bit(AS_STABLE_WRITES, &mapping->flags);
+}
+
+static inline void mapping_clear_stable_writes(struct address_space *mapping)
+{
+ clear_bit(AS_STABLE_WRITES, &mapping->flags);
+}
+
+static inline void mapping_set_unmovable(struct address_space *mapping)
+{
+ /*
+	 * It's expected that unmovable mappings are also unevictable. The
+	 * compaction migrate scanner (isolate_migratepages_block()) relies
+	 * on this to reduce page locking.
+ */
+ set_bit(AS_UNEVICTABLE, &mapping->flags);
+ set_bit(AS_UNMOVABLE, &mapping->flags);
+}
+
+static inline bool mapping_unmovable(struct address_space *mapping)
+{
+ return test_bit(AS_UNMOVABLE, &mapping->flags);
+}
+
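A brief sketch of a filesystem opting in to the new mapping flags at inode setup time; example_init_mapping() is hypothetical:

static void example_init_mapping(struct address_space *mapping)
{
	/* data is checksummed at writeback time: writers must wait for it */
	mapping_set_stable_writes(mapping);
	/* ->release_folio() must be called even without private data */
	mapping_set_release_always(mapping);
}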
static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
return mapping->gfp_mask;
@@ -369,23 +423,31 @@ static inline struct address_space *folio_file_mapping(struct folio *folio)
return folio->mapping;
}
-static inline struct address_space *page_file_mapping(struct page *page)
-{
- return folio_file_mapping(page_folio(page));
-}
-
-/*
- * For file cache pages, return the address_space, otherwise return NULL
+/**
+ * folio_flush_mapping - Find the file mapping this folio belongs to.
+ * @folio: The folio.
+ *
+ * For folios which are in the page cache, return the mapping that this
+ * folio belongs to. Anonymous folios return NULL, even if they're in
+ * the swap cache. Other kinds of folio also return NULL.
+ *
+ * This is ONLY used by architecture cache flushing code. If you aren't
+ * writing cache flushing code, you want either folio_mapping() or
+ * folio_file_mapping().
*/
-static inline struct address_space *page_mapping_file(struct page *page)
+static inline struct address_space *folio_flush_mapping(struct folio *folio)
{
- struct folio *folio = page_folio(page);
-
if (unlikely(folio_test_swapcache(folio)))
return NULL;
+
return folio_mapping(folio);
}
+static inline struct address_space *page_file_mapping(struct page *page)
+{
+ return folio_file_mapping(page_folio(page));
+}
+
/**
* folio_inode - Get the host inode for this folio.
* @folio: The folio.
@@ -466,6 +528,19 @@ static inline void *detach_page_private(struct page *page)
return folio_detach_private(page_folio(page));
}
+/*
+ * There are some parts of the kernel which assume that PMD entries
+ * are exactly HPAGE_PMD_ORDER. Those should be fixed, but until then,
+ * limit the maximum allocation order to PMD size. I'm not aware of any
+ * assumptions about maximum order if THP is disabled, but 8 seems like
+ * a good order (that's 1MB if you're using 4kB pages).
+ */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define MAX_PAGECACHE_ORDER HPAGE_PMD_ORDER
+#else
+#define MAX_PAGECACHE_ORDER 8
+#endif
+
#ifdef CONFIG_NUMA
struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order);
#else
@@ -497,22 +572,69 @@ pgoff_t page_cache_next_miss(struct address_space *mapping,
pgoff_t page_cache_prev_miss(struct address_space *mapping,
pgoff_t index, unsigned long max_scan);
-#define FGP_ACCESSED 0x00000001
-#define FGP_LOCK 0x00000002
-#define FGP_CREAT 0x00000004
-#define FGP_WRITE 0x00000008
-#define FGP_NOFS 0x00000010
-#define FGP_NOWAIT 0x00000020
-#define FGP_FOR_MMAP 0x00000040
-#define FGP_STABLE 0x00000080
+/**
+ * typedef fgf_t - Flags for getting folios from the page cache.
+ *
+ * Most users of the page cache will not need to use these flags;
+ * there are convenience functions such as filemap_get_folio() and
+ * filemap_lock_folio(). For users which need more control over exactly
+ * what is done with the folios, these flags to __filemap_get_folio()
+ * are available.
+ *
+ * * %FGP_ACCESSED - The folio will be marked accessed.
+ * * %FGP_LOCK - The folio is returned locked.
+ * * %FGP_CREAT - If no folio is present then a new folio is allocated,
+ * added to the page cache and the VM's LRU list. The folio is
+ * returned locked.
+ * * %FGP_FOR_MMAP - The caller wants to do its own locking dance if the
+ * folio is already in cache. If the folio was allocated, unlock it
+ * before returning so the caller can do the same dance.
+ * * %FGP_WRITE - The folio will be written to by the caller.
+ * * %FGP_NOFS - __GFP_FS will get cleared in gfp.
+ * * %FGP_NOWAIT - Don't block on the folio lock.
+ * * %FGP_STABLE - Wait for the folio to be stable (finished writeback).
+ * * %FGP_WRITEBEGIN - The flags to use in a filesystem write_begin()
+ * implementation.
+ */
+typedef unsigned int __bitwise fgf_t;
+
+#define FGP_ACCESSED ((__force fgf_t)0x00000001)
+#define FGP_LOCK ((__force fgf_t)0x00000002)
+#define FGP_CREAT ((__force fgf_t)0x00000004)
+#define FGP_WRITE ((__force fgf_t)0x00000008)
+#define FGP_NOFS ((__force fgf_t)0x00000010)
+#define FGP_NOWAIT ((__force fgf_t)0x00000020)
+#define FGP_FOR_MMAP ((__force fgf_t)0x00000040)
+#define FGP_STABLE ((__force fgf_t)0x00000080)
+#define FGF_GET_ORDER(fgf) (((__force unsigned)fgf) >> 26) /* top 6 bits */
#define FGP_WRITEBEGIN (FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE)
+/**
+ * fgf_set_order - Encode a length in the fgf_t flags.
+ * @size: The suggested size of the folio to create.
+ *
+ * The caller of __filemap_get_folio() can use this to suggest a preferred
+ * size for the folio that is created. If there is already a folio at
+ * the index, it will be returned, no matter what its size. If a folio
+ * is freshly created, it may be of a different size than requested
+ * due to alignment constraints, memory pressure, or the presence of
+ * other folios at nearby indices.
+ */
+static inline fgf_t fgf_set_order(size_t size)
+{
+ unsigned int shift = ilog2(size);
+
+ if (shift <= PAGE_SHIFT)
+ return 0;
+ return (__force fgf_t)((shift - PAGE_SHIFT) << 26);
+}
+
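A sketch of a write_begin()-style caller encoding its preferred folio size in the flags; example_grab_folio() is hypothetical, but the pattern follows the kernel-doc above:

static struct folio *example_grab_folio(struct address_space *mapping,
					loff_t pos, size_t len)
{
	fgf_t fgp = FGP_WRITEBEGIN | fgf_set_order(len);

	/* may return a large folio; an existing folio at this index wins */
	return __filemap_get_folio(mapping, pos >> PAGE_SHIFT, fgp,
				   mapping_gfp_mask(mapping));
}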
void *filemap_get_entry(struct address_space *mapping, pgoff_t index);
struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
- int fgp_flags, gfp_t gfp);
+ fgf_t fgp_flags, gfp_t gfp);
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
- int fgp_flags, gfp_t gfp);
+ fgf_t fgp_flags, gfp_t gfp);
/**
* filemap_get_folio - Find and get a folio.
@@ -586,7 +708,7 @@ static inline struct page *find_get_page(struct address_space *mapping,
}
static inline struct page *find_get_page_flags(struct address_space *mapping,
- pgoff_t offset, int fgp_flags)
+ pgoff_t offset, fgf_t fgp_flags)
{
return pagecache_get_page(mapping, offset, fgp_flags, 0);
}
@@ -701,9 +823,6 @@ static inline pgoff_t folio_next_index(struct folio *folio)
*/
static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
{
- /* HugeTLBfs indexes the page cache in units of hpage_size */
- if (folio_test_hugetlb(folio))
- return &folio->page;
return folio_page(folio, index & (folio_nr_pages(folio) - 1));
}
@@ -719,9 +838,6 @@ static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
*/
static inline bool folio_contains(struct folio *folio, pgoff_t index)
{
- /* HugeTLBfs indexes the page cache in units of hpage_size */
- if (folio_test_hugetlb(folio))
- return folio->index == index;
return index - folio_index(folio) < folio_nr_pages(folio);
}
@@ -779,10 +895,9 @@ static inline struct folio *read_mapping_folio(struct address_space *mapping,
}
/*
- * Get index of the page within radix-tree (but not for hugetlb pages).
- * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
+ * Get the offset in PAGE_SIZE (even for hugetlb pages).
*/
-static inline pgoff_t page_to_index(struct page *page)
+static inline pgoff_t page_to_pgoff(struct page *page)
{
struct page *head;
@@ -797,19 +912,6 @@ static inline pgoff_t page_to_index(struct page *page)
return head->index + page - head;
}
-extern pgoff_t hugetlb_basepage_index(struct page *page);
-
-/*
- * Get the offset in PAGE_SIZE (even for hugetlb pages).
- * (TODO: hugetlb pages should have ->index in PAGE_SIZE)
- */
-static inline pgoff_t page_to_pgoff(struct page *page)
-{
- if (unlikely(PageHuge(page)))
- return hugetlb_basepage_index(page);
- return page_to_index(page);
-}
-
/*
* Return byte-offset into filesystem object for page.
*/
@@ -846,24 +948,16 @@ static inline loff_t folio_file_pos(struct folio *folio)
/*
* Get the offset in PAGE_SIZE (even for hugetlb folios).
- * (TODO: hugetlb folios should have ->index in PAGE_SIZE)
*/
static inline pgoff_t folio_pgoff(struct folio *folio)
{
- if (unlikely(folio_test_hugetlb(folio)))
- return hugetlb_basepage_index(&folio->page);
return folio->index;
}
-extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
- unsigned long address);
-
static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
unsigned long address)
{
pgoff_t pgoff;
- if (unlikely(is_vm_hugetlb_page(vma)))
- return linear_hugepage_index(vma, address);
pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
pgoff += vma->vm_pgoff;
return pgoff;
@@ -896,8 +990,7 @@ static inline bool wake_page_match(struct wait_page_queue *wait_page,
void __folio_lock(struct folio *folio);
int __folio_lock_killable(struct folio *folio);
-bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm,
- unsigned int flags);
+vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf);
void unlock_page(struct page *page);
void folio_unlock(struct folio *folio);
@@ -1001,11 +1094,13 @@ static inline int folio_lock_killable(struct folio *folio)
* Return value and mmap_lock implications depend on flags; see
* __folio_lock_or_retry().
*/
-static inline bool folio_lock_or_retry(struct folio *folio,
- struct mm_struct *mm, unsigned int flags)
+static inline vm_fault_t folio_lock_or_retry(struct folio *folio,
+ struct vm_fault *vmf)
{
might_sleep();
- return folio_trylock(folio) || __folio_lock_or_retry(folio, mm, flags);
+ if (!folio_trylock(folio))
+ return __folio_lock_or_retry(folio, vmf);
+ return 0;
}
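Since the helper now returns a vm_fault_t, a fault path can propagate the result directly instead of re-deriving it from a bool; a minimal sketch with example_fault_path() as a hypothetical caller:

static vm_fault_t example_fault_path(struct folio *folio, struct vm_fault *vmf)
{
	vm_fault_t ret = folio_lock_or_retry(folio, vmf);

	if (ret)
		return ret;	/* typically VM_FAULT_RETRY */
	/* ... work on the locked folio, then folio_unlock(folio) ... */
	return 0;
}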
/*
@@ -1040,11 +1135,7 @@ static inline void wait_on_page_locked(struct page *page)
folio_wait_locked(page_folio(page));
}
-static inline int wait_on_page_locked_killable(struct page *page)
-{
- return folio_wait_locked_killable(page_folio(page));
-}
-
+void folio_end_read(struct folio *folio, bool success);
void wait_on_page_writeback(struct page *page);
void folio_wait_writeback(struct folio *folio);
int folio_wait_writeback_killable(struct folio *folio);
@@ -1078,8 +1169,6 @@ int filemap_migrate_folio(struct address_space *mapping, struct folio *dst,
#else
#define filemap_migrate_folio NULL
#endif
-void page_endio(struct page *page, bool is_write, int err);
-
void folio_end_private_2(struct folio *folio);
void folio_wait_private_2(struct folio *folio);
int folio_wait_private_2_killable(struct folio *folio);
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index f582f7213ea5..87cc678adc85 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -3,65 +3,18 @@
* include/linux/pagevec.h
*
* In many places it is efficient to batch an operation up against multiple
- * pages. A pagevec is a multipage container which is used for that.
+ * folios. A folio_batch is a container which is used for that.
*/
#ifndef _LINUX_PAGEVEC_H
#define _LINUX_PAGEVEC_H
-#include <linux/xarray.h>
+#include <linux/types.h>
-/* 15 pointers + header align the pagevec structure to a power of two */
+/* 15 pointers + header align the folio_batch structure to a power of two */
#define PAGEVEC_SIZE 15
-struct page;
struct folio;
-struct address_space;
-
-/* Layout must match folio_batch */
-struct pagevec {
- unsigned char nr;
- bool percpu_pvec_drained;
- struct page *pages[PAGEVEC_SIZE];
-};
-
-void __pagevec_release(struct pagevec *pvec);
-
-static inline void pagevec_init(struct pagevec *pvec)
-{
- pvec->nr = 0;
- pvec->percpu_pvec_drained = false;
-}
-
-static inline void pagevec_reinit(struct pagevec *pvec)
-{
- pvec->nr = 0;
-}
-
-static inline unsigned pagevec_count(struct pagevec *pvec)
-{
- return pvec->nr;
-}
-
-static inline unsigned pagevec_space(struct pagevec *pvec)
-{
- return PAGEVEC_SIZE - pvec->nr;
-}
-
-/*
- * Add a page to a pagevec. Returns the number of slots still available.
- */
-static inline unsigned pagevec_add(struct pagevec *pvec, struct page *page)
-{
- pvec->pages[pvec->nr++] = page;
- return pagevec_space(pvec);
-}
-
-static inline void pagevec_release(struct pagevec *pvec)
-{
- if (pagevec_count(pvec))
- __pagevec_release(pvec);
-}
/**
* struct folio_batch - A collection of folios.
@@ -78,11 +31,6 @@ struct folio_batch {
struct folio *folios[PAGEVEC_SIZE];
};
-/* Layout must match pagevec */
-static_assert(sizeof(struct pagevec) == sizeof(struct folio_batch));
-static_assert(offsetof(struct pagevec, pages) ==
- offsetof(struct folio_batch, folios));
-
/**
* folio_batch_init() - Initialise a batch of folios
* @fbatch: The folio batch.
@@ -105,7 +53,7 @@ static inline unsigned int folio_batch_count(struct folio_batch *fbatch)
return fbatch->nr;
}
-static inline unsigned int fbatch_space(struct folio_batch *fbatch)
+static inline unsigned int folio_batch_space(struct folio_batch *fbatch)
{
return PAGEVEC_SIZE - fbatch->nr;
}
@@ -124,12 +72,15 @@ static inline unsigned folio_batch_add(struct folio_batch *fbatch,
struct folio *folio)
{
fbatch->folios[fbatch->nr++] = folio;
- return fbatch_space(fbatch);
+ return folio_batch_space(fbatch);
}
+void __folio_batch_release(struct folio_batch *fbatch);
+
static inline void folio_batch_release(struct folio_batch *fbatch)
{
- pagevec_release((struct pagevec *)fbatch);
+ if (folio_batch_count(fbatch))
+ __folio_batch_release(fbatch);
}
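A minimal usage sketch of the batch API, with example_put_folios() as a hypothetical caller that drops references in bulk:

static void example_put_folios(struct folio **folios, unsigned int n)
{
	struct folio_batch fbatch;
	unsigned int i;

	folio_batch_init(&fbatch);
	for (i = 0; i < n; i++) {
		/* folio_batch_add() returns the slots still available */
		if (!folio_batch_add(&fbatch, folios[i]))
			folio_batch_release(&fbatch);	/* full: drop refs */
	}
	folio_batch_release(&fbatch);	/* drop the remainder */
}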
void folio_batch_remove_exceptionals(struct folio_batch *fbatch);
diff --git a/include/linux/pagewalk.h b/include/linux/pagewalk.h
index 27a6df448ee5..27cd1e59ccf7 100644
--- a/include/linux/pagewalk.h
+++ b/include/linux/pagewalk.h
@@ -6,6 +6,16 @@
struct mm_walk;
+/* Locking requirement during a page walk. */
+enum page_walk_lock {
+ /* mmap_lock should be locked for read to stabilize the vma tree */
+ PGWALK_RDLOCK = 0,
+ /* vma will be write-locked during the walk */
+ PGWALK_WRLOCK = 1,
+ /* vma is expected to be already write-locked during the walk */
+ PGWALK_WRLOCK_VERIFY = 2,
+};
+
/**
* struct mm_walk_ops - callbacks for walk_page_range
* @pgd_entry: if set, called for each non-empty PGD (top-level) entry
@@ -66,6 +76,7 @@ struct mm_walk_ops {
int (*pre_vma)(unsigned long start, unsigned long end,
struct mm_walk *walk);
void (*post_vma)(struct mm_walk *walk);
+ enum page_walk_lock walk_lock;
};
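A sketch of how a walker declares its locking contract via the new member; example_pte_entry() is a hypothetical callback:

static int example_pte_entry(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	return 0;	/* inspect *pte here */
}

static const struct mm_walk_ops example_walk_ops = {
	.pte_entry	= example_pte_entry,
	.walk_lock	= PGWALK_RDLOCK,	/* caller holds mmap_lock for read */
};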
/*
diff --git a/include/linux/panic.h b/include/linux/panic.h
index 979b776e3bcb..6717b15e798c 100644
--- a/include/linux/panic.h
+++ b/include/linux/panic.h
@@ -32,6 +32,9 @@ extern int sysctl_panic_on_stackoverflow;
extern bool crash_kexec_post_notifiers;
+extern void __stack_chk_fail(void);
+void abort(void);
+
/*
* panic_cpu is used for synchronizing panic() and crash_kexec() execution. It
* holds a CPU number which is executing panic() currently. A value of
diff --git a/include/linux/parport.h b/include/linux/parport.h
index a0bc9e0267b7..fff39bc30629 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -514,7 +514,7 @@ extern int parport_device_proc_register(struct pardevice *device);
extern int parport_device_proc_unregister(struct pardevice *device);
/* If PC hardware is the only type supported, we can optimise a bit. */
-#if !defined(CONFIG_PARPORT_NOT_PC)
+#if !defined(CONFIG_PARPORT_NOT_PC) && defined(CONFIG_PARPORT_PC)
#include <linux/parport_pc.h>
#define parport_write_data(p,x) parport_pc_write_data(p,x)
diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h
index 6b1301e2498e..3a4860bd2758 100644
--- a/include/linux/pci-ecam.h
+++ b/include/linux/pci-ecam.h
@@ -93,6 +93,6 @@ extern const struct pci_ecam_ops loongson_pci_ecam_ops; /* Loongson PCIe */
#if IS_ENABLED(CONFIG_PCI_HOST_COMMON)
/* for DT-based PCI controllers that support ECAM */
int pci_host_common_probe(struct platform_device *pdev);
-int pci_host_common_remove(struct platform_device *pdev);
+void pci_host_common_remove(struct platform_device *pdev);
#endif
#endif
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index 301bb0e53707..40ea18f5aa02 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -19,13 +19,6 @@ enum pci_epc_interface_type {
SECONDARY_INTERFACE,
};
-enum pci_epc_irq_type {
- PCI_EPC_IRQ_UNKNOWN,
- PCI_EPC_IRQ_LEGACY,
- PCI_EPC_IRQ_MSI,
- PCI_EPC_IRQ_MSIX,
-};
-
static inline const char *
pci_epc_interface_string(enum pci_epc_interface_type type)
{
@@ -79,7 +72,7 @@ struct pci_epc_ops {
u16 interrupts, enum pci_barno, u32 offset);
int (*get_msix)(struct pci_epc *epc, u8 func_no, u8 vfunc_no);
int (*raise_irq)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
- enum pci_epc_irq_type type, u16 interrupt_num);
+ unsigned int type, u16 interrupt_num);
int (*map_msi_irq)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t phys_addr, u8 interrupt_num,
u32 entry_size, u32 *msi_data,
@@ -122,7 +115,7 @@ struct pci_epc_mem {
* struct pci_epc - represents the PCI EPC device
* @dev: PCI EPC device
* @pci_epf: list of endpoint functions present in this EPC device
- * list_lock: Mutex for protecting pci_epf list
+ * @list_lock: Mutex for protecting pci_epf list
* @ops: function pointers for performing endpoint operations
* @windows: array of address space of the endpoint controller
* @mem: first window of the endpoint controller, which corresponds to
@@ -203,7 +196,9 @@ void pci_epc_destroy(struct pci_epc *epc);
int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
enum pci_epc_interface_type type);
void pci_epc_linkup(struct pci_epc *epc);
+void pci_epc_linkdown(struct pci_epc *epc);
void pci_epc_init_notify(struct pci_epc *epc);
+void pci_epc_bme_notify(struct pci_epc *epc);
void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf,
enum pci_epc_interface_type type);
int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
@@ -227,7 +222,7 @@ int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t phys_addr, u8 interrupt_num,
u32 entry_size, u32 *msi_data, u32 *msi_addr_offset);
int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
- enum pci_epc_irq_type type, u16 interrupt_num);
+ unsigned int type, u16 interrupt_num);
int pci_epc_start(struct pci_epc *epc);
void pci_epc_stop(struct pci_epc *epc);
const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h
index a215dc8ce693..77b146e0f672 100644
--- a/include/linux/pci-epf.h
+++ b/include/linux/pci-epf.h
@@ -68,13 +68,17 @@ struct pci_epf_ops {
};
/**
- * struct pci_epf_event_ops - Callbacks for capturing the EPC events
+ * struct pci_epc_event_ops - Callbacks for capturing the EPC events
* @core_init: Callback for the EPC initialization complete event
* @link_up: Callback for the EPC link up event
+ * @link_down: Callback for the EPC link down event
+ * @bme: Callback for the EPC BME (Bus Master Enable) event
*/
struct pci_epc_event_ops {
int (*core_init)(struct pci_epf *epf);
int (*link_up)(struct pci_epf *epf);
+ int (*link_down)(struct pci_epf *epf);
+ int (*bme)(struct pci_epf *epf);
};
/**
@@ -89,11 +93,12 @@ struct pci_epc_event_ops {
* @id_table: identifies EPF devices for probing
*/
struct pci_epf_driver {
- int (*probe)(struct pci_epf *epf);
+ int (*probe)(struct pci_epf *epf,
+ const struct pci_epf_device_id *id);
void (*remove)(struct pci_epf *epf);
struct device_driver driver;
- struct pci_epf_ops *ops;
+ const struct pci_epf_ops *ops;
struct module *owner;
struct list_head epf_group;
const struct pci_epf_device_id *id_table;
@@ -131,6 +136,7 @@ struct pci_epf_bar {
* @epc: the EPC device to which this EPF device is bound
* @epf_pf: the physical EPF device to which this virtual EPF device is bound
* @driver: the EPF driver to which this EPF device is bound
+ * @id: Pointer to the EPF device ID
* @list: to add pci_epf as a list of PCI endpoint functions to pci_epc
* @lock: mutex to protect pci_epf_ops
* @sec_epc: the secondary EPC device to which this EPF device is bound
@@ -158,6 +164,7 @@ struct pci_epf {
struct pci_epc *epc;
struct pci_epf *epf_pf;
struct pci_epf_driver *driver;
+ const struct pci_epf_device_id *id;
struct list_head list;
/* mutex to protect against concurrent access of pci_epf_ops */
struct mutex lock;
@@ -214,8 +221,6 @@ void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar,
enum pci_epc_interface_type type);
int pci_epf_bind(struct pci_epf *epf);
void pci_epf_unbind(struct pci_epf *epf);
-struct config_group *pci_epf_type_add_cfs(struct pci_epf *epf,
- struct config_group *group);
int pci_epf_add_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf);
void pci_epf_remove_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf);
#endif /* __LINUX_PCI_EPF_H */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 60b8772b5bd4..7ab0d13672da 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -23,7 +23,7 @@
#ifndef LINUX_PCI_H
#define LINUX_PCI_H
-
+#include <linux/args.h>
#include <linux/mod_devicetable.h>
#include <linux/types.h>
@@ -366,8 +366,8 @@ struct pci_dev {
pci_power_t current_state; /* Current operating state. In ACPI,
this is D0-D3, D0 being fully
functional, and D3 being off. */
- unsigned int imm_ready:1; /* Supports Immediate Readiness */
u8 pm_cap; /* PM capability offset */
+ unsigned int imm_ready:1; /* Supports Immediate Readiness */
unsigned int pme_support:5; /* Bitmask of states from which PME#
can be generated */
unsigned int pme_poll:1; /* Poll device's PME status bit */
@@ -392,9 +392,9 @@ struct pci_dev {
#ifdef CONFIG_PCIEASPM
struct pcie_link_state *link_state; /* ASPM link state */
+ u16 l1ss; /* L1SS Capability pointer */
unsigned int ltr_path:1; /* Latency Tolerance Reporting
supported from root to here */
- u16 l1ss; /* L1SS Capability pointer */
#endif
unsigned int pasid_no_tlp:1; /* PASID works without TLP Prefix */
unsigned int eetlp_prefix_path:1; /* End-to-End TLP Prefix */
@@ -464,12 +464,13 @@ struct pci_dev {
unsigned int no_vf_scan:1; /* Don't scan for VFs after IOV enablement */
unsigned int no_command_memory:1; /* No PCI_COMMAND_MEMORY */
unsigned int rom_bar_overlap:1; /* ROM BAR disable broken */
+ unsigned int rom_attr_enabled:1; /* Display of ROM attribute enabled? */
pci_dev_flags_t dev_flags;
atomic_t enable_cnt; /* pci_enable_device has been called */
+ spinlock_t pcie_cap_lock; /* Protects RMW ops in capability accessors */
u32 saved_config_space[16]; /* Config space saved at suspend time */
struct hlist_head saved_cap_space;
- int rom_attr_enabled; /* Display of ROM attribute enabled? */
struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
@@ -712,6 +713,31 @@ static inline bool pci_is_bridge(struct pci_dev *dev)
dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
}
+/**
+ * pci_is_vga - check if the PCI device is a VGA device
+ * @pdev: PCI device
+ *
+ * The PCI Code and ID Assignment spec, r1.15, secs 1.4 and 1.1, define
+ * VGA Base Class and Sub-Classes:
+ *
+ * 03 00 PCI_CLASS_DISPLAY_VGA VGA-compatible or 8514-compatible
+ * 00 01 PCI_CLASS_NOT_DEFINED_VGA VGA-compatible (before Class Code)
+ *
+ * Return true if the PCI device is a VGA device and uses the legacy VGA
+ * resources ([mem 0xa0000-0xbffff], [io 0x3b0-0x3bb], [io 0x3c0-0x3df] and
+ * aliases).
+ */
+static inline bool pci_is_vga(struct pci_dev *pdev)
+{
+ if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
+ return true;
+
+ if ((pdev->class >> 8) == PCI_CLASS_NOT_DEFINED_VGA)
+ return true;
+
+ return false;
+}
+
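A small sketch using the new helper; example_count_vga() is hypothetical and iterates with pci_get_device(), which manages the device reference counts across the loop:

static unsigned int example_count_vga(void)
{
	struct pci_dev *pdev = NULL;
	unsigned int n = 0;

	while ((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pdev)))
		if (pci_is_vga(pdev))
			n++;
	return n;
}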
#define for_each_pci_bridge(dev, bus) \
list_for_each_entry(dev, &bus->devices, bus_list) \
if (!pci_is_bridge(dev)) {} else
@@ -860,7 +886,6 @@ struct module;
/**
* struct pci_driver - PCI driver structure
- * @node: List of driver structures.
* @name: Driver name.
* @id_table: Pointer to table of device IDs the driver is
* interested in. Most drivers should export this
@@ -915,7 +940,6 @@ struct module;
* own I/O address space.
*/
struct pci_driver {
- struct list_head node;
const char *name;
const struct pci_device_id *id_table; /* Must be non-NULL for probe to be called */
int (*probe)(struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */
@@ -1048,11 +1072,13 @@ enum {
PCI_SCAN_ALL_PCIE_DEVS = 0x00000040, /* Scan all, not just dev 0 */
};
-#define PCI_IRQ_LEGACY (1 << 0) /* Allow legacy interrupts */
+#define PCI_IRQ_INTX (1 << 0) /* Allow INTx interrupts */
#define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */
#define PCI_IRQ_MSIX (1 << 2) /* Allow MSI-X interrupts */
#define PCI_IRQ_AFFINITY (1 << 3) /* Auto-assign affinity */
+#define PCI_IRQ_LEGACY PCI_IRQ_INTX /* Deprecated! Use PCI_IRQ_INTX */
+
/* These external functions are only available when PCI support is enabled */
#ifdef CONFIG_PCI
@@ -1145,6 +1171,7 @@ int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
struct pci_dev *pci_dev_get(struct pci_dev *dev);
void pci_dev_put(struct pci_dev *dev);
+DEFINE_FREE(pci_dev_put, struct pci_dev *, if (_T) pci_dev_put(_T))
void pci_remove_bus(struct pci_bus *b);
void pci_stop_and_remove_bus_device(struct pci_dev *dev);
void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev);
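The DEFINE_FREE() above enables scope-based cleanup from <linux/cleanup.h>; a hedged sketch with example_find_dev() as a hypothetical caller:

static int example_find_dev(unsigned int vendor, unsigned int device)
{
	struct pci_dev *pdev __free(pci_dev_put) =
			pci_get_device(vendor, device, NULL);

	if (!pdev)
		return -ENODEV;
	/* use pdev; the reference is dropped on every return path */
	return 0;
}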
@@ -1180,6 +1207,8 @@ struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn);
struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
unsigned int devfn);
struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from);
+struct pci_dev *pci_get_base_class(unsigned int class, struct pci_dev *from);
+
int pci_dev_present(const struct pci_device_id *ids);
int pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn,
@@ -1212,16 +1241,47 @@ int pci_read_config_dword(const struct pci_dev *dev, int where, u32 *val);
int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val);
int pci_write_config_word(const struct pci_dev *dev, int where, u16 val);
int pci_write_config_dword(const struct pci_dev *dev, int where, u32 val);
+void pci_clear_and_set_config_dword(const struct pci_dev *dev, int pos,
+ u32 clear, u32 set);
int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val);
-int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
- u16 clear, u16 set);
+int pcie_capability_clear_and_set_word_unlocked(struct pci_dev *dev, int pos,
+ u16 clear, u16 set);
+int pcie_capability_clear_and_set_word_locked(struct pci_dev *dev, int pos,
+ u16 clear, u16 set);
int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
u32 clear, u32 set);
+/**
+ * pcie_capability_clear_and_set_word - RMW accessor for PCI Express Capability Registers
+ * @dev: PCI device structure of the PCI Express device
+ * @pos: PCI Express Capability Register
+ * @clear: Clear bitmask
+ * @set: Set bitmask
+ *
+ * Perform a Read-Modify-Write (RMW) operation using @clear and @set
+ * bitmasks on PCI Express Capability Register at @pos. Certain PCI Express
+ * Capability Registers are accessed concurrently in RMW fashion, hence
+ * require locking which is handled transparently to the caller.
+ */
+static inline int pcie_capability_clear_and_set_word(struct pci_dev *dev,
+ int pos,
+ u16 clear, u16 set)
+{
+ switch (pos) {
+ case PCI_EXP_LNKCTL:
+ case PCI_EXP_RTCTL:
+ return pcie_capability_clear_and_set_word_locked(dev, pos,
+ clear, set);
+ default:
+ return pcie_capability_clear_and_set_word_unlocked(dev, pos,
+ clear, set);
+ }
+}
+
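A sketch of a caller relying on the transparent locking; example_disable_aspm_l1() is hypothetical:

static void example_disable_aspm_l1(struct pci_dev *dev)
{
	/* @pos is PCI_EXP_LNKCTL, so this routes to the _locked variant
	 * and takes pcie_cap_lock internally */
	pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
					   PCI_EXP_LNKCTL_ASPM_L1, 0);
}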
static inline int pcie_capability_set_word(struct pci_dev *dev, int pos,
u16 set)
{
@@ -1308,6 +1368,7 @@ int pcie_set_mps(struct pci_dev *dev, int mps);
u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
enum pci_bus_speed *speed,
enum pcie_link_width *width);
+int pcie_link_speed_mbps(struct pci_dev *pdev);
void pcie_print_link_status(struct pci_dev *dev);
int pcie_reset_flr(struct pci_dev *dev, bool probe);
int pcie_flr(struct pci_dev *dev);
@@ -1361,6 +1422,7 @@ int pci_load_and_free_saved_state(struct pci_dev *dev,
struct pci_saved_state **state);
int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state);
int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
+int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state);
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
void pci_pme_active(struct pci_dev *dev, bool enable);
@@ -1403,7 +1465,6 @@ void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge);
void pci_assign_unassigned_bus_resources(struct pci_bus *bus);
void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus);
int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type);
-void pdev_enable_device(struct pci_dev *);
int pci_enable_resources(struct pci_dev *, int mask);
void pci_assign_irq(struct pci_dev *dev);
struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res);
@@ -1565,6 +1626,8 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
void *userdata);
+void pci_walk_bus_locked(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
+ void *userdata);
int pci_cfg_space_size(struct pci_dev *dev);
unsigned char pci_bus_max_busnr(struct pci_bus *bus);
void pci_setup_bridge(struct pci_bus *bus);
@@ -1595,6 +1658,8 @@ struct msix_entry {
u16 entry; /* Driver uses to specify entry, OS writes */
};
+struct msi_domain_template;
+
#ifdef CONFIG_PCI_MSI
int pci_msi_vec_count(struct pci_dev *dev);
void pci_disable_msi(struct pci_dev *dev);
@@ -1627,6 +1692,11 @@ void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map);
void pci_free_irq_vectors(struct pci_dev *dev);
int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec);
+bool pci_create_ims_domain(struct pci_dev *pdev, const struct msi_domain_template *template,
+ unsigned int hwsize, void *data);
+struct msi_map pci_ims_alloc_irq(struct pci_dev *pdev, union msi_instance_cookie *icookie,
+ const struct irq_affinity_desc *affdesc);
+void pci_ims_free_irq(struct pci_dev *pdev, struct msi_map map);
#else
static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
@@ -1690,6 +1760,25 @@ static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev,
{
return cpu_possible_mask;
}
+
+static inline bool pci_create_ims_domain(struct pci_dev *pdev,
+ const struct msi_domain_template *template,
+ unsigned int hwsize, void *data)
+{ return false; }
+
+static inline struct msi_map pci_ims_alloc_irq(struct pci_dev *pdev,
+ union msi_instance_cookie *icookie,
+ const struct irq_affinity_desc *affdesc)
+{
+ struct msi_map map = { .index = -ENOSYS, };
+
+ return map;
+}
+
+static inline void pci_ims_free_irq(struct pci_dev *pdev, struct msi_map map)
+{
+}
+
#endif
/**
@@ -1748,6 +1837,7 @@ extern bool pcie_ports_native;
int pci_disable_link_state(struct pci_dev *pdev, int state);
int pci_disable_link_state_locked(struct pci_dev *pdev, int state);
int pci_enable_link_state(struct pci_dev *pdev, int state);
+int pci_enable_link_state_locked(struct pci_dev *pdev, int state);
void pcie_no_aspm(void);
bool pcie_aspm_support_enabled(void);
bool pcie_aspm_enabled(struct pci_dev *pdev);
@@ -1758,6 +1848,8 @@ static inline int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
{ return 0; }
static inline int pci_enable_link_state(struct pci_dev *pdev, int state)
{ return 0; }
+static inline int pci_enable_link_state_locked(struct pci_dev *pdev, int state)
+{ return 0; }
static inline void pcie_no_aspm(void) { }
static inline bool pcie_aspm_support_enabled(void) { return false; }
static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; }
@@ -1790,6 +1882,7 @@ void pci_cfg_access_unlock(struct pci_dev *dev);
void pci_dev_lock(struct pci_dev *dev);
int pci_dev_trylock(struct pci_dev *dev);
void pci_dev_unlock(struct pci_dev *dev);
+DEFINE_GUARD(pci_dev, struct pci_dev *, pci_dev_lock(_T), pci_dev_unlock(_T))
/*
* PCI domain support. Sometimes called PCI segment (eg by ACPI),
@@ -1895,6 +1988,9 @@ static inline struct pci_dev *pci_get_class(unsigned int class,
struct pci_dev *from)
{ return NULL; }
+static inline struct pci_dev *pci_get_base_class(unsigned int class,
+ struct pci_dev *from)
+{ return NULL; }
static inline int pci_dev_present(const struct pci_device_id *ids)
{ return 0; }
@@ -1903,6 +1999,7 @@ static inline int pci_dev_present(const struct pci_device_id *ids)
#define pci_dev_put(dev) do { } while (0)
static inline void pci_set_master(struct pci_dev *dev) { }
+static inline void pci_clear_master(struct pci_dev *dev) { }
static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
static inline void pci_disable_device(struct pci_dev *dev) { }
static inline int pcim_enable_device(struct pci_dev *pdev) { return -EIO; }
@@ -1931,6 +2028,8 @@ static inline int pci_save_state(struct pci_dev *dev) { return 0; }
static inline void pci_restore_state(struct pci_dev *dev) { }
static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{ return 0; }
+static inline int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state)
+{ return 0; }
static inline int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{ return 0; }
static inline pci_power_t pci_choose_state(struct pci_dev *dev,
@@ -2042,14 +2141,14 @@ int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma);
(pci_resource_end((dev), (bar)) ? \
resource_size(pci_resource_n((dev), (bar))) : 0)
-#define __pci_dev_for_each_res0(dev, res, ...) \
- for (unsigned int __b = 0; \
- res = pci_resource_n(dev, __b), __b < PCI_NUM_RESOURCES; \
+#define __pci_dev_for_each_res0(dev, res, ...) \
+ for (unsigned int __b = 0; \
+ __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \
__b++)
-#define __pci_dev_for_each_res1(dev, res, __b) \
- for (__b = 0; \
- res = pci_resource_n(dev, __b), __b < PCI_NUM_RESOURCES; \
+#define __pci_dev_for_each_res1(dev, res, __b) \
+ for (__b = 0; \
+ __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \
__b++)
#define pci_dev_for_each_resource(dev, res, ...) \
@@ -2259,6 +2358,11 @@ int pcibios_alloc_irq(struct pci_dev *dev);
void pcibios_free_irq(struct pci_dev *dev);
resource_size_t pcibios_default_alignment(void);
+#if !defined(HAVE_PCI_MMAP) && !defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)
+extern int pci_create_resource_files(struct pci_dev *dev);
+extern void pci_remove_resource_files(struct pci_dev *dev);
+#endif
+
#if defined(CONFIG_PCI_MMCONFIG) || defined(CONFIG_ACPI_MCFG)
void __init pci_mmcfg_early_init(void);
void __init pci_mmcfg_late_init(void);
@@ -2581,14 +2685,6 @@ static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type);
#endif
-struct msi_domain_template;
-
-bool pci_create_ims_domain(struct pci_dev *pdev, const struct msi_domain_template *template,
- unsigned int hwsize, void *data);
-struct msi_map pci_ims_alloc_irq(struct pci_dev *pdev, union msi_instance_cookie *icookie,
- const struct irq_affinity_desc *affdesc);
-void pci_ims_free_irq(struct pci_dev *pdev, struct msi_map map);
-
#include <linux/dma-mapping.h>
#define pci_printk(level, pdev, fmt, arg...) \
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 95f33dadb2be..a0c75e467df3 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2,7 +2,7 @@
/*
* PCI Class, Vendor and Device IDs
*
- * Please keep sorted.
+ * Please keep sorted by numeric Vendor ID and Device ID.
*
* Do not add new entries to this file unless the definitions
* are shared between multiple drivers.
@@ -151,6 +151,9 @@
#define PCI_CLASS_SP_DPIO 0x1100
#define PCI_CLASS_SP_OTHER 0x1180
+#define PCI_BASE_CLASS_ACCELERATOR 0x12
+#define PCI_CLASS_ACCELERATOR_PROCESSING 0x1200
+
#define PCI_CLASS_OTHERS 0xff
/* Vendors and devices. Sort key: vendor first, device next. */
@@ -158,6 +161,11 @@
#define PCI_VENDOR_ID_LOONGSON 0x0014
+#define PCI_DEVICE_ID_LOONGSON_HDA 0x7a07
+#define PCI_DEVICE_ID_LOONGSON_HDMI 0x7a37
+
+#define PCI_VENDOR_ID_SOLIDIGM 0x025e
+
#define PCI_VENDOR_ID_TTTECH 0x0357
#define PCI_DEVICE_ID_TTTECH_MC322 0x000a
@@ -172,6 +180,8 @@
#define PCI_DEVICE_ID_BERKOM_A4T 0xffa4
#define PCI_DEVICE_ID_BERKOM_SCITEL_QUADRO 0xffa8
+#define PCI_VENDOR_ID_ITTIM 0x0b48
+
#define PCI_VENDOR_ID_COMPAQ 0x0e11
#define PCI_DEVICE_ID_COMPAQ_TOKENRING 0x0508
#define PCI_DEVICE_ID_COMPAQ_TACHYON 0xa0fc
@@ -568,6 +578,11 @@
#define PCI_DEVICE_ID_AMD_19H_M60H_DF_F3 0x14e3
#define PCI_DEVICE_ID_AMD_19H_M70H_DF_F3 0x14f3
#define PCI_DEVICE_ID_AMD_19H_M78H_DF_F3 0x12fb
+#define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3 0x12c3
+#define PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3 0x16fb
+#define PCI_DEVICE_ID_AMD_MI200_DF_F3 0x14d3
+#define PCI_DEVICE_ID_AMD_MI300_DF_F3 0x152b
+#define PCI_DEVICE_ID_AMD_VANGOGH_USB 0x163a
#define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
#define PCI_DEVICE_ID_AMD_LANCE 0x2000
#define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001
@@ -1751,6 +1766,10 @@
#define PCI_SUBDEVICE_ID_AT_2700FX 0x2701
#define PCI_SUBDEVICE_ID_AT_2701FX 0x2703
+#define PCI_VENDOR_ID_ASIX 0x125b
+#define PCI_DEVICE_ID_ASIX_AX99100 0x9100
+#define PCI_DEVICE_ID_ASIX_AX99100_LB 0x9110
+
#define PCI_VENDOR_ID_ESS 0x125d
#define PCI_DEVICE_ID_ESS_ESS1968 0x1968
#define PCI_DEVICE_ID_ESS_ESS1978 0x1978
@@ -2586,6 +2605,8 @@
#define PCI_VENDOR_ID_TEKRAM 0x1de1
#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29
+#define PCI_VENDOR_ID_ALIBABA 0x1ded
+
#define PCI_VENDOR_ID_TEHUTI 0x1fc9
#define PCI_DEVICE_ID_TEHUTI_3009 0x3009
#define PCI_DEVICE_ID_TEHUTI_3010 0x3010
@@ -2635,6 +2656,7 @@
#define PCI_VENDOR_ID_INTEL 0x8086
#define PCI_DEVICE_ID_INTEL_EESSC 0x0008
+#define PCI_DEVICE_ID_INTEL_HDA_CML_LP 0x02c8
#define PCI_DEVICE_ID_INTEL_PXHD_0 0x0320
#define PCI_DEVICE_ID_INTEL_PXHD_1 0x0321
#define PCI_DEVICE_ID_INTEL_PXH_0 0x0329
@@ -2650,8 +2672,10 @@
#define PCI_DEVICE_ID_INTEL_82424 0x0483
#define PCI_DEVICE_ID_INTEL_82378 0x0484
#define PCI_DEVICE_ID_INTEL_82425 0x0486
+#define PCI_DEVICE_ID_INTEL_HDA_CML_H 0x06c8
#define PCI_DEVICE_ID_INTEL_MRST_SD0 0x0807
#define PCI_DEVICE_ID_INTEL_MRST_SD1 0x0808
+#define PCI_DEVICE_ID_INTEL_HDA_OAKTRAIL 0x080a
#define PCI_DEVICE_ID_INTEL_MFD_SD 0x0820
#define PCI_DEVICE_ID_INTEL_MFD_SDIO1 0x0821
#define PCI_DEVICE_ID_INTEL_MFD_SDIO2 0x0822
@@ -2661,15 +2685,19 @@
#define PCI_DEVICE_ID_INTEL_QUARK_X1000_ILB 0x095e
#define PCI_DEVICE_ID_INTEL_I960 0x0960
#define PCI_DEVICE_ID_INTEL_I960RM 0x0962
+#define PCI_DEVICE_ID_INTEL_HDA_HSW_0 0x0a0c
+#define PCI_DEVICE_ID_INTEL_HDA_HSW_2 0x0c0c
#define PCI_DEVICE_ID_INTEL_CENTERTON_ILB 0x0c60
+#define PCI_DEVICE_ID_INTEL_HDA_HSW_3 0x0d0c
+#define PCI_DEVICE_ID_INTEL_HDA_BYT 0x0f04
+#define PCI_DEVICE_ID_INTEL_SST_BYT 0x0f28
#define PCI_DEVICE_ID_INTEL_8257X_SOL 0x1062
#define PCI_DEVICE_ID_INTEL_82573E_SOL 0x1085
#define PCI_DEVICE_ID_INTEL_82573L_SOL 0x108f
#define PCI_DEVICE_ID_INTEL_82815_MC 0x1130
#define PCI_DEVICE_ID_INTEL_82815_CGC 0x1132
+#define PCI_DEVICE_ID_INTEL_SST_TNG 0x119a
#define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221
-#define PCI_DEVICE_ID_INTEL_7505_0 0x2550
-#define PCI_DEVICE_ID_INTEL_7205_0 0x255d
#define PCI_DEVICE_ID_INTEL_82437 0x122d
#define PCI_DEVICE_ID_INTEL_82371FB_0 0x122e
#define PCI_DEVICE_ID_INTEL_82371FB_1 0x1230
@@ -2695,20 +2723,26 @@
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE 0x1576
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI 0x1577
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE 0x1578
+#define PCI_DEVICE_ID_INTEL_HDA_BDW 0x160c
#define PCI_DEVICE_ID_INTEL_80960_RP 0x1960
#define PCI_DEVICE_ID_INTEL_QAT_C3XXX 0x19e2
#define PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF 0x19e3
#define PCI_DEVICE_ID_INTEL_82840_HB 0x1a21
#define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30
#define PCI_DEVICE_ID_INTEL_IOAT 0x1a38
+#define PCI_DEVICE_ID_INTEL_HDA_CPT 0x1c20
#define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MIN 0x1c41
#define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX 0x1c5f
+#define PCI_DEVICE_ID_INTEL_HDA_PBG 0x1d20
#define PCI_DEVICE_ID_INTEL_PATSBURG_LPC_0 0x1d40
#define PCI_DEVICE_ID_INTEL_PATSBURG_LPC_1 0x1d41
+#define PCI_DEVICE_ID_INTEL_HDA_PPT 0x1e20
#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI 0x1e31
#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MIN 0x1e40
#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MAX 0x1e5f
#define PCI_DEVICE_ID_INTEL_VMD_201D 0x201d
+#define PCI_DEVICE_ID_INTEL_HDA_BSW 0x2284
+#define PCI_DEVICE_ID_INTEL_SST_BSW 0x22a8
#define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MIN 0x2310
#define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MAX 0x231f
#define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410
@@ -2763,6 +2797,8 @@
#define PCI_DEVICE_ID_INTEL_82850_HB 0x2530
#define PCI_DEVICE_ID_INTEL_82860_HB 0x2531
#define PCI_DEVICE_ID_INTEL_E7501_MCH 0x254c
+#define PCI_DEVICE_ID_INTEL_7505_0 0x2550
+#define PCI_DEVICE_ID_INTEL_7205_0 0x255d
#define PCI_DEVICE_ID_INTEL_82845G_HB 0x2560
#define PCI_DEVICE_ID_INTEL_82845G_IG 0x2562
#define PCI_DEVICE_ID_INTEL_82865_HB 0x2570
@@ -2784,12 +2820,14 @@
#define PCI_DEVICE_ID_INTEL_ICH6_0 0x2640
#define PCI_DEVICE_ID_INTEL_ICH6_1 0x2641
#define PCI_DEVICE_ID_INTEL_ICH6_2 0x2642
+#define PCI_DEVICE_ID_INTEL_HDA_ICH6 0x2668
#define PCI_DEVICE_ID_INTEL_ICH6_16 0x266a
#define PCI_DEVICE_ID_INTEL_ICH6_17 0x266d
#define PCI_DEVICE_ID_INTEL_ICH6_18 0x266e
#define PCI_DEVICE_ID_INTEL_ICH6_19 0x266f
#define PCI_DEVICE_ID_INTEL_ESB2_0 0x2670
#define PCI_DEVICE_ID_INTEL_ESB2_14 0x2698
+#define PCI_DEVICE_ID_INTEL_HDA_ESB2 0x269a
#define PCI_DEVICE_ID_INTEL_ESB2_17 0x269b
#define PCI_DEVICE_ID_INTEL_ESB2_18 0x269e
#define PCI_DEVICE_ID_INTEL_82945G_HB 0x2770
@@ -2797,11 +2835,12 @@
#define PCI_DEVICE_ID_INTEL_3000_HB 0x2778
#define PCI_DEVICE_ID_INTEL_82945GM_HB 0x27a0
#define PCI_DEVICE_ID_INTEL_82945GM_IG 0x27a2
+#define PCI_DEVICE_ID_INTEL_ICH7_30 0x27b0
#define PCI_DEVICE_ID_INTEL_ICH7_0 0x27b8
#define PCI_DEVICE_ID_INTEL_ICH7_1 0x27b9
-#define PCI_DEVICE_ID_INTEL_ICH7_30 0x27b0
#define PCI_DEVICE_ID_INTEL_TGP_LPC 0x27bc
#define PCI_DEVICE_ID_INTEL_ICH7_31 0x27bd
+#define PCI_DEVICE_ID_INTEL_HDA_ICH7 0x27d8
#define PCI_DEVICE_ID_INTEL_ICH7_17 0x27da
#define PCI_DEVICE_ID_INTEL_ICH7_19 0x27dd
#define PCI_DEVICE_ID_INTEL_ICH7_20 0x27de
@@ -2812,17 +2851,20 @@
#define PCI_DEVICE_ID_INTEL_ICH8_3 0x2814
#define PCI_DEVICE_ID_INTEL_ICH8_4 0x2815
#define PCI_DEVICE_ID_INTEL_ICH8_5 0x283e
+#define PCI_DEVICE_ID_INTEL_HDA_ICH8 0x284b
#define PCI_DEVICE_ID_INTEL_ICH8_6 0x2850
#define PCI_DEVICE_ID_INTEL_VMD_28C0 0x28c0
#define PCI_DEVICE_ID_INTEL_ICH9_0 0x2910
-#define PCI_DEVICE_ID_INTEL_ICH9_1 0x2917
#define PCI_DEVICE_ID_INTEL_ICH9_2 0x2912
#define PCI_DEVICE_ID_INTEL_ICH9_3 0x2913
#define PCI_DEVICE_ID_INTEL_ICH9_4 0x2914
-#define PCI_DEVICE_ID_INTEL_ICH9_5 0x2919
-#define PCI_DEVICE_ID_INTEL_ICH9_6 0x2930
#define PCI_DEVICE_ID_INTEL_ICH9_7 0x2916
+#define PCI_DEVICE_ID_INTEL_ICH9_1 0x2917
#define PCI_DEVICE_ID_INTEL_ICH9_8 0x2918
+#define PCI_DEVICE_ID_INTEL_ICH9_5 0x2919
+#define PCI_DEVICE_ID_INTEL_ICH9_6 0x2930
+#define PCI_DEVICE_ID_INTEL_HDA_ICH9_0 0x293e
+#define PCI_DEVICE_ID_INTEL_HDA_ICH9_1 0x293f
#define PCI_DEVICE_ID_INTEL_I7_MCR 0x2c18
#define PCI_DEVICE_ID_INTEL_I7_MC_TAD 0x2c19
#define PCI_DEVICE_ID_INTEL_I7_MC_RAS 0x2c1a
@@ -2839,8 +2881,8 @@
#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR 0x2c31
#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK 0x2c32
#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC 0x2c33
-#define PCI_DEVICE_ID_INTEL_I7_NONCORE 0x2c41
#define PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT 0x2c40
+#define PCI_DEVICE_ID_INTEL_I7_NONCORE 0x2c41
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE 0x2c50
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT 0x2c51
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2 0x2c70
@@ -2874,6 +2916,7 @@
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2 0x2db1
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2 0x2db2
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2 0x2db3
+#define PCI_DEVICE_ID_INTEL_HDA_GML 0x3198
#define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340
#define PCI_DEVICE_ID_INTEL_IOAT_TBG4 0x3429
#define PCI_DEVICE_ID_INTEL_IOAT_TBG5 0x342a
@@ -2884,12 +2927,13 @@
#define PCI_DEVICE_ID_INTEL_IOAT_TBG1 0x3431
#define PCI_DEVICE_ID_INTEL_IOAT_TBG2 0x3432
#define PCI_DEVICE_ID_INTEL_IOAT_TBG3 0x3433
+#define PCI_DEVICE_ID_INTEL_HDA_ICL_LP 0x34c8
#define PCI_DEVICE_ID_INTEL_82830_HB 0x3575
#define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577
-#define PCI_DEVICE_ID_INTEL_82854_HB 0x358c
-#define PCI_DEVICE_ID_INTEL_82854_IG 0x358e
#define PCI_DEVICE_ID_INTEL_82855GM_HB 0x3580
#define PCI_DEVICE_ID_INTEL_82855GM_IG 0x3582
+#define PCI_DEVICE_ID_INTEL_82854_HB 0x358c
+#define PCI_DEVICE_ID_INTEL_82854_IG 0x358e
#define PCI_DEVICE_ID_INTEL_E7520_MCH 0x3590
#define PCI_DEVICE_ID_INTEL_E7320_MCH 0x3592
#define PCI_DEVICE_ID_INTEL_MCH_PA 0x3595
@@ -2899,11 +2943,11 @@
#define PCI_DEVICE_ID_INTEL_MCH_PC 0x3599
#define PCI_DEVICE_ID_INTEL_MCH_PC1 0x359a
#define PCI_DEVICE_ID_INTEL_E7525_MCH 0x359e
+#define PCI_DEVICE_ID_INTEL_IOAT_CNB 0x360b
+#define PCI_DEVICE_ID_INTEL_FBD_CNB 0x360c
#define PCI_DEVICE_ID_INTEL_I7300_MCH_ERR 0x360c
#define PCI_DEVICE_ID_INTEL_I7300_MCH_FB0 0x360f
#define PCI_DEVICE_ID_INTEL_I7300_MCH_FB1 0x3610
-#define PCI_DEVICE_ID_INTEL_IOAT_CNB 0x360b
-#define PCI_DEVICE_ID_INTEL_FBD_CNB 0x360c
#define PCI_DEVICE_ID_INTEL_IOAT_JSF0 0x3710
#define PCI_DEVICE_ID_INTEL_IOAT_JSF1 0x3711
#define PCI_DEVICE_ID_INTEL_IOAT_JSF2 0x3712
@@ -2916,14 +2960,19 @@
#define PCI_DEVICE_ID_INTEL_IOAT_JSF9 0x3719
#define PCI_DEVICE_ID_INTEL_QAT_C62X 0x37c8
#define PCI_DEVICE_ID_INTEL_QAT_C62X_VF 0x37c9
+#define PCI_DEVICE_ID_INTEL_HDA_ICL_N 0x38c8
#define PCI_DEVICE_ID_INTEL_ICH10_0 0x3a14
#define PCI_DEVICE_ID_INTEL_ICH10_1 0x3a16
#define PCI_DEVICE_ID_INTEL_ICH10_2 0x3a18
#define PCI_DEVICE_ID_INTEL_ICH10_3 0x3a1a
#define PCI_DEVICE_ID_INTEL_ICH10_4 0x3a30
+#define PCI_DEVICE_ID_INTEL_HDA_ICH10_0 0x3a3e
#define PCI_DEVICE_ID_INTEL_ICH10_5 0x3a60
+#define PCI_DEVICE_ID_INTEL_HDA_ICH10_1 0x3a6e
#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MIN 0x3b00
#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MAX 0x3b1f
+#define PCI_DEVICE_ID_INTEL_HDA_5_3400_SERIES_0 0x3b56
+#define PCI_DEVICE_ID_INTEL_HDA_5_3400_SERIES_1 0x3b57
#define PCI_DEVICE_ID_INTEL_IOAT_SNB0 0x3c20
#define PCI_DEVICE_ID_INTEL_IOAT_SNB1 0x3c21
#define PCI_DEVICE_ID_INTEL_IOAT_SNB2 0x3c22
@@ -2934,16 +2983,12 @@
#define PCI_DEVICE_ID_INTEL_IOAT_SNB7 0x3c27
#define PCI_DEVICE_ID_INTEL_IOAT_SNB8 0x3c2e
#define PCI_DEVICE_ID_INTEL_IOAT_SNB9 0x3c2f
-#define PCI_DEVICE_ID_INTEL_UNC_HA 0x3c46
-#define PCI_DEVICE_ID_INTEL_UNC_IMC0 0x3cb0
-#define PCI_DEVICE_ID_INTEL_UNC_IMC1 0x3cb1
-#define PCI_DEVICE_ID_INTEL_UNC_IMC2 0x3cb4
-#define PCI_DEVICE_ID_INTEL_UNC_IMC3 0x3cb5
#define PCI_DEVICE_ID_INTEL_UNC_QPI0 0x3c41
#define PCI_DEVICE_ID_INTEL_UNC_QPI1 0x3c42
#define PCI_DEVICE_ID_INTEL_UNC_R2PCIE 0x3c43
#define PCI_DEVICE_ID_INTEL_UNC_R3QPI0 0x3c44
#define PCI_DEVICE_ID_INTEL_UNC_R3QPI1 0x3c45
+#define PCI_DEVICE_ID_INTEL_UNC_HA 0x3c46
#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS 0x3c71 /* 15.1 */
#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR0 0x3c72 /* 16.2 */
#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR1 0x3c73 /* 16.3 */
@@ -2955,17 +3000,40 @@
#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1 0x3cab /* 15.3 */
#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2 0x3cac /* 15.4 */
#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3 0x3cad /* 15.5 */
+#define PCI_DEVICE_ID_INTEL_UNC_IMC0 0x3cb0
+#define PCI_DEVICE_ID_INTEL_UNC_IMC1 0x3cb1
+#define PCI_DEVICE_ID_INTEL_UNC_IMC2 0x3cb4
+#define PCI_DEVICE_ID_INTEL_UNC_IMC3 0x3cb5
#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO 0x3cb8 /* 17.0 */
#define PCI_DEVICE_ID_INTEL_JAKETOWN_UBOX 0x3ce0
#define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0 0x3cf4 /* 12.6 */
#define PCI_DEVICE_ID_INTEL_SBRIDGE_BR 0x3cf5 /* 13.6 */
#define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1 0x3cf6 /* 12.7 */
+#define PCI_DEVICE_ID_INTEL_HDA_ICL_H 0x3dc8
#define PCI_DEVICE_ID_INTEL_IOAT_SNB 0x402f
#define PCI_DEVICE_ID_INTEL_5400_ERR 0x4030
#define PCI_DEVICE_ID_INTEL_5400_FBD0 0x4035
#define PCI_DEVICE_ID_INTEL_5400_FBD1 0x4036
+#define PCI_DEVICE_ID_INTEL_HDA_TGL_H 0x43c8
+#define PCI_DEVICE_ID_INTEL_HDA_DG1 0x490d
+#define PCI_DEVICE_ID_INTEL_HDA_EHL_0 0x4b55
+#define PCI_DEVICE_ID_INTEL_HDA_EHL_3 0x4b58
+#define PCI_DEVICE_ID_INTEL_HDA_JSL_N 0x4dc8
+#define PCI_DEVICE_ID_INTEL_HDA_DG2_0 0x4f90
+#define PCI_DEVICE_ID_INTEL_HDA_DG2_1 0x4f91
+#define PCI_DEVICE_ID_INTEL_HDA_DG2_2 0x4f92
#define PCI_DEVICE_ID_INTEL_EP80579_0 0x5031
#define PCI_DEVICE_ID_INTEL_EP80579_1 0x5032
+#define PCI_DEVICE_ID_INTEL_HDA_ADL_P 0x51c8
+#define PCI_DEVICE_ID_INTEL_HDA_ADL_PS 0x51c9
+#define PCI_DEVICE_ID_INTEL_HDA_RPL_P_0 0x51ca
+#define PCI_DEVICE_ID_INTEL_HDA_RPL_P_1 0x51cb
+#define PCI_DEVICE_ID_INTEL_HDA_ADL_M 0x51cc
+#define PCI_DEVICE_ID_INTEL_HDA_ADL_PX 0x51cd
+#define PCI_DEVICE_ID_INTEL_HDA_RPL_M 0x51ce
+#define PCI_DEVICE_ID_INTEL_HDA_RPL_PX 0x51cf
+#define PCI_DEVICE_ID_INTEL_HDA_ADL_N 0x54c8
+#define PCI_DEVICE_ID_INTEL_HDA_APL 0x5a98
#define PCI_DEVICE_ID_INTEL_5100_16 0x65f0
#define PCI_DEVICE_ID_INTEL_5100_19 0x65f3
#define PCI_DEVICE_ID_INTEL_5100_21 0x65f5
@@ -2999,8 +3067,14 @@
#define PCI_DEVICE_ID_INTEL_82443GX_0 0x71a0
#define PCI_DEVICE_ID_INTEL_82443GX_2 0x71a2
#define PCI_DEVICE_ID_INTEL_82372FB_1 0x7601
+#define PCI_DEVICE_ID_INTEL_HDA_ARL 0x7728
+#define PCI_DEVICE_ID_INTEL_HDA_RPL_S 0x7a50
+#define PCI_DEVICE_ID_INTEL_HDA_ADL_S 0x7ad0
+#define PCI_DEVICE_ID_INTEL_HDA_MTL 0x7e28
+#define PCI_DEVICE_ID_INTEL_HDA_ARL_S 0x7f50
#define PCI_DEVICE_ID_INTEL_SCH_LPC 0x8119
#define PCI_DEVICE_ID_INTEL_SCH_IDE 0x811a
+#define PCI_DEVICE_ID_INTEL_HDA_POULSBO 0x811b
#define PCI_DEVICE_ID_INTEL_E6XX_CU 0x8183
#define PCI_DEVICE_ID_INTEL_ITC_LPC 0x8186
#define PCI_DEVICE_ID_INTEL_82454GX 0x84c4
@@ -3009,9 +3083,31 @@
#define PCI_DEVICE_ID_INTEL_82454NX 0x84cb
#define PCI_DEVICE_ID_INTEL_84460GX 0x84ea
#define PCI_DEVICE_ID_INTEL_IXP4XX 0x8500
+#define PCI_DEVICE_ID_INTEL_HDA_LPT 0x8c20
+#define PCI_DEVICE_ID_INTEL_HDA_9_SERIES 0x8ca0
+#define PCI_DEVICE_ID_INTEL_HDA_WBG_0 0x8d20
+#define PCI_DEVICE_ID_INTEL_HDA_WBG_1 0x8d21
#define PCI_DEVICE_ID_INTEL_IXP2800 0x9004
+#define PCI_DEVICE_ID_INTEL_HDA_LKF 0x98c8
#define PCI_DEVICE_ID_INTEL_VMD_9A0B 0x9a0b
+#define PCI_DEVICE_ID_INTEL_HDA_LPT_LP_0 0x9c20
+#define PCI_DEVICE_ID_INTEL_HDA_LPT_LP_1 0x9c21
+#define PCI_DEVICE_ID_INTEL_HDA_WPT_LP 0x9ca0
+#define PCI_DEVICE_ID_INTEL_HDA_SKL_LP 0x9d70
+#define PCI_DEVICE_ID_INTEL_HDA_KBL_LP 0x9d71
+#define PCI_DEVICE_ID_INTEL_HDA_CNL_LP 0x9dc8
+#define PCI_DEVICE_ID_INTEL_HDA_TGL_LP 0xa0c8
+#define PCI_DEVICE_ID_INTEL_HDA_SKL 0xa170
+#define PCI_DEVICE_ID_INTEL_HDA_KBL 0xa171
+#define PCI_DEVICE_ID_INTEL_HDA_LBG_0 0xa1f0
+#define PCI_DEVICE_ID_INTEL_HDA_LBG_1 0xa270
+#define PCI_DEVICE_ID_INTEL_HDA_KBL_H 0xa2f0
+#define PCI_DEVICE_ID_INTEL_HDA_CNL_H 0xa348
+#define PCI_DEVICE_ID_INTEL_HDA_CML_S 0xa3f0
+#define PCI_DEVICE_ID_INTEL_HDA_LNL_P 0xa828
#define PCI_DEVICE_ID_INTEL_S21152BB 0xb152
+#define PCI_DEVICE_ID_INTEL_HDA_CML_R 0xf0c8
+#define PCI_DEVICE_ID_INTEL_HDA_RKL_S 0xf1c8
#define PCI_VENDOR_ID_WANGXUN 0x8088
diff --git a/include/linux/pcs-altera-tse.h b/include/linux/pcs-altera-tse.h
deleted file mode 100644
index 92ab9f08e835..000000000000
--- a/include/linux/pcs-altera-tse.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2022 Bootlin
- *
- * Maxime Chevallier <maxime.chevallier@bootlin.com>
- */
-
-#ifndef __LINUX_PCS_ALTERA_TSE_H
-#define __LINUX_PCS_ALTERA_TSE_H
-
-struct phylink_pcs;
-struct net_device;
-
-struct phylink_pcs *alt_tse_pcs_create(struct net_device *ndev,
- void __iomem *pcs_base, int reg_width);
-
-#endif /* __LINUX_PCS_ALTERA_TSE_H */
diff --git a/include/linux/pcs-lynx.h b/include/linux/pcs-lynx.h
index 5712cc2ce775..7958cccd16f2 100644
--- a/include/linux/pcs-lynx.h
+++ b/include/linux/pcs-lynx.h
@@ -9,9 +9,8 @@
#include <linux/mdio.h>
#include <linux/phylink.h>
-struct mdio_device *lynx_get_mdio_device(struct phylink_pcs *pcs);
-
-struct phylink_pcs *lynx_pcs_create(struct mdio_device *mdio);
+struct phylink_pcs *lynx_pcs_create_mdiodev(struct mii_bus *bus, int addr);
+struct phylink_pcs *lynx_pcs_create_fwnode(struct fwnode_handle *node);
void lynx_pcs_destroy(struct phylink_pcs *pcs);
diff --git a/include/linux/pcs/pcs-xpcs.h b/include/linux/pcs/pcs-xpcs.h
index d2da1e0b4a92..da3a6c30f6d2 100644
--- a/include/linux/pcs/pcs-xpcs.h
+++ b/include/linux/pcs/pcs-xpcs.h
@@ -18,6 +18,13 @@
#define DW_AN_C37_SGMII 2
#define DW_2500BASEX 3
#define DW_AN_C37_1000BASEX 4
+#define DW_10GBASER 5
+
+/* device vendor OUI */
+#define DW_OUI_WX 0x0018fc80
+
+/* dev_flag */
+#define DW_DEV_TXGBE BIT(0)
struct xpcs_id;
@@ -25,18 +32,20 @@ struct dw_xpcs {
struct mdio_device *mdiodev;
const struct xpcs_id *id;
struct phylink_pcs pcs;
+ phy_interface_t interface;
+ int dev_flag;
};
int xpcs_get_an_mode(struct dw_xpcs *xpcs, phy_interface_t interface);
-void xpcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
+void xpcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface, int speed, int duplex);
int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface,
- unsigned int mode, const unsigned long *advertising);
+ const unsigned long *advertising, unsigned int neg_mode);
void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces);
int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns,
int enable);
-struct dw_xpcs *xpcs_create(struct mdio_device *mdiodev,
- phy_interface_t interface);
+struct dw_xpcs *xpcs_create_mdiodev(struct mii_bus *bus, int addr,
+ phy_interface_t interface);
void xpcs_destroy(struct dw_xpcs *xpcs);
#endif /* __LINUX_PCS_XPCS_H */
diff --git a/include/linux/pds/pds_adminq.h b/include/linux/pds/pds_adminq.h
index 98a60ce87b92..4b4e9a98b37b 100644
--- a/include/linux/pds/pds_adminq.h
+++ b/include/linux/pds/pds_adminq.h
@@ -222,6 +222,27 @@ enum pds_core_lif_type {
PDS_CORE_LIF_TYPE_DEFAULT = 0,
};
+#define PDS_CORE_IFNAMSIZ 16
+
+/**
+ * enum pds_core_logical_qtype - Logical Queue Types
+ * @PDS_CORE_QTYPE_ADMINQ: Administrative Queue
+ * @PDS_CORE_QTYPE_NOTIFYQ: Notify Queue
+ * @PDS_CORE_QTYPE_RXQ: Receive Queue
+ * @PDS_CORE_QTYPE_TXQ: Transmit Queue
+ * @PDS_CORE_QTYPE_EQ: Event Queue
+ * @PDS_CORE_QTYPE_MAX: Max queue type supported
+ */
+enum pds_core_logical_qtype {
+ PDS_CORE_QTYPE_ADMINQ = 0,
+ PDS_CORE_QTYPE_NOTIFYQ = 1,
+ PDS_CORE_QTYPE_RXQ = 2,
+ PDS_CORE_QTYPE_TXQ = 3,
+ PDS_CORE_QTYPE_EQ = 4,
+
+ PDS_CORE_QTYPE_MAX = 16 /* don't change - used in struct size */
+};
+
/**
* union pds_core_lif_config - LIF configuration
* @state: LIF state (enum pds_core_lif_state)
@@ -584,6 +605,580 @@ struct pds_core_q_init_comp {
u8 color;
};
+/*
+ * enum pds_vdpa_cmd_opcode - vDPA Device commands
+ */
+enum pds_vdpa_cmd_opcode {
+ PDS_VDPA_CMD_INIT = 48,
+ PDS_VDPA_CMD_IDENT = 49,
+ PDS_VDPA_CMD_RESET = 51,
+ PDS_VDPA_CMD_VQ_RESET = 52,
+ PDS_VDPA_CMD_VQ_INIT = 53,
+ PDS_VDPA_CMD_STATUS_UPDATE = 54,
+ PDS_VDPA_CMD_SET_FEATURES = 55,
+ PDS_VDPA_CMD_SET_ATTR = 56,
+};
+
+/**
+ * struct pds_vdpa_cmd - generic command
+ * @opcode: Opcode
+ * @vdpa_index: Index for vdpa subdevice
+ * @vf_id: VF id
+ */
+struct pds_vdpa_cmd {
+ u8 opcode;
+ u8 vdpa_index;
+ __le16 vf_id;
+};
+
+/**
+ * struct pds_vdpa_init_cmd - INIT command
+ * @opcode: Opcode PDS_VDPA_CMD_INIT
+ * @vdpa_index: Index for vdpa subdevice
+ * @vf_id: VF id
+ */
+struct pds_vdpa_init_cmd {
+ u8 opcode;
+ u8 vdpa_index;
+ __le16 vf_id;
+};
+
+/**
+ * struct pds_vdpa_ident - vDPA identification data
+ * @hw_features: vDPA features supported by device
+ * @max_vqs: max queues available (2 queues for a single queuepair)
+ * @max_qlen: log(2) of maximum number of descriptors
+ * @min_qlen: log(2) of minimum number of descriptors
+ *
+ * This struct is used in a DMA block that is set up for the PDS_VDPA_CMD_IDENT
+ * transaction. Set up the DMA block and send its address in the IDENT cmd
+ * data; the DSC will write the ident information, and the DMA block can be
+ * freed after reading the answer. If the completion status is 0 the data is
+ * valid, else there was an error and the data should be considered invalid.
+ */
+struct pds_vdpa_ident {
+ __le64 hw_features;
+ __le16 max_vqs;
+ __le16 max_qlen;
+ __le16 min_qlen;
+};
+
+/**
+ * struct pds_vdpa_ident_cmd - IDENT command
+ * @opcode: Opcode PDS_VDPA_CMD_IDENT
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @len: length of ident info DMA space
+ * @ident_pa: address for DMA of ident info (struct pds_vdpa_ident)
+ * only used for this transaction, then forgotten by DSC
+ */
+struct pds_vdpa_ident_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ __le32 len;
+ __le64 ident_pa;
+};
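
The kernel-doc above describes a one-shot DMA handshake; the following is a minimal sketch of that flow, assuming the pds_client_adminq_cmd() transport helper from pds_core (error paths and device plumbing are elided):

#include <linux/dma-mapping.h>
#include <linux/pds/pds_adminq.h>

/*
 * Sketch: fetch vDPA ident info over the adminq.  Assumes the
 * pds_client_adminq_cmd() helper from pds_core; not a complete driver path.
 */
static int example_vdpa_get_ident(struct pds_auxiliary_dev *padev,
				  struct device *dev, __le16 vf_id,
				  struct pds_vdpa_ident *res)
{
	union pds_core_adminq_cmd cmd = {
		.vdpa_ident.opcode = PDS_VDPA_CMD_IDENT,
		.vdpa_ident.vf_id = vf_id,
		.vdpa_ident.len = cpu_to_le32(sizeof(*res)),
	};
	union pds_core_adminq_comp comp = {};
	struct pds_vdpa_ident *ident;
	dma_addr_t ident_pa;
	int err;

	/* the DMA block only lives for this one transaction */
	ident = dma_alloc_coherent(dev, sizeof(*ident), &ident_pa, GFP_KERNEL);
	if (!ident)
		return -ENOMEM;

	cmd.vdpa_ident.ident_pa = cpu_to_le64(ident_pa);
	err = pds_client_adminq_cmd(padev, &cmd, sizeof(cmd.vdpa_ident), &comp, 0);
	if (!err)
		*res = *ident;	/* completion status 0 => data is valid */

	dma_free_coherent(dev, sizeof(*ident), ident, ident_pa);
	return err;
}
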
+
+/**
+ * struct pds_vdpa_status_cmd - STATUS_UPDATE command
+ * @opcode: Opcode PDS_VDPA_CMD_STATUS_UPDATE
+ * @vdpa_index: Index for vdpa subdevice
+ * @vf_id: VF id
+ * @status: new status bits
+ */
+struct pds_vdpa_status_cmd {
+ u8 opcode;
+ u8 vdpa_index;
+ __le16 vf_id;
+ u8 status;
+};
+
+/**
+ * enum pds_vdpa_attr - List of VDPA device attributes
+ * @PDS_VDPA_ATTR_MAC: MAC address
+ * @PDS_VDPA_ATTR_MAX_VQ_PAIRS: Max virtqueue pairs
+ */
+enum pds_vdpa_attr {
+ PDS_VDPA_ATTR_MAC = 1,
+ PDS_VDPA_ATTR_MAX_VQ_PAIRS = 2,
+};
+
+/**
+ * struct pds_vdpa_setattr_cmd - SET_ATTR command
+ * @opcode: Opcode PDS_VDPA_CMD_SET_ATTR
+ * @vdpa_index: Index for vdpa subdevice
+ * @vf_id: VF id
+ * @attr: attribute to be changed (enum pds_vdpa_attr)
+ * @pad: Word boundary padding
+ * @mac: new mac address to be assigned as vdpa device address
+ * @max_vq_pairs: new limit of virtqueue pairs
+ */
+struct pds_vdpa_setattr_cmd {
+ u8 opcode;
+ u8 vdpa_index;
+ __le16 vf_id;
+ u8 attr;
+ u8 pad[3];
+ union {
+ u8 mac[6];
+ __le16 max_vq_pairs;
+ } __packed;
+};
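
Because @attr selects which union member is meaningful, a caller fills only the matching field. A hedged sketch (the helper name is made up):

#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/pds/pds_adminq.h>

/* Sketch: build a SET_ATTR command that assigns a new MAC address */
static void example_fill_setattr_mac(struct pds_vdpa_setattr_cmd *cmd,
				     u8 vdpa_index, __le16 vf_id, const u8 *mac)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->opcode = PDS_VDPA_CMD_SET_ATTR;
	cmd->vdpa_index = vdpa_index;
	cmd->vf_id = vf_id;
	cmd->attr = PDS_VDPA_ATTR_MAC;
	ether_addr_copy(cmd->mac, mac);	/* union member selected by @attr */
}
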
+
+/**
+ * struct pds_vdpa_vq_init_cmd - queue init command
+ * @opcode: Opcode PDS_VDPA_CMD_VQ_INIT
+ * @vdpa_index: Index for vdpa subdevice
+ * @vf_id: VF id
+ * @qid: Queue id (bit0 clear = rx, bit0 set = tx, qid=N is ctrlq)
+ * @len: log(2) of max descriptor count
+ * @desc_addr: DMA address of descriptor area
+ * @avail_addr: DMA address of available descriptors (aka driver area)
+ * @used_addr: DMA address of used descriptors (aka device area)
+ * @intr_index: interrupt index
+ * @avail_index: initial device position in available ring
+ * @used_index: initial device position in used ring
+ */
+struct pds_vdpa_vq_init_cmd {
+ u8 opcode;
+ u8 vdpa_index;
+ __le16 vf_id;
+ __le16 qid;
+ __le16 len;
+ __le64 desc_addr;
+ __le64 avail_addr;
+ __le64 used_addr;
+ __le16 intr_index;
+ __le16 avail_index;
+ __le16 used_index;
+};
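
The bit0 encoding of @qid and the log2 @len invite a small sketch; this illustrative helper assumes power-of-two descriptor counts and leaves the interrupt and index fields at zero:

#include <linux/log2.h>
#include <linux/string.h>
#include <linux/pds/pds_adminq.h>

/* Sketch: rx/tx interleave on bit0 of @qid; @len is log2 of the ring size */
static void example_fill_vq_init(struct pds_vdpa_vq_init_cmd *cmd,
				 u8 vdpa_index, __le16 vf_id, u16 qpair,
				 bool is_tx, u16 desc_count, dma_addr_t desc,
				 dma_addr_t avail, dma_addr_t used)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->opcode = PDS_VDPA_CMD_VQ_INIT;
	cmd->vdpa_index = vdpa_index;
	cmd->vf_id = vf_id;
	cmd->qid = cpu_to_le16(qpair * 2 + (is_tx ? 1 : 0));
	cmd->len = cpu_to_le16(ilog2(desc_count));
	cmd->desc_addr = cpu_to_le64(desc);
	cmd->avail_addr = cpu_to_le64(avail);
	cmd->used_addr = cpu_to_le64(used);
}
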
+
+/**
+ * struct pds_vdpa_vq_init_comp - queue init completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @hw_qtype: HW queue type, used in doorbell selection
+ * @hw_qindex: HW queue index, used in doorbell selection
+ * @rsvd: Word boundary padding
+ * @color: Color bit
+ */
+struct pds_vdpa_vq_init_comp {
+ u8 status;
+ u8 hw_qtype;
+ __le16 hw_qindex;
+ u8 rsvd[11];
+ u8 color;
+};
+
+/**
+ * struct pds_vdpa_vq_reset_cmd - queue reset command
+ * @opcode: Opcode PDS_VDPA_CMD_VQ_RESET
+ * @vdpa_index: Index for vdpa subdevice
+ * @vf_id: VF id
+ * @qid: Queue id
+ */
+struct pds_vdpa_vq_reset_cmd {
+ u8 opcode;
+ u8 vdpa_index;
+ __le16 vf_id;
+ __le16 qid;
+};
+
+/**
+ * struct pds_vdpa_vq_reset_comp - queue reset completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @rsvd0: Word boundary padding
+ * @avail_index: current device position in available ring
+ * @used_index: current device position in used ring
+ * @rsvd: Word boundary padding
+ * @color: Color bit
+ */
+struct pds_vdpa_vq_reset_comp {
+ u8 status;
+ u8 rsvd0;
+ __le16 avail_index;
+ __le16 used_index;
+ u8 rsvd[9];
+ u8 color;
+};
+
+/**
+ * struct pds_vdpa_set_features_cmd - set hw features
+ * @opcode: Opcode PDS_VDPA_CMD_SET_FEATURES
+ * @vdpa_index: Index for vdpa subdevice
+ * @vf_id: VF id
+ * @rsvd: Word boundary padding
+ * @features: Feature bit mask
+ */
+struct pds_vdpa_set_features_cmd {
+ u8 opcode;
+ u8 vdpa_index;
+ __le16 vf_id;
+ __le32 rsvd;
+ __le64 features;
+};
+
+#define PDS_LM_DEVICE_STATE_LENGTH 65536
+#define PDS_LM_CHECK_DEVICE_STATE_LENGTH(X) \
+ PDS_CORE_SIZE_CHECK(union, PDS_LM_DEVICE_STATE_LENGTH, X)
+
+/*
+ * enum pds_lm_cmd_opcode - Live Migration Device commands
+ */
+enum pds_lm_cmd_opcode {
+ PDS_LM_CMD_HOST_VF_STATUS = 1,
+
+ /* Device state commands */
+ PDS_LM_CMD_STATE_SIZE = 16,
+ PDS_LM_CMD_SUSPEND = 18,
+ PDS_LM_CMD_SUSPEND_STATUS = 19,
+ PDS_LM_CMD_RESUME = 20,
+ PDS_LM_CMD_SAVE = 21,
+ PDS_LM_CMD_RESTORE = 22,
+
+ /* Dirty page tracking commands */
+ PDS_LM_CMD_DIRTY_STATUS = 32,
+ PDS_LM_CMD_DIRTY_ENABLE = 33,
+ PDS_LM_CMD_DIRTY_DISABLE = 34,
+ PDS_LM_CMD_DIRTY_READ_SEQ = 35,
+ PDS_LM_CMD_DIRTY_WRITE_ACK = 36,
+};
+
+/**
+ * struct pds_lm_cmd - generic command
+ * @opcode: Opcode
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @rsvd2: Structure padding to 60 Bytes
+ */
+struct pds_lm_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ u8 rsvd2[56];
+};
+
+/**
+ * struct pds_lm_state_size_cmd - STATE_SIZE command
+ * @opcode: Opcode
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ */
+struct pds_lm_state_size_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+};
+
+/**
+ * struct pds_lm_state_size_comp - STATE_SIZE command completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @rsvd: Word boundary padding
+ * @comp_index: Index in the desc ring for which this is the completion
+ * @size: Size of the device state
+ * @rsvd2: Word boundary padding
+ * @color: Color bit
+ */
+struct pds_lm_state_size_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ union {
+ __le64 size;
+ u8 rsvd2[11];
+ } __packed;
+ u8 color;
+};
+
+enum pds_lm_suspend_resume_type {
+ PDS_LM_SUSPEND_RESUME_TYPE_FULL = 0,
+ PDS_LM_SUSPEND_RESUME_TYPE_P2P = 1,
+};
+
+/**
+ * struct pds_lm_suspend_cmd - SUSPEND command
+ * @opcode: Opcode PDS_LM_CMD_SUSPEND
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @type: Type of suspend (enum pds_lm_suspend_resume_type)
+ */
+struct pds_lm_suspend_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ u8 type;
+};
+
+/**
+ * struct pds_lm_suspend_status_cmd - SUSPEND status command
+ * @opcode: Opcode PDS_LM_CMD_SUSPEND_STATUS
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @type: Type of suspend (enum pds_lm_suspend_resume_type)
+ */
+struct pds_lm_suspend_status_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ u8 type;
+};
+
+/**
+ * struct pds_lm_resume_cmd - RESUME command
+ * @opcode: Opcode PDS_LM_CMD_RESUME
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @type: Type of resume (enum pds_lm_suspend_resume_type)
+ */
+struct pds_lm_resume_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ u8 type;
+};
+
+/**
+ * struct pds_lm_sg_elem - Transmit scatter-gather (SG) descriptor element
+ * @addr: DMA address of SG element data buffer
+ * @len: Length of SG element data buffer, in bytes
+ * @rsvd: Word boundary padding
+ */
+struct pds_lm_sg_elem {
+ __le64 addr;
+ __le32 len;
+ __le16 rsvd[2];
+};
+
+/**
+ * struct pds_lm_save_cmd - SAVE command
+ * @opcode: Opcode PDS_LM_CMD_SAVE
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @rsvd2: Word boundary padding
+ * @sgl_addr: IOVA address of the SGL to dma the device state
+ * @num_sge: Total number of SG elements
+ */
+struct pds_lm_save_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ u8 rsvd2[4];
+ __le64 sgl_addr;
+ __le32 num_sge;
+} __packed;
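
SAVE does not carry the state inline; it points at an SG list of pds_lm_sg_elem. A sketch of filling both, assuming the buffers and the list itself are already DMA-mapped:

#include <linux/string.h>
#include <linux/pds/pds_adminq.h>

/* Sketch: describe an already-DMA-mapped state buffer to the SAVE command */
static void example_fill_save(struct pds_lm_save_cmd *cmd,
			      struct pds_lm_sg_elem *sgl, dma_addr_t sgl_pa,
			      const dma_addr_t *bufs, u32 nbufs, u32 buf_len,
			      __le16 vf_id)
{
	u32 i;

	for (i = 0; i < nbufs; i++) {
		sgl[i].addr = cpu_to_le64(bufs[i]);
		sgl[i].len = cpu_to_le32(buf_len);
	}

	memset(cmd, 0, sizeof(*cmd));
	cmd->opcode = PDS_LM_CMD_SAVE;
	cmd->vf_id = vf_id;
	cmd->sgl_addr = cpu_to_le64(sgl_pa);	/* IOVA of the SG list itself */
	cmd->num_sge = cpu_to_le32(nbufs);
}
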
+
+/**
+ * struct pds_lm_restore_cmd - RESTORE command
+ * @opcode: Opcode PDS_LM_CMD_RESTORE
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @rsvd2: Word boundary padding
+ * @sgl_addr: IOVA address of the SGL to dma the device state
+ * @num_sge: Total number of SG elements
+ */
+struct pds_lm_restore_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ u8 rsvd2[4];
+ __le64 sgl_addr;
+ __le32 num_sge;
+} __packed;
+
+/**
+ * union pds_lm_dev_state - device state information
+ * @words: Device state words
+ */
+union pds_lm_dev_state {
+ __le32 words[PDS_LM_DEVICE_STATE_LENGTH / sizeof(__le32)];
+};
+
+enum pds_lm_host_vf_status {
+ PDS_LM_STA_NONE = 0,
+ PDS_LM_STA_IN_PROGRESS,
+ PDS_LM_STA_MAX,
+};
+
+/**
+ * struct pds_lm_dirty_region_info - Memory region info for STATUS and ENABLE
+ * @dma_base: Base address of the DMA-contiguous memory region
+ * @page_count: Number of pages in the memory region
+ * @page_size_log2: Log2 page size in the memory region
+ * @rsvd: Word boundary padding
+ */
+struct pds_lm_dirty_region_info {
+ __le64 dma_base;
+ __le32 page_count;
+ u8 page_size_log2;
+ u8 rsvd[3];
+};
+
+/**
+ * struct pds_lm_dirty_status_cmd - DIRTY_STATUS command
+ * @opcode: Opcode PDS_LM_CMD_DIRTY_STATUS
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @max_regions: Capacity of the region info buffer
+ * @rsvd2: Word boundary padding
+ * @regions_dma: DMA address of the region info buffer
+ *
+ * The device writes min(max_regions, num_regions) entries of struct
+ * pds_lm_dirty_region_info to regions_dma, i.e. no more than the buffer
+ * capacity given in the command.
+ *
+ * The max_regions may be zero, in which case regions_dma is ignored. In that
+ * case, the completion will only report the maximum number of regions
+ * supported by the device, and the number of regions currently enabled.
+ */
+struct pds_lm_dirty_status_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ u8 max_regions;
+ u8 rsvd2[3];
+ __le64 regions_dma;
+} __packed;
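
The max_regions-may-be-zero rule above enables a two-phase query: probe the counts first, then allocate and fetch. A sketch of the command setup only:

#include <linux/string.h>
#include <linux/pds/pds_adminq.h>

/* Sketch: max_regions == 0 probes the counts; a second call fetches the info */
static void example_fill_dirty_status(struct pds_lm_dirty_status_cmd *cmd,
				      __le16 vf_id, u8 max_regions,
				      dma_addr_t regions_pa)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->opcode = PDS_LM_CMD_DIRTY_STATUS;
	cmd->vf_id = vf_id;
	cmd->max_regions = max_regions;		/* 0 => counts only */
	cmd->regions_dma = cpu_to_le64(regions_pa); /* ignored if max_regions == 0 */
}
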
+
+/**
+ * enum pds_lm_dirty_bmp_type - Type of dirty page bitmap
+ * @PDS_LM_DIRTY_BMP_TYPE_NONE: No bitmap / disabled
+ * @PDS_LM_DIRTY_BMP_TYPE_SEQ_ACK: Seq/Ack bitmap representation
+ */
+enum pds_lm_dirty_bmp_type {
+ PDS_LM_DIRTY_BMP_TYPE_NONE = 0,
+ PDS_LM_DIRTY_BMP_TYPE_SEQ_ACK = 1,
+};
+
+/**
+ * struct pds_lm_dirty_status_comp - STATUS command completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @rsvd: Word boundary padding
+ * @comp_index: Index in the desc ring for which this is the completion
+ * @max_regions: Maximum number of regions supported by the device
+ * @num_regions: Number of regions currently enabled
+ * @bmp_type: Type of dirty bitmap representation
+ * @rsvd2: Word boundary padding
+ * @bmp_type_mask: Mask of supported bitmap types, bit index per type
+ * @rsvd3: Word boundary padding
+ * @color: Color bit
+ *
+ * This completion descriptor is used for STATUS, ENABLE, and DISABLE.
+ */
+struct pds_lm_dirty_status_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ u8 max_regions;
+ u8 num_regions;
+ u8 bmp_type;
+ u8 rsvd2;
+ __le32 bmp_type_mask;
+ u8 rsvd3[3];
+ u8 color;
+};
+
+/**
+ * struct pds_lm_dirty_enable_cmd - DIRTY_ENABLE command
+ * @opcode: Opcode PDS_LM_CMD_DIRTY_ENABLE
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @bmp_type: Type of dirty bitmap representation
+ * @num_regions: Number of entries in the region info buffer
+ * @rsvd2: Word boundary padding
+ * @regions_dma: DMA address of the region info buffer
+ *
+ * The num_regions must be nonzero, and less than or equal to the maximum
+ * number of regions supported by the device.
+ *
+ * The memory regions should not overlap.
+ *
+ * The information should be initialized by the driver. The device may modify
+ * the information on successful completion, such as by size-aligning the
+ * number of pages in a region.
+ *
+ * The modified number of pages will be greater than or equal to the page count
+ * given in the enable command, and at least as coarsely aligned as the given
+ * value. For example, the count might be aligned to a multiple of 64, but
+ * if the value is already a multiple of 128 or higher, it will not change.
+ * If the driver requires its own minimum alignment of the number of pages, the
+ * driver should account for that already in the region info of this command.
+ *
+ * This command uses struct pds_lm_dirty_status_comp for its completion.
+ */
+struct pds_lm_dirty_enable_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ u8 bmp_type;
+ u8 num_regions;
+ u8 rsvd2[2];
+ __le64 regions_dma;
+} __packed;
+
+/**
+ * struct pds_lm_dirty_disable_cmd - DIRTY_DISABLE command
+ * @opcode: Opcode PDS_LM_CMD_DIRTY_DISABLE
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ *
+ * Dirty page tracking will be disabled. This may be called in any state, as
+ * long as dirty page tracking is supported by the device, to ensure that dirty
+ * page tracking is disabled.
+ *
+ * This command uses struct pds_lm_dirty_status_comp for its completion. On
+ * success, num_regions will be zero.
+ */
+struct pds_lm_dirty_disable_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+};
+
+/**
+ * struct pds_lm_dirty_seq_ack_cmd - DIRTY_READ_SEQ or _WRITE_ACK command
+ * @opcode: Opcode PDS_LM_CMD_DIRTY_[READ_SEQ|WRITE_ACK]
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @off_bytes: Byte offset in the bitmap
+ * @len_bytes: Number of bytes to transfer
+ * @num_sge: Number of DMA scatter gather elements
+ * @rsvd2: Word boundary padding
+ * @sgl_addr: DMA address of scatter gather list
+ *
+ * Read bytes from the SEQ bitmap, or write bytes into the ACK bitmap.
+ *
+ * This command treats the entire bitmap as a byte buffer. It does not
+ * distinguish between guest memory regions. The driver should refer to the
+ * number of pages in each region, according to PDS_LM_CMD_DIRTY_STATUS, to
+ * determine the region boundaries in the bitmap. Each region will be
+ * represented by exactly the number of bits as the page count for that region,
+ * immediately following the last bit of the previous region.
+ */
+struct pds_lm_dirty_seq_ack_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ __le32 off_bytes;
+ __le32 len_bytes;
+ __le16 num_sge;
+ u8 rsvd2[2];
+ __le64 sgl_addr;
+} __packed;
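
Since the per-region bitmaps are packed back to back, the byte offset of region @n falls out of the page counts reported by DIRTY_STATUS. A sketch, assuming each region's page count is a multiple of 8 so region boundaries stay byte-aligned:

#include <linux/pds/pds_adminq.h>

/*
 * Sketch: byte offset of region @n in the concatenated dirty bitmap.
 * One bit per page, regions packed back to back; assumes each region's
 * page count is a multiple of 8 so boundaries stay byte-aligned.
 */
static u32 example_region_bmp_offset(const struct pds_lm_dirty_region_info *ri,
				     u32 n)
{
	u32 off = 0, i;

	for (i = 0; i < n; i++)
		off += le32_to_cpu(ri[i].page_count) / 8;

	return off;
}
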
+
+/**
+ * struct pds_lm_host_vf_status_cmd - HOST_VF_STATUS command
+ * @opcode: Opcode PDS_LM_CMD_HOST_VF_STATUS
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @status: Current LM status of host VF driver (enum pds_lm_host_vf_status)
+ */
+struct pds_lm_host_vf_status_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ u8 status;
+};
+
union pds_core_adminq_cmd {
u8 opcode;
u8 bytes[64];
@@ -600,6 +1195,27 @@ union pds_core_adminq_cmd {
struct pds_core_q_identify_cmd q_ident;
struct pds_core_q_init_cmd q_init;
+
+ struct pds_vdpa_cmd vdpa;
+ struct pds_vdpa_init_cmd vdpa_init;
+ struct pds_vdpa_ident_cmd vdpa_ident;
+ struct pds_vdpa_status_cmd vdpa_status;
+ struct pds_vdpa_setattr_cmd vdpa_setattr;
+ struct pds_vdpa_set_features_cmd vdpa_set_features;
+ struct pds_vdpa_vq_init_cmd vdpa_vq_init;
+ struct pds_vdpa_vq_reset_cmd vdpa_vq_reset;
+
+ struct pds_lm_suspend_cmd lm_suspend;
+ struct pds_lm_suspend_status_cmd lm_suspend_status;
+ struct pds_lm_resume_cmd lm_resume;
+ struct pds_lm_state_size_cmd lm_state_size;
+ struct pds_lm_save_cmd lm_save;
+ struct pds_lm_restore_cmd lm_restore;
+ struct pds_lm_host_vf_status_cmd lm_host_vf_status;
+ struct pds_lm_dirty_status_cmd lm_dirty_status;
+ struct pds_lm_dirty_enable_cmd lm_dirty_enable;
+ struct pds_lm_dirty_disable_cmd lm_dirty_disable;
+ struct pds_lm_dirty_seq_ack_cmd lm_dirty_seq_ack;
};
union pds_core_adminq_comp {
@@ -621,6 +1237,12 @@ union pds_core_adminq_comp {
struct pds_core_q_identify_comp q_ident;
struct pds_core_q_init_comp q_init;
+
+ struct pds_vdpa_vq_init_comp vdpa_vq_init;
+ struct pds_vdpa_vq_reset_comp vdpa_vq_reset;
+
+ struct pds_lm_state_size_comp lm_state_size;
+ struct pds_lm_dirty_status_comp lm_dirty_status;
};
#ifndef __CHECKER__
diff --git a/include/linux/pds/pds_common.h b/include/linux/pds/pds_common.h
index 060331486d50..30581e2e04cc 100644
--- a/include/linux/pds/pds_common.h
+++ b/include/linux/pds/pds_common.h
@@ -34,35 +34,19 @@ enum pds_core_vif_types {
#define PDS_DEV_TYPE_CORE_STR "Core"
#define PDS_DEV_TYPE_VDPA_STR "vDPA"
-#define PDS_DEV_TYPE_VFIO_STR "VFio"
+#define PDS_DEV_TYPE_VFIO_STR "vfio"
#define PDS_DEV_TYPE_ETH_STR "Eth"
#define PDS_DEV_TYPE_RDMA_STR "RDMA"
#define PDS_DEV_TYPE_LM_STR "LM"
-#define PDS_CORE_IFNAMSIZ 16
+#define PDS_VDPA_DEV_NAME PDS_CORE_DRV_NAME "." PDS_DEV_TYPE_VDPA_STR
+#define PDS_VFIO_LM_DEV_NAME PDS_CORE_DRV_NAME "." PDS_DEV_TYPE_LM_STR "." PDS_DEV_TYPE_VFIO_STR
-/**
- * enum pds_core_logical_qtype - Logical Queue Types
- * @PDS_CORE_QTYPE_ADMINQ: Administrative Queue
- * @PDS_CORE_QTYPE_NOTIFYQ: Notify Queue
- * @PDS_CORE_QTYPE_RXQ: Receive Queue
- * @PDS_CORE_QTYPE_TXQ: Transmit Queue
- * @PDS_CORE_QTYPE_EQ: Event Queue
- * @PDS_CORE_QTYPE_MAX: Max queue type supported
- */
-enum pds_core_logical_qtype {
- PDS_CORE_QTYPE_ADMINQ = 0,
- PDS_CORE_QTYPE_NOTIFYQ = 1,
- PDS_CORE_QTYPE_RXQ = 2,
- PDS_CORE_QTYPE_TXQ = 3,
- PDS_CORE_QTYPE_EQ = 4,
-
- PDS_CORE_QTYPE_MAX = 16 /* don't change - used in struct size */
-};
+struct pdsc;
int pdsc_register_notify(struct notifier_block *nb);
void pdsc_unregister_notify(struct notifier_block *nb);
void *pdsc_get_pf_struct(struct pci_dev *vf_pdev);
-int pds_client_register(struct pci_dev *pf_pdev, char *devname);
-int pds_client_unregister(struct pci_dev *pf_pdev, u16 client_id);
+int pds_client_register(struct pdsc *pf, char *devname);
+int pds_client_unregister(struct pdsc *pf, u16 client_id);
#endif /* _PDS_COMMON_H_ */
diff --git a/include/linux/pds/pds_core_if.h b/include/linux/pds/pds_core_if.h
index e838a2b90440..17a87c1a55d7 100644
--- a/include/linux/pds/pds_core_if.h
+++ b/include/linux/pds/pds_core_if.h
@@ -79,6 +79,7 @@ enum pds_core_status_code {
PDS_RC_EVFID = 31, /* VF ID does not exist */
PDS_RC_BAD_FW = 32, /* FW file is invalid or corrupted */
PDS_RC_ECLIENT = 33, /* No such client id */
+ PDS_RC_BAD_PCI = 255, /* Broken PCI when reading status */
};
/**
diff --git a/include/linux/peci.h b/include/linux/peci.h
index 06e6ef935297..9b3d36aff431 100644
--- a/include/linux/peci.h
+++ b/include/linux/peci.h
@@ -42,13 +42,13 @@ struct peci_controller_ops {
*/
struct peci_controller {
struct device dev;
- struct peci_controller_ops *ops;
+ const struct peci_controller_ops *ops;
struct mutex bus_lock; /* held for the duration of xfer */
u8 id;
};
struct peci_controller *devm_peci_controller_add(struct device *parent,
- struct peci_controller_ops *ops);
+ const struct peci_controller_ops *ops);
static inline struct peci_controller *to_peci_controller(void *d)
{
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index e60727be79c4..ec3573119923 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -343,31 +343,19 @@ static __always_inline void __this_cpu_preempt_check(const char *op) { }
pscr2_ret__; \
})
-/*
- * Special handling for cmpxchg_double. cmpxchg_double is passed two
- * percpu variables. The first has to be aligned to a double word
- * boundary and the second has to follow directly thereafter.
- * We enforce this on all architectures even if they don't support
- * a double cmpxchg instruction, since it's a cheap requirement, and it
- * avoids breaking the requirement for architectures with the instruction.
- */
-#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...) \
+#define __pcpu_size_call_return2bool(stem, variable, ...) \
({ \
- bool pdcrb_ret__; \
- __verify_pcpu_ptr(&(pcp1)); \
- BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2)); \
- VM_BUG_ON((unsigned long)(&(pcp1)) % (2 * sizeof(pcp1))); \
- VM_BUG_ON((unsigned long)(&(pcp2)) != \
- (unsigned long)(&(pcp1)) + sizeof(pcp1)); \
- switch(sizeof(pcp1)) { \
- case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break; \
- case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break; \
- case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break; \
- case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break; \
+ bool pscr2_ret__; \
+ __verify_pcpu_ptr(&(variable)); \
+ switch(sizeof(variable)) { \
+ case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break; \
+ case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break; \
+ case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break; \
+ case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break; \
default: \
__bad_size_call_parameter(); break; \
} \
- pdcrb_ret__; \
+ pscr2_ret__; \
})
#define __pcpu_size_call(stem, variable, ...) \
@@ -426,9 +414,8 @@ do { \
#define raw_cpu_xchg(pcp, nval) __pcpu_size_call_return2(raw_cpu_xchg_, pcp, nval)
#define raw_cpu_cmpxchg(pcp, oval, nval) \
__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval)
-#define raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
- __pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2)
-
+#define raw_cpu_try_cmpxchg(pcp, ovalp, nval) \
+ __pcpu_size_call_return2bool(raw_cpu_try_cmpxchg_, pcp, ovalp, nval)
#define raw_cpu_sub(pcp, val) raw_cpu_add(pcp, -(val))
#define raw_cpu_inc(pcp) raw_cpu_add(pcp, 1)
#define raw_cpu_dec(pcp) raw_cpu_sub(pcp, 1)
@@ -488,11 +475,6 @@ do { \
raw_cpu_cmpxchg(pcp, oval, nval); \
})
-#define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
-({ __this_cpu_preempt_check("cmpxchg_double"); \
- raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2); \
-})
-
#define __this_cpu_sub(pcp, val) __this_cpu_add(pcp, -(typeof(pcp))(val))
#define __this_cpu_inc(pcp) __this_cpu_add(pcp, 1)
#define __this_cpu_dec(pcp) __this_cpu_sub(pcp, 1)
@@ -513,9 +495,8 @@ do { \
#define this_cpu_xchg(pcp, nval) __pcpu_size_call_return2(this_cpu_xchg_, pcp, nval)
#define this_cpu_cmpxchg(pcp, oval, nval) \
__pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
-#define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
- __pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2)
-
+#define this_cpu_try_cmpxchg(pcp, ovalp, nval) \
+ __pcpu_size_call_return2bool(this_cpu_try_cmpxchg_, pcp, ovalp, nval)
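
The try_cmpxchg flavour returns a bool and refreshes the expected value through @ovalp on failure, which tightens the usual compare-and-retry loop. A minimal sketch:

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, example_count);

/* Sketch: saturating per-CPU increment; a failed try refreshes 'old' */
static void example_inc_saturating(unsigned long max)
{
	unsigned long old = this_cpu_read(example_count);

	do {
		if (old >= max)
			return;
	} while (!this_cpu_try_cmpxchg(example_count, &old, old + 1));
}
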
#define this_cpu_sub(pcp, val) this_cpu_add(pcp, -(typeof(pcp))(val))
#define this_cpu_inc(pcp) this_cpu_add(pcp, 1)
#define this_cpu_dec(pcp) this_cpu_sub(pcp, 1)
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 1338ea2aa720..8c677f185901 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -8,6 +8,7 @@
#include <linux/cpumask.h>
#include <linux/pfn.h>
#include <linux/init.h>
+#include <linux/cleanup.h>
#include <asm/percpu.h>
@@ -34,6 +35,12 @@
#define PCPU_BITMAP_BLOCK_BITS (PCPU_BITMAP_BLOCK_SIZE >> \
PCPU_MIN_ALLOC_SHIFT)
+#ifdef CONFIG_RANDOM_KMALLOC_CACHES
+#define PERCPU_DYNAMIC_SIZE_SHIFT 12
+#else
+#define PERCPU_DYNAMIC_SIZE_SHIFT 10
+#endif
+
/*
* Percpu allocator can serve percpu allocations before slab is
* initialized which allows slab to depend on the percpu allocator.
@@ -41,7 +48,7 @@
* for this. Keep PERCPU_DYNAMIC_RESERVE equal to or larger than
* PERCPU_DYNAMIC_EARLY_SIZE.
*/
-#define PERCPU_DYNAMIC_EARLY_SIZE (20 << 10)
+#define PERCPU_DYNAMIC_EARLY_SIZE (20 << PERCPU_DYNAMIC_SIZE_SHIFT)
/*
* PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
@@ -55,9 +62,9 @@
* intelligent way to determine this would be nice.
*/
#if BITS_PER_LONG > 32
-#define PERCPU_DYNAMIC_RESERVE (28 << 10)
+#define PERCPU_DYNAMIC_RESERVE (28 << PERCPU_DYNAMIC_SIZE_SHIFT)
#else
-#define PERCPU_DYNAMIC_RESERVE (20 << 10)
+#define PERCPU_DYNAMIC_RESERVE (20 << PERCPU_DYNAMIC_SIZE_SHIFT)
#endif
extern void *pcpu_base_addr;
@@ -103,12 +110,10 @@ extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai);
extern void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
void *base_addr);
-#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
extern int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
size_t atom_size,
pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn);
-#endif
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
void __init pcpu_populate_pte(unsigned long addr);
@@ -127,6 +132,10 @@ extern void __init setup_per_cpu_areas(void);
extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) __alloc_size(1);
extern void __percpu *__alloc_percpu(size_t size, size_t align) __alloc_size(1);
extern void free_percpu(void __percpu *__pdata);
+extern size_t pcpu_alloc_size(void __percpu *__pdata);
+
+DEFINE_FREE(free_percpu, void __percpu *, free_percpu(_T))
+
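
With the DEFINE_FREE() hook above, a percpu allocation can be tied to scope via cleanup.h. A sketch:

#include <linux/cleanup.h>
#include <linux/percpu.h>

/* Sketch: the allocation is freed automatically on every return path */
static int example_scoped_percpu(void)
{
	unsigned long __percpu *cnt __free(free_percpu) =
					alloc_percpu(unsigned long);

	if (!cnt)
		return -ENOMEM;

	/* ... use cnt ... */
	return 0;
}
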
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
#define alloc_percpu_gfp(type, gfp) \
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 75b73c83bc9d..3a44dd1e33d2 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -30,22 +30,35 @@ struct percpu_counter {
extern int percpu_counter_batch;
-int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
- struct lock_class_key *key);
+int __percpu_counter_init_many(struct percpu_counter *fbc, s64 amount,
+ gfp_t gfp, u32 nr_counters,
+ struct lock_class_key *key);
-#define percpu_counter_init(fbc, value, gfp) \
+#define percpu_counter_init_many(fbc, value, gfp, nr_counters) \
({ \
static struct lock_class_key __key; \
\
- __percpu_counter_init(fbc, value, gfp, &__key); \
+ __percpu_counter_init_many(fbc, value, gfp, nr_counters,\
+ &__key); \
})
-void percpu_counter_destroy(struct percpu_counter *fbc);
+
+#define percpu_counter_init(fbc, value, gfp) \
+ percpu_counter_init_many(fbc, value, gfp, 1)
+
+void percpu_counter_destroy_many(struct percpu_counter *fbc, u32 nr_counters);
+static inline void percpu_counter_destroy(struct percpu_counter *fbc)
+{
+ percpu_counter_destroy_many(fbc, 1);
+}
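
The *_many variants initialize or tear down a contiguous array of counters in one call. A sketch:

#include <linux/percpu_counter.h>

#define EXAMPLE_NR_COUNTERS 4

/* Sketch: one call sets up (or tears down) a contiguous counter array */
static int example_counters_init(struct percpu_counter *ctrs)
{
	return percpu_counter_init_many(ctrs, 0, GFP_KERNEL,
					EXAMPLE_NR_COUNTERS);
}

static void example_counters_exit(struct percpu_counter *ctrs)
{
	percpu_counter_destroy_many(ctrs, EXAMPLE_NR_COUNTERS);
}
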
+
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
+bool __percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit,
+ s64 amount, s32 batch);
void percpu_counter_sync(struct percpu_counter *fbc);
static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
@@ -58,6 +71,13 @@ static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
}
+static inline bool
+percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, s64 amount)
+{
+ return __percpu_counter_limited_add(fbc, limit, amount,
+ percpu_counter_batch);
+}
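
A sketch of the intended use of percpu_counter_limited_add(): charge against a quota and fail the request rather than overshoot (names are illustrative):

#include <linux/errno.h>
#include <linux/percpu_counter.h>

/* Sketch: charge @pages against a quota without ever overshooting it */
static int example_charge(struct percpu_counter *used, s64 limit, s64 pages)
{
	return percpu_counter_limited_add(used, limit, pages) ? 0 : -ENOSPC;
}
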
+
/*
* With percpu_counter_add_local() and percpu_counter_sub_local(), counts
* are accumulated in local per cpu counter and not in fbc->count until
@@ -116,11 +136,27 @@ struct percpu_counter {
s64 count;
};
+static inline int percpu_counter_init_many(struct percpu_counter *fbc,
+ s64 amount, gfp_t gfp,
+ u32 nr_counters)
+{
+ u32 i;
+
+ for (i = 0; i < nr_counters; i++)
+ fbc[i].count = amount;
+
+ return 0;
+}
+
static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
gfp_t gfp)
{
- fbc->count = amount;
- return 0;
+ return percpu_counter_init_many(fbc, amount, gfp, 1);
+}
+
+static inline void percpu_counter_destroy_many(struct percpu_counter *fbc,
+ u32 nr_counters)
+{
}
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
@@ -158,6 +194,27 @@ percpu_counter_add(struct percpu_counter *fbc, s64 amount)
local_irq_restore(flags);
}
+static inline bool
+percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, s64 amount)
+{
+ unsigned long flags;
+ bool good = false;
+ s64 count;
+
+ if (amount == 0)
+ return true;
+
+ local_irq_save(flags);
+ count = fbc->count + amount;
+ if ((amount > 0 && count <= limit) ||
+ (amount < 0 && count >= limit)) {
+ fbc->count = count;
+ good = true;
+ }
+ local_irq_restore(flags);
+ return good;
+}
+
/* non-SMP percpu_counter_add_local is the same with percpu_counter_add */
static inline void
percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 525b5d64e394..b3b34f6670cf 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -26,9 +26,11 @@
*/
#define ARMPMU_EVT_64BIT 0x00001 /* Event uses a 64bit counter */
#define ARMPMU_EVT_47BIT 0x00002 /* Event uses a 47bit counter */
+#define ARMPMU_EVT_63BIT 0x00004 /* Event uses a 63bit counter */
static_assert((PERF_EVENT_FLAG_ARCH & ARMPMU_EVT_64BIT) == ARMPMU_EVT_64BIT);
static_assert((PERF_EVENT_FLAG_ARCH & ARMPMU_EVT_47BIT) == ARMPMU_EVT_47BIT);
+static_assert((PERF_EVENT_FLAG_ARCH & ARMPMU_EVT_63BIT) == ARMPMU_EVT_63BIT);
#define HW_OP_UNSUPPORTED 0xFFFF
#define C(_x) PERF_COUNT_HW_CACHE_##_x
@@ -58,12 +60,6 @@ struct pmu_hw_events {
DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);
/*
- * Hardware lock to serialize accesses to PMU registers. Needed for the
- * read/modify/write sequences.
- */
- raw_spinlock_t pmu_lock;
-
- /*
* When using percpu IRQs, we need a percpu dev_id. Place it here as we
* already have to allocate this struct per cpu.
*/
@@ -171,6 +167,8 @@ void kvm_host_pmu_init(struct arm_pmu *pmu);
#define kvm_host_pmu_init(x) do { } while(0)
#endif
+bool arm_pmu_irq_is_nmi(void);
+
/* Internal functions only for core arm_pmu code */
struct arm_pmu *armpmu_alloc(void);
void armpmu_free(struct arm_pmu *pmu);
@@ -183,5 +181,28 @@ void armpmu_free_irq(int irq, int cpu);
#endif /* CONFIG_ARM_PMU */
#define ARMV8_SPE_PDEV_NAME "arm,spe-v1"
+#define ARMV8_TRBE_PDEV_NAME "arm,trbe"
+
+/* Why does everything I do descend into this? */
+#define __GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \
+ (lo) == (hi) ? #cfg ":" #lo "\n" : #cfg ":" #lo "-" #hi
+
+#define _GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \
+ __GEN_PMU_FORMAT_ATTR(cfg, lo, hi)
+
+#define GEN_PMU_FORMAT_ATTR(name) \
+ PMU_FORMAT_ATTR(name, \
+ _GEN_PMU_FORMAT_ATTR(ATTR_CFG_FLD_##name##_CFG, \
+ ATTR_CFG_FLD_##name##_LO, \
+ ATTR_CFG_FLD_##name##_HI))
+
+#define _ATTR_CFG_GET_FLD(attr, cfg, lo, hi) \
+ ((((attr)->cfg) >> lo) & GENMASK_ULL(hi - lo, 0))
+
+#define ATTR_CFG_GET_FLD(attr, name) \
+ _ATTR_CFG_GET_FLD(attr, \
+ ATTR_CFG_FLD_##name##_CFG, \
+ ATTR_CFG_FLD_##name##_LO, \
+ ATTR_CFG_FLD_##name##_HI)
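
For illustration, this is how a driver is expected to wire these macros up; the field name and bit range below are made up:

#include <linux/perf/arm_pmu.h>

/* Sketch: a config field "example_span" occupying attr->config bits 0-7 */
#define ATTR_CFG_FLD_example_span_CFG	config
#define ATTR_CFG_FLD_example_span_LO	0
#define ATTR_CFG_FLD_example_span_HI	7

GEN_PMU_FORMAT_ATTR(example_span);	/* sysfs format string: "config:0-7" */

static u64 example_get_span(struct perf_event_attr *attr)
{
	return ATTR_CFG_GET_FLD(attr, example_span);
}
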
#endif /* __ARM_PMU_H__ */
diff --git a/include/linux/perf/arm_pmuv3.h b/include/linux/perf/arm_pmuv3.h
index e3899bd77f5c..46377e134d67 100644
--- a/include/linux/perf/arm_pmuv3.h
+++ b/include/linux/perf/arm_pmuv3.h
@@ -215,45 +215,54 @@
#define ARMV8_PMU_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/
#define ARMV8_PMU_PMCR_LC (1 << 6) /* Overflow on 64 bit cycle counter */
#define ARMV8_PMU_PMCR_LP (1 << 7) /* Long event counter enable */
-#define ARMV8_PMU_PMCR_N_SHIFT 11 /* Number of counters supported */
-#define ARMV8_PMU_PMCR_N_MASK 0x1f
-#define ARMV8_PMU_PMCR_MASK 0xff /* Mask for writable bits */
+#define ARMV8_PMU_PMCR_N GENMASK(15, 11) /* Number of counters supported */
+/* Mask for writable bits */
+#define ARMV8_PMU_PMCR_MASK (ARMV8_PMU_PMCR_E | ARMV8_PMU_PMCR_P | \
+ ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_D | \
+ ARMV8_PMU_PMCR_X | ARMV8_PMU_PMCR_DP | \
+ ARMV8_PMU_PMCR_LC | ARMV8_PMU_PMCR_LP)
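
With ARMV8_PMU_PMCR_N now a GENMASK(), the open-coded shift-and-mask becomes a FIELD_GET(). A sketch:

#include <linux/bitfield.h>
#include <linux/perf/arm_pmuv3.h>

/* Sketch: number of event counters advertised in PMCR.N */
static u32 example_pmcr_n(u64 pmcr)
{
	return FIELD_GET(ARMV8_PMU_PMCR_N, pmcr);
}
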
/*
* PMOVSR: counters overflow flag status reg
*/
-#define ARMV8_PMU_OVSR_MASK 0xffffffff /* Mask for writable bits */
-#define ARMV8_PMU_OVERFLOWED_MASK ARMV8_PMU_OVSR_MASK
+#define ARMV8_PMU_OVSR_P GENMASK(30, 0)
+#define ARMV8_PMU_OVSR_C BIT(31)
+/* Mask for writable bits is both P and C fields */
+#define ARMV8_PMU_OVERFLOWED_MASK (ARMV8_PMU_OVSR_P | ARMV8_PMU_OVSR_C)
/*
* PMXEVTYPER: Event selection reg
*/
-#define ARMV8_PMU_EVTYPE_MASK 0xc800ffff /* Mask for writable bits */
-#define ARMV8_PMU_EVTYPE_EVENT 0xffff /* Mask for EVENT bits */
+#define ARMV8_PMU_EVTYPE_EVENT GENMASK(15, 0) /* Mask for EVENT bits */
+#define ARMV8_PMU_EVTYPE_TH GENMASK_ULL(43, 32) /* arm64 only */
+#define ARMV8_PMU_EVTYPE_TC GENMASK_ULL(63, 61) /* arm64 only */
/*
* Event filters for PMUv3
*/
-#define ARMV8_PMU_EXCLUDE_EL1 (1U << 31)
-#define ARMV8_PMU_EXCLUDE_EL0 (1U << 30)
-#define ARMV8_PMU_INCLUDE_EL2 (1U << 27)
+#define ARMV8_PMU_EXCLUDE_EL1 (1U << 31)
+#define ARMV8_PMU_EXCLUDE_EL0 (1U << 30)
+#define ARMV8_PMU_EXCLUDE_NS_EL1 (1U << 29)
+#define ARMV8_PMU_EXCLUDE_NS_EL0 (1U << 28)
+#define ARMV8_PMU_INCLUDE_EL2 (1U << 27)
+#define ARMV8_PMU_EXCLUDE_EL3 (1U << 26)
/*
* PMUSERENR: user enable reg
*/
-#define ARMV8_PMU_USERENR_MASK 0xf /* Mask for writable bits */
#define ARMV8_PMU_USERENR_EN (1 << 0) /* PMU regs can be accessed at EL0 */
#define ARMV8_PMU_USERENR_SW (1 << 1) /* PMSWINC can be written at EL0 */
#define ARMV8_PMU_USERENR_CR (1 << 2) /* Cycle counter can be read at EL0 */
#define ARMV8_PMU_USERENR_ER (1 << 3) /* Event counter can be read at EL0 */
+/* Mask for writable bits */
+#define ARMV8_PMU_USERENR_MASK (ARMV8_PMU_USERENR_EN | ARMV8_PMU_USERENR_SW | \
+ ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_ER)
/* PMMIR_EL1.SLOTS mask */
-#define ARMV8_PMU_SLOTS_MASK 0xff
-
-#define ARMV8_PMU_BUS_SLOTS_SHIFT 8
-#define ARMV8_PMU_BUS_SLOTS_MASK 0xff
-#define ARMV8_PMU_BUS_WIDTH_SHIFT 16
-#define ARMV8_PMU_BUS_WIDTH_MASK 0xf
+#define ARMV8_PMU_SLOTS GENMASK(7, 0)
+#define ARMV8_PMU_BUS_SLOTS GENMASK(15, 8)
+#define ARMV8_PMU_BUS_WIDTH GENMASK(19, 16)
+#define ARMV8_PMU_THWIDTH GENMASK(23, 20)
/*
* This code is really good
diff --git a/include/linux/perf/riscv_pmu.h b/include/linux/perf/riscv_pmu.h
index 43fc892aa7d9..43282e22ebe1 100644
--- a/include/linux/perf/riscv_pmu.h
+++ b/include/linux/perf/riscv_pmu.h
@@ -6,8 +6,8 @@
*
*/
-#ifndef _ASM_RISCV_PERF_EVENT_H
-#define _ASM_RISCV_PERF_EVENT_H
+#ifndef _RISCV_PMU_H
+#define _RISCV_PMU_H
#include <linux/perf_event.h>
#include <linux/ptrace.h>
@@ -21,7 +21,7 @@
#define RISCV_MAX_COUNTERS 64
#define RISCV_OP_UNSUPP (-EOPNOTSUPP)
-#define RISCV_PMU_PDEV_NAME "riscv-pmu"
+#define RISCV_PMU_SBI_PDEV_NAME "riscv-pmu-sbi"
#define RISCV_PMU_LEGACY_PDEV_NAME "riscv-pmu-legacy"
#define RISCV_PMU_STOP_FLAG_RESET 1
@@ -55,6 +55,10 @@ struct riscv_pmu {
void (*ctr_start)(struct perf_event *event, u64 init_val);
void (*ctr_stop)(struct perf_event *event, unsigned long flag);
int (*event_map)(struct perf_event *event, u64 *config);
+ void (*event_init)(struct perf_event *event);
+ void (*event_mapped)(struct perf_event *event, struct mm_struct *mm);
+ void (*event_unmapped)(struct perf_event *event, struct mm_struct *mm);
+ uint8_t (*csr_index)(struct perf_event *event);
struct cpu_hw_events __percpu *hw_events;
struct hlist_node node;
@@ -81,4 +85,4 @@ int riscv_pmu_get_hpm_info(u32 *hw_ctr_width, u32 *num_hw_ctr);
#endif /* CONFIG_RISCV_PMU */
-#endif /* _ASM_RISCV_PERF_EVENT_H */
+#endif /* _RISCV_PMU_H */
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index c8dcfdbda1f4..d2a15c0c6f8a 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -288,13 +288,14 @@ struct perf_event_pmu_context;
#define PERF_PMU_CAP_EXTENDED_REGS 0x0008
#define PERF_PMU_CAP_EXCLUSIVE 0x0010
#define PERF_PMU_CAP_ITRACE 0x0020
-#define PERF_PMU_CAP_HETEROGENEOUS_CPUS 0x0040
-#define PERF_PMU_CAP_NO_EXCLUDE 0x0080
-#define PERF_PMU_CAP_AUX_OUTPUT 0x0100
-#define PERF_PMU_CAP_EXTENDED_HW_TYPE 0x0200
+#define PERF_PMU_CAP_NO_EXCLUDE 0x0040
+#define PERF_PMU_CAP_AUX_OUTPUT 0x0080
+#define PERF_PMU_CAP_EXTENDED_HW_TYPE 0x0100
struct perf_output_handle;
+#define PMU_NULL_DEV ((void *)(~0UL))
+
/**
* struct pmu - generic performance monitoring unit
*/
@@ -303,6 +304,7 @@ struct pmu {
struct module *module;
struct device *dev;
+ struct device *parent;
const struct attribute_group **attr_groups;
const struct attribute_group **attr_update;
const char *name;
@@ -442,7 +444,8 @@ struct pmu {
/*
* Will return the value for perf_event_mmap_page::index for this event,
- * if no implementation is provided it will default to: event->hw.idx + 1.
+ * if no implementation is provided it will default to 0 (see
+ * perf_event_idx_default).
*/
int (*event_idx) (struct perf_event *event); /*optional */
@@ -701,6 +704,7 @@ struct perf_event {
/* The cumulative AND of all event_caps for events in this group. */
int group_caps;
+ unsigned int group_generation;
struct perf_event *group_leader;
/*
* event->pmu will always point to pmu in which this event belongs.
@@ -827,15 +831,23 @@ struct perf_event {
void *security;
#endif
struct list_head sb_list;
+
+ /*
+	 * Certain events get forwarded to another pmu internally by over-
+	 * writing the kernel copy of event->attr.type without the user being
+	 * aware of it. event->orig_type contains the original 'type' requested
+	 * by the user.
+ */
+ __u32 orig_type;
#endif /* CONFIG_PERF_EVENTS */
};
/*
- * ,-----------------------[1:n]----------------------.
- * V V
- * perf_event_context <-[1:n]-> perf_event_pmu_context <--- perf_event
- * ^ ^ | |
- * `--------[1:n]---------' `-[n:1]-> pmu <-[1:n]-'
+ * ,-----------------------[1:n]------------------------.
+ * V V
+ * perf_event_context <-[1:n]-> perf_event_pmu_context <-[1:n]- perf_event
+ * | |
+ * `--[n:1]-> pmu <-[1:n]--'
*
*
* struct perf_event_pmu_context lifetime is refcount based and RCU freed
@@ -853,6 +865,9 @@ struct perf_event {
* ctx->mutex pinning the configuration. Since we hold a reference on
* group_leader (through the filedesc) it can't go away, therefore it's
* associated pmu_ctx must exist and cannot change due to ctx->mutex.
+ *
+ * perf_event holds a refcount on perf_event_context
+ * perf_event holds a refcount on perf_event_pmu_context
*/
struct perf_event_pmu_context {
struct pmu *pmu;
@@ -867,6 +882,7 @@ struct perf_event_pmu_context {
unsigned int embedded : 1;
unsigned int nr_events;
+ unsigned int nr_cgroups;
atomic_t refcount; /* event <-> epc */
struct rcu_head rcu_head;
@@ -1127,6 +1143,15 @@ static inline bool branch_sample_priv(const struct perf_event *event)
return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_PRIV_SAVE;
}
+static inline bool branch_sample_counters(const struct perf_event *event)
+{
+ return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS;
+}
+
+static inline bool branch_sample_call_stack(const struct perf_event *event)
+{
+ return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK;
+}
struct perf_sample_data {
/*
@@ -1161,6 +1186,7 @@ struct perf_sample_data {
struct perf_callchain_entry *callchain;
struct perf_raw_record *raw;
struct perf_branch_stack *br_stack;
+ u64 *br_stack_cntr;
union perf_sample_weight weight;
union perf_mem_data_src data_src;
u64 txn;
@@ -1183,7 +1209,8 @@ struct perf_sample_data {
PERF_MEM_S(LVL, NA) |\
PERF_MEM_S(SNOOP, NA) |\
PERF_MEM_S(LOCK, NA) |\
- PERF_MEM_S(TLB, NA))
+ PERF_MEM_S(TLB, NA) |\
+ PERF_MEM_S(LVLNUM, NA))
static inline void perf_sample_data_init(struct perf_sample_data *data,
u64 addr, u64 period)
@@ -1237,7 +1264,8 @@ static inline void perf_sample_save_raw_data(struct perf_sample_data *data,
static inline void perf_sample_save_brstack(struct perf_sample_data *data,
struct perf_event *event,
- struct perf_branch_stack *brs)
+ struct perf_branch_stack *brs,
+ u64 *brs_cntr)
{
int size = sizeof(u64); /* nr */
@@ -1245,7 +1273,16 @@ static inline void perf_sample_save_brstack(struct perf_sample_data *data,
size += sizeof(u64);
size += brs->nr * sizeof(struct perf_branch_entry);
+ /*
+ * The extension space for counters is appended after the
+ * struct perf_branch_stack. It is used to store the occurrences
+ * of events of each branch.
+ */
+ if (brs_cntr)
+ size += brs->nr * sizeof(u64);
+
data->br_stack = brs;
+ data->br_stack_cntr = brs_cntr;
data->dyn_size += size;
data->sample_flags |= PERF_SAMPLE_BRANCH_STACK;
}
@@ -1305,15 +1342,31 @@ extern int perf_event_output(struct perf_event *event,
struct pt_regs *regs);
static inline bool
-is_default_overflow_handler(struct perf_event *event)
+__is_default_overflow_handler(perf_overflow_handler_t overflow_handler)
{
- if (likely(event->overflow_handler == perf_event_output_forward))
+ if (likely(overflow_handler == perf_event_output_forward))
return true;
- if (unlikely(event->overflow_handler == perf_event_output_backward))
+ if (unlikely(overflow_handler == perf_event_output_backward))
return true;
return false;
}
+#define is_default_overflow_handler(event) \
+ __is_default_overflow_handler((event)->overflow_handler)
+
+#ifdef CONFIG_BPF_SYSCALL
+static inline bool uses_default_overflow_handler(struct perf_event *event)
+{
+ if (likely(is_default_overflow_handler(event)))
+ return true;
+
+ return __is_default_overflow_handler(event->orig_overflow_handler);
+}
+#else
+#define uses_default_overflow_handler(event) \
+ is_default_overflow_handler(event)
+#endif
+
extern void
perf_event_header__init_id(struct perf_event_header *header,
struct perf_sample_data *data,
@@ -1545,7 +1598,7 @@ extern int sysctl_perf_cpu_time_max_percent;
extern void perf_sample_event_took(u64 sample_len_ns);
-int perf_proc_update_handler(struct ctl_table *table, int write,
+int perf_event_max_sample_rate_handler(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);
int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);
@@ -1849,10 +1902,6 @@ extern void arch_perf_update_userpage(struct perf_event *event,
struct perf_event_mmap_page *userpg,
u64 now);
-#ifdef CONFIG_MMU
-extern __weak u64 arch_perf_get_page_size(struct mm_struct *mm, unsigned long addr);
-#endif
-
/*
* Snapshot branch stack on software events.
*
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index c5a51481bbb9..f6d0e3513948 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -5,6 +5,9 @@
#include <linux/pfn.h>
#include <asm/pgtable.h>
+#define PMD_ORDER (PMD_SHIFT - PAGE_SHIFT)
+#define PUD_ORDER (PUD_SHIFT - PAGE_SHIFT)
+
#ifndef __ASSEMBLY__
#ifdef CONFIG_MMU
@@ -63,7 +66,6 @@ static inline unsigned long pte_index(unsigned long address)
{
return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}
-#define pte_index pte_index
#ifndef pmd_index
static inline unsigned long pmd_index(unsigned long address)
@@ -94,16 +96,26 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
#define pte_offset_kernel pte_offset_kernel
#endif
-#if defined(CONFIG_HIGHPTE)
-#define pte_offset_map(dir, address) \
- ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
- pte_index((address)))
-#define pte_unmap(pte) kunmap_atomic((pte))
+#ifdef CONFIG_HIGHPTE
+#define __pte_map(pmd, address) \
+ ((pte_t *)kmap_local_page(pmd_page(*(pmd))) + pte_index((address)))
+#define pte_unmap(pte) do { \
+ kunmap_local((pte)); \
+ rcu_read_unlock(); \
+} while (0)
#else
-#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
-#define pte_unmap(pte) ((void)(pte)) /* NOP */
+static inline pte_t *__pte_map(pmd_t *pmd, unsigned long address)
+{
+ return pte_offset_kernel(pmd, address);
+}
+static inline void pte_unmap(pte_t *pte)
+{
+ rcu_read_unlock();
+}
#endif
+void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable);
+
/* Find an entry in the second-level page table.. */
#ifndef pmd_offset
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
@@ -172,6 +184,75 @@ static inline int pmd_young(pmd_t pmd)
}
#endif
+#ifndef pmd_dirty
+static inline int pmd_dirty(pmd_t pmd)
+{
+ return 0;
+}
+#endif
+
+/*
+ * A facility to provide lazy MMU batching. This allows PTE updates and
+ * page invalidations to be delayed until a call to leave lazy MMU mode
+ * is issued. Some architectures may benefit from doing this, and it is
+ * beneficial for both shadow and direct mode hypervisors, which may batch
+ * the PTE updates which happen during this window. Note that using this
+ * interface requires that read hazards be removed from the code. A read
+ * hazard could result in the direct mode hypervisor case, since the actual
+ * write to the page tables may not yet have taken place, so reads though
+ * a raw PTE pointer after it has been modified are not guaranteed to be
+ * up to date. This mode can only be entered and left under the protection of
+ * the page table locks for all page tables which may be modified. In the UP
+ * case, this is required so that preemption is disabled, and in the SMP case,
+ * it must synchronize the delayed page table writes properly on other CPUs.
+ */
+#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+#define arch_enter_lazy_mmu_mode() do {} while (0)
+#define arch_leave_lazy_mmu_mode() do {} while (0)
+#define arch_flush_lazy_mmu_mode() do {} while (0)
+#endif
+
+#ifndef set_ptes
+
+#ifndef pte_next_pfn
+static inline pte_t pte_next_pfn(pte_t pte)
+{
+ return __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
+}
+#endif
+
+/**
+ * set_ptes - Map consecutive pages to a contiguous range of addresses.
+ * @mm: Address space to map the pages into.
+ * @addr: Address to map the first page at.
+ * @ptep: Page table pointer for the first entry.
+ * @pte: Page table entry for the first page.
+ * @nr: Number of pages to map.
+ *
+ * May be overridden by the architecture, or the architecture can define
+ * set_pte() and PFN_PTE_SHIFT.
+ *
+ * Context: The caller holds the page table lock. The pages all belong
+ * to the same folio. The PTEs are all in the same PMD.
+ */
+static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte, unsigned int nr)
+{
+ page_table_check_ptes_set(mm, ptep, pte, nr);
+
+ arch_enter_lazy_mmu_mode();
+ for (;;) {
+ set_pte(ptep, pte);
+ if (--nr == 0)
+ break;
+ ptep++;
+ pte = pte_next_pfn(pte);
+ }
+ arch_leave_lazy_mmu_mode();
+}
+#endif
+#define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1)
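
A hedged sketch of the caller's side of the contract spelled out in the kernel-doc above (page table lock held, all pages in one folio and one PMD):

#include <linux/mm.h>
#include <linux/pgtable.h>

/*
 * Sketch: map @nr consecutive pages of one folio at @addr.  The caller
 * holds the page table lock and keeps the range within a single PMD.
 */
static void example_map_folio_range(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep,
				    struct page *first_page, unsigned int nr)
{
	pte_t pte = mk_pte(first_page, vma->vm_page_prot);

	set_ptes(vma->vm_mm, addr, ptep, pte, nr);
}
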
+
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
@@ -204,12 +285,47 @@ static inline int pudp_set_access_flags(struct vm_area_struct *vma,
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
+#ifndef ptep_get
+static inline pte_t ptep_get(pte_t *ptep)
+{
+ return READ_ONCE(*ptep);
+}
+#endif
+
+#ifndef pmdp_get
+static inline pmd_t pmdp_get(pmd_t *pmdp)
+{
+ return READ_ONCE(*pmdp);
+}
+#endif
+
+#ifndef pudp_get
+static inline pud_t pudp_get(pud_t *pudp)
+{
+ return READ_ONCE(*pudp);
+}
+#endif
+
+#ifndef p4dp_get
+static inline p4d_t p4dp_get(p4d_t *p4dp)
+{
+ return READ_ONCE(*p4dp);
+}
+#endif
+
+#ifndef pgdp_get
+static inline pgd_t pgdp_get(pgd_t *pgdp)
+{
+ return READ_ONCE(*pgdp);
+}
+#endif
+
#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
unsigned long address,
pte_t *ptep)
{
- pte_t pte = *ptep;
+ pte_t pte = ptep_get(ptep);
int r = 1;
if (!pte_young(pte))
r = 0;
@@ -287,7 +403,21 @@ static inline bool arch_has_hw_nonleaf_pmd_young(void)
*/
static inline bool arch_has_hw_pte_young(void)
{
- return false;
+ return IS_ENABLED(CONFIG_ARCH_HAS_HW_PTE_YOUNG);
+}
+#endif
+
+#ifndef arch_check_zapped_pte
+static inline void arch_check_zapped_pte(struct vm_area_struct *vma,
+ pte_t pte)
+{
+}
+#endif
+
+#ifndef arch_check_zapped_pmd
+static inline void arch_check_zapped_pmd(struct vm_area_struct *vma,
+ pmd_t pmd)
+{
}
#endif
@@ -296,9 +426,9 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
unsigned long address,
pte_t *ptep)
{
- pte_t pte = *ptep;
+ pte_t pte = ptep_get(ptep);
pte_clear(mm, address, ptep);
- page_table_check_pte_clear(mm, address, pte);
+ page_table_check_pte_clear(mm, pte);
return pte;
}
#endif
@@ -309,20 +439,6 @@ static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
ptep_get_and_clear(mm, addr, ptep);
}
-#ifndef ptep_get
-static inline pte_t ptep_get(pte_t *ptep)
-{
- return READ_ONCE(*ptep);
-}
-#endif
-
-#ifndef pmdp_get
-static inline pmd_t pmdp_get(pmd_t *pmdp)
-{
- return READ_ONCE(*pmdp);
-}
-#endif
-
#ifdef CONFIG_GUP_GET_PXX_LOW_HIGH
/*
* For walking the pagetables without holding any locks. Some architectures
@@ -382,6 +498,7 @@ static inline pmd_t pmdp_get_lockless(pmd_t *pmdp)
return pmd;
}
#define pmdp_get_lockless pmdp_get_lockless
+#define pmdp_get_lockless_sync() tlb_remove_table_sync_one()
#endif /* CONFIG_PGTABLE_LEVELS > 2 */
#endif /* CONFIG_GUP_GET_PXX_LOW_HIGH */
@@ -400,6 +517,9 @@ static inline pmd_t pmdp_get_lockless(pmd_t *pmdp)
{
return pmdp_get(pmdp);
}
+static inline void pmdp_get_lockless_sync(void)
+{
+}
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -411,7 +531,7 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
pmd_t pmd = *pmdp;
pmd_clear(pmdp);
- page_table_check_pmd_clear(mm, address, pmd);
+ page_table_check_pmd_clear(mm, pmd);
return pmd;
}
@@ -424,7 +544,7 @@ static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
pud_t pud = *pudp;
pud_clear(pudp);
- page_table_check_pud_clear(mm, address, pud);
+ page_table_check_pud_clear(mm, pud);
return pud;
}
@@ -442,11 +562,11 @@ static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
#endif
#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL
-static inline pud_t pudp_huge_get_and_clear_full(struct mm_struct *mm,
+static inline pud_t pudp_huge_get_and_clear_full(struct vm_area_struct *vma,
unsigned long address, pud_t *pudp,
int full)
{
- return pudp_huge_get_and_clear(mm, address, pudp);
+ return pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -507,11 +627,25 @@ extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma,
pud_t *pudp);
#endif
+#ifndef pte_mkwrite
+static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
+{
+ return pte_mkwrite_novma(pte);
+}
+#endif
+
+#if defined(CONFIG_ARCH_WANT_PMD_MKWRITE) && !defined(pmd_mkwrite)
+static inline pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
+{
+ return pmd_mkwrite_novma(pmd);
+}
+#endif
+
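The new @vma argument lets an architecture pick the PTE encoding per mapping type (e.g. shadow-stack VMAs on x86). A fault-path-style sketch (assuming `entry` and `vma` from context):

	entry = pte_mkdirty(entry);
	if (vma->vm_flags & VM_WRITE)
		entry = pte_mkwrite(entry, vma);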
#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
- pte_t old_pte = *ptep;
+ pte_t old_pte = ptep_get(ptep);
set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
}
#endif
@@ -550,6 +684,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
#endif
#ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void pudp_set_wrprotect(struct mm_struct *mm,
unsigned long address, pud_t *pudp)
{
@@ -563,6 +698,7 @@ static inline void pudp_set_wrprotect(struct mm_struct *mm,
{
BUILD_BUG();
}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#endif
@@ -591,6 +727,10 @@ extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
#endif
+#ifndef arch_needs_pgtable_deposit
+#define arch_needs_pgtable_deposit() (false)
+#endif
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
* This is an implementation of pmdp_establish() that is only suitable for an
@@ -681,11 +821,14 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
return pmd_val(pmd_a) == pmd_val(pmd_b);
}
+#endif
+#ifndef pud_same
static inline int pud_same(pud_t pud_a, pud_t pud_b)
{
return pud_val(pud_a) == pud_val(pud_b);
}
+#define pud_same pud_same
#endif
#ifndef __HAVE_ARCH_P4D_SAME
@@ -1029,27 +1172,6 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
#endif
/*
- * A facility to provide lazy MMU batching. This allows PTE updates and
- * page invalidations to be delayed until a call to leave lazy MMU mode
- * is issued. Some architectures may benefit from doing this, and it is
- * beneficial for both shadow and direct mode hypervisors, which may batch
- * the PTE updates which happen during this window. Note that using this
- * interface requires that read hazards be removed from the code. A read
- * hazard could result in the direct mode hypervisor case, since the actual
- * write to the page tables may not yet have taken place, so reads though
- * a raw PTE pointer after it has been modified are not guaranteed to be
- * up to date. This mode can only be entered and left under the protection of
- * the page table locks for all page tables which may be modified. In the UP
- * case, this is required so that preemption is disabled, and in the SMP case,
- * it must synchronize the delayed page table writes properly on other CPUs.
- */
-#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
-#define arch_enter_lazy_mmu_mode() do {} while (0)
-#define arch_leave_lazy_mmu_mode() do {} while (0)
-#define arch_flush_lazy_mmu_mode() do {} while (0)
-#endif
-
-/*
* A facility to provide batching of the reload of page tables and
* other process state with the actual context switch code for
* paravirtualized guests. By convention, only one of the batched
@@ -1292,9 +1414,10 @@ static inline int pud_trans_huge(pud_t pud)
}
#endif
-/* See pmd_none_or_trans_huge_or_clear_bad for discussion. */
-static inline int pud_none_or_trans_huge_or_dev_or_clear_bad(pud_t *pud)
+static inline int pud_trans_unstable(pud_t *pud)
{
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
+ defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
pud_t pudval = READ_ONCE(*pud);
if (pud_none(pudval) || pud_trans_huge(pudval) || pud_devmap(pudval))
@@ -1303,129 +1426,22 @@ static inline int pud_none_or_trans_huge_or_dev_or_clear_bad(pud_t *pud)
pud_clear_bad(pud);
return 1;
}
- return 0;
-}
-
-/* See pmd_trans_unstable for discussion. */
-static inline int pud_trans_unstable(pud_t *pud)
-{
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
- defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
- return pud_none_or_trans_huge_or_dev_or_clear_bad(pud);
-#else
- return 0;
-#endif
-}
-
-#ifndef arch_needs_pgtable_deposit
-#define arch_needs_pgtable_deposit() (false)
#endif
-/*
- * This function is meant to be used by sites walking pagetables with
- * the mmap_lock held in read mode to protect against MADV_DONTNEED and
- * transhuge page faults. MADV_DONTNEED can convert a transhuge pmd
- * into a null pmd and the transhuge page fault can convert a null pmd
- * into an hugepmd or into a regular pmd (if the hugepage allocation
- * fails). While holding the mmap_lock in read mode the pmd becomes
- * stable and stops changing under us only if it's not null and not a
- * transhuge pmd. When those races occurs and this function makes a
- * difference vs the standard pmd_none_or_clear_bad, the result is
- * undefined so behaving like if the pmd was none is safe (because it
- * can return none anyway). The compiler level barrier() is critically
- * important to compute the two checks atomically on the same pmdval.
- *
- * For 32bit kernels with a 64bit large pmd_t this automatically takes
- * care of reading the pmd atomically to avoid SMP race conditions
- * against pmd_populate() when the mmap_lock is hold for reading by the
- * caller (a special atomic read not done by "gcc" as in the generic
- * version above, is also needed when THP is disabled because the page
- * fault can populate the pmd from under us).
- */
-static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
-{
- pmd_t pmdval = pmdp_get_lockless(pmd);
- /*
- * The barrier will stabilize the pmdval in a register or on
- * the stack so that it will stop changing under the code.
- *
- * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE,
- * pmdp_get_lockless is allowed to return a not atomic pmdval
- * (for example pointing to an hugepage that has never been
- * mapped in the pmd). The below checks will only care about
- * the low part of the pmd with 32bit PAE x86 anyway, with the
- * exception of pmd_none(). So the important thing is that if
- * the low part of the pmd is found null, the high part will
- * be also null or the pmd_none() check below would be
- * confused.
- */
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- barrier();
-#endif
- /*
- * !pmd_present() checks for pmd migration entries
- *
- * The complete check uses is_pmd_migration_entry() in linux/swapops.h
- * But using that requires moving current function and pmd_trans_unstable()
- * to linux/swapops.h to resolve dependency, which is too much code move.
- *
- * !pmd_present() is equivalent to is_pmd_migration_entry() currently,
- * because !pmd_present() pages can only be under migration not swapped
- * out.
- *
- * pmd_none() is preserved for future condition checks on pmd migration
- * entries and not confusing with this function name, although it is
- * redundant with !pmd_present().
- */
- if (pmd_none(pmdval) || pmd_trans_huge(pmdval) ||
- (IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION) && !pmd_present(pmdval)))
- return 1;
- if (unlikely(pmd_bad(pmdval))) {
- pmd_clear_bad(pmd);
- return 1;
- }
- return 0;
-}
-
-/*
- * This is a noop if Transparent Hugepage Support is not built into
- * the kernel. Otherwise it is equivalent to
- * pmd_none_or_trans_huge_or_clear_bad(), and shall only be called in
- * places that already verified the pmd is not none and they want to
- * walk ptes while holding the mmap sem in read mode (write mode don't
- * need this). If THP is not enabled, the pmd can't go away under the
- * code even if MADV_DONTNEED runs, but if THP is enabled we need to
- * run a pmd_trans_unstable before walking the ptes after
- * split_huge_pmd returns (because it may have run when the pmd become
- * null, but then a page fault can map in a THP and not a regular page).
- */
-static inline int pmd_trans_unstable(pmd_t *pmd)
-{
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- return pmd_none_or_trans_huge_or_clear_bad(pmd);
-#else
return 0;
-#endif
-}
-
-/*
- * the ordering of these checks is important for pmds with _page_devmap set.
- * if we check pmd_trans_unstable() first we will trip the bad_pmd() check
- * inside of pmd_none_or_trans_huge_or_clear_bad(). this will end up correctly
- * returning 1 but not before it spams dmesg with the pmd_clear_bad() output.
- */
-static inline int pmd_devmap_trans_unstable(pmd_t *pmd)
-{
- return pmd_devmap(*pmd) || pmd_trans_unstable(pmd);
}
#ifndef CONFIG_NUMA_BALANCING
/*
- * Technically a PTE can be PROTNONE even when not doing NUMA balancing but
- * the only case the kernel cares is for NUMA balancing and is only ever set
- * when the VMA is accessible. For PROT_NONE VMAs, the PTEs are not marked
- * _PAGE_PROTNONE so by default, implement the helper as "always no". It
- * is the responsibility of the caller to distinguish between PROT_NONE
- * protections and NUMA hinting fault protections.
+ * In an inaccessible (PROT_NONE) VMA, pte_protnone() may indicate "yes". It is
+ * perfectly valid to indicate "no" in that case, which is why our default
+ * implementation defaults to "always no".
+ *
+ * In an accessible VMA, however, pte_protnone() reliably indicates PROT_NONE
+ * page protection due to NUMA hinting. NUMA hinting faults only apply in
+ * accessible VMAs.
+ *
+ * So, to reliably identify PROT_NONE PTEs that require a NUMA hinting fault,
+ * looking at the VMA accessibility is sufficient.
*/
static inline int pte_protnone(pte_t pte)
{
@@ -1597,6 +1613,9 @@ typedef unsigned int pgtbl_mod_mask;
#define has_transparent_hugepage() IS_BUILTIN(CONFIG_TRANSPARENT_HUGEPAGE)
#endif
+#ifndef has_transparent_pud_hugepage
+#define has_transparent_pud_hugepage() IS_BUILTIN(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
+#endif
/*
* On some architectures it depends on the mm if the p4d/pud or pmd
* layer of the page table hierarchy is folded or not.
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 6478838405a0..684efaeca07c 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -86,6 +86,7 @@ extern const int phy_10gbit_features_array[1];
#define PHY_IS_INTERNAL 0x00000001
#define PHY_RST_AFTER_CLK_EN 0x00000002
#define PHY_POLL_CABLE_TEST 0x00000004
+#define PHY_ALWAYS_CALL_SUSPEND 0x00000008
#define MDIO_DEVICE_IS_PHY 0x80000000
/**
@@ -109,6 +110,7 @@ extern const int phy_10gbit_features_array[1];
* @PHY_INTERFACE_MODE_XGMII: 10 gigabit media-independent interface
* @PHY_INTERFACE_MODE_XLGMII:40 gigabit media-independent interface
* @PHY_INTERFACE_MODE_MOCA: Multimedia over Coax
+ * @PHY_INTERFACE_MODE_PSGMII: Penta SGMII
* @PHY_INTERFACE_MODE_QSGMII: Quad SGMII
* @PHY_INTERFACE_MODE_TRGMII: Turbo RGMII
* @PHY_INTERFACE_MODE_100BASEX: 100 BaseX
@@ -146,6 +148,7 @@ typedef enum {
PHY_INTERFACE_MODE_XGMII,
PHY_INTERFACE_MODE_XLGMII,
PHY_INTERFACE_MODE_MOCA,
+ PHY_INTERFACE_MODE_PSGMII,
PHY_INTERFACE_MODE_QSGMII,
PHY_INTERFACE_MODE_TRGMII,
PHY_INTERFACE_MODE_100BASEX,
@@ -253,6 +256,8 @@ static inline const char *phy_modes(phy_interface_t interface)
return "xlgmii";
case PHY_INTERFACE_MODE_MOCA:
return "moca";
+ case PHY_INTERFACE_MODE_PSGMII:
+ return "psgmii";
case PHY_INTERFACE_MODE_QSGMII:
return "qsgmii";
case PHY_INTERFACE_MODE_TRGMII:
@@ -297,6 +302,7 @@ static inline const char *phy_modes(phy_interface_t interface)
#define MII_BUS_ID_SIZE 61
struct device;
+struct kernel_hwtstamp_config;
struct phylink;
struct sfp_bus;
struct sfp_upstream_ops;
@@ -321,7 +327,8 @@ struct mdio_bus_stats {
/**
* struct phy_package_shared - Shared information in PHY packages
- * @addr: Common PHY address used to combine PHYs in one package
+ * @base_addr: Base PHY address of the PHY package, used to combine PHYs
+ *	in one package and as the base for phy_package_read()/write()
+ *	offset calculation
* @refcnt: Number of PHYs connected to this shared data
* @flags: Initialization of PHY package
* @priv_size: Size of the shared private data @priv
@@ -332,7 +339,7 @@ struct mdio_bus_stats {
* phy_package_leave().
*/
struct phy_package_shared {
- int addr;
+ u8 base_addr;
refcount_t refcnt;
unsigned long flags;
size_t priv_size;
@@ -496,14 +503,17 @@ struct phy_device *mdiobus_scan_c22(struct mii_bus *bus, int addr);
* Once complete, move to UP to restart the PHY.
* - phy_stop aborts the running test and moves to @PHY_HALTED
*
- * @PHY_HALTED: PHY is up, but no polling or interrupts are done. Or
- * PHY is in an error state.
+ * @PHY_HALTED: PHY is up, but no polling or interrupts are done.
* - phy_start moves to @PHY_UP
+ *
+ * @PHY_ERROR: PHY is up, but is in an error state.
+ * - phy_stop moves to @PHY_HALTED
*/
enum phy_state {
PHY_DOWN = 0,
PHY_READY,
PHY_HALTED,
+ PHY_ERROR,
PHY_UP,
PHY_RUNNING,
PHY_NOLINK,
@@ -548,6 +558,8 @@ struct macsec_ops;
* @downshifted_rate: Set true if link speed has been downshifted.
* @is_on_sfp_module: Set true if PHY is located on an SFP module.
* @mac_managed_pm: Set true if MAC driver takes care of suspending/resuming PHY
+ * @wol_enabled: Set to true if the PHY or the attached MAC has Wake-on-LAN
+ * enabled.
* @state: State of the PHY for management purposes
* @dev_flags: Device-specific flags used by the PHY driver.
*
@@ -557,7 +569,6 @@ struct macsec_ops;
* - Bits [31:24] are reserved for defining generic
* PHY driver behavior.
* @irq: IRQ number of the PHY's interrupt (-1 if none)
- * @phy_timer: The timer for handling the state machine
* @phylink: Pointer to phylink instance for this PHY
* @sfp_bus_attached: Flag indicating whether the SFP bus has been attached
* @sfp_bus: SFP bus attached to this PHY's fiber port
@@ -594,6 +605,8 @@ struct macsec_ops;
* @irq_rerun: Flag indicating interrupts occurred while PHY was suspended,
* requiring a rerun of the interrupt handler after resume
* @interface: enum phy_interface_t value
+ * @possible_interfaces: bitmap of interface modes that the attached PHY
+ * will switch between depending on media speed.
* @skb: Netlink message for cable diagnostics
* @nest: Netlink nest used for cable diagnostics
* @ehdr: Netlink header for cable diagnostics
@@ -644,6 +657,7 @@ struct phy_device {
unsigned downshifted_rate:1;
unsigned is_on_sfp_module:1;
unsigned mac_managed_pm:1;
+ unsigned wol_enabled:1;
unsigned autoneg:1;
/* The most recently read link state */
@@ -662,6 +676,7 @@ struct phy_device {
u32 dev_flags;
phy_interface_t interface;
+ DECLARE_PHY_INTERFACE_MASK(possible_interfaces);
/*
* forced speed & duplex (no autoneg)
@@ -1097,6 +1112,39 @@ struct phy_driver {
int (*led_blink_set)(struct phy_device *dev, u8 index,
unsigned long *delay_on,
unsigned long *delay_off);
+ /**
+ * @led_hw_is_supported: Can the HW support the given rules.
+ * @dev: PHY device which has the LED
+ * @index: Which LED of the PHY device
+ * @rules: The rules the core is interested in
+ *
+ * Return 0 if yes, -EOPNOTSUPP if not, or an error code.
+ */
+ int (*led_hw_is_supported)(struct phy_device *dev, u8 index,
+ unsigned long rules);
+ /**
+ * @led_hw_control_set: Set the HW to control the LED
+ * @dev: PHY device which has the LED
+ * @index: Which LED of the PHY device
+ * @rules: The rules used to control the LED
+ *
+ * Returns 0, or an error code.
+ */
+ int (*led_hw_control_set)(struct phy_device *dev, u8 index,
+ unsigned long rules);
+ /**
+ * @led_hw_control_get: Get how the HW is controlling the LED
+ * @dev: PHY device which has the LED
+ * @index: Which LED of the PHY device
+ * @rules: Pointer to the rules used to control the LED
+ *
+ * Set *@rules to how the HW is currently blinking. Returns 0
+ * on success, or an error code if the current blinking cannot
+ * be represented by @rules, or if some other error happens.
+ */
+ int (*led_hw_control_get)(struct phy_device *dev, u8 index,
+ unsigned long *rules);
+
};
#define to_phy_driver(d) container_of(to_mdio_common_driver(d), \
struct phy_driver, mdiodrv)
@@ -1108,6 +1156,34 @@ struct phy_driver {
#define PHY_ID_MATCH_MODEL(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 4)
#define PHY_ID_MATCH_VENDOR(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 10)
+/**
+ * phy_id_compare - compare @id1 with @id2 taking account of @mask
+ * @id1: first PHY ID
+ * @id2: second PHY ID
+ * @mask: the PHY ID mask, set bits are significant in matching
+ *
+ * Return true if the bits from @id1 and @id2 specified by @mask match.
+ * This uses an equivalent test to (@id1 & @mask) == (@id2 & @mask).
+ */
+static inline bool phy_id_compare(u32 id1, u32 id2, u32 mask)
+{
+ return !((id1 ^ id2) & mask);
+}
+
+/**
+ * phydev_id_compare - compare @id with the PHY's Clause 22 ID
+ * @phydev: the PHY device
+ * @id: the PHY ID to be matched
+ *
+ * Compare the @phydev Clause 22 ID with the provided @id and return true or
+ * false depending on whether it matches, using the bound driver's mask. The
+ * @phydev must be bound to a driver.
+ */
+static inline bool phydev_id_compare(struct phy_device *phydev, u32 id)
+{
+ return phy_id_compare(id, phydev->phy_id, phydev->drv->phy_id_mask);
+}
+
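For instance, a probe path could match a PHY model the same way PHY_ID_MATCH_MODEL() does (a sketch; 0x004dd074 is a placeholder ID, not a real device):

	if (phy_id_compare(phydev->phy_id, 0x004dd074, GENMASK(31, 4)))
		phydev_info(phydev, "PHY model matched\n");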
/* A Structure for boards to register fixups with the PHY Lib */
struct phy_fixup {
struct list_head list;
@@ -1171,10 +1247,12 @@ static inline int phy_read(struct phy_device *phydev, u32 regnum)
#define phy_read_poll_timeout(phydev, regnum, val, cond, sleep_us, \
timeout_us, sleep_before_read) \
({ \
- int __ret = read_poll_timeout(phy_read, val, val < 0 || (cond), \
+ int __ret, __val; \
+ __ret = read_poll_timeout(__val = phy_read, val, \
+ __val < 0 || (cond), \
sleep_us, timeout_us, sleep_before_read, phydev, regnum); \
- if (val < 0) \
- __ret = val; \
+ if (__val < 0) \
+ __ret = __val; \
if (__ret) \
phydev_err(phydev, "%s failed: %d\n", __func__, __ret); \
__ret; \
@@ -1267,11 +1345,13 @@ int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum);
#define phy_read_mmd_poll_timeout(phydev, devaddr, regnum, val, cond, \
sleep_us, timeout_us, sleep_before_read) \
({ \
- int __ret = read_poll_timeout(phy_read_mmd, val, (cond) || val < 0, \
+ int __ret, __val; \
+ __ret = read_poll_timeout(__val = phy_read_mmd, val, \
+ __val < 0 || (cond), \
sleep_us, timeout_us, sleep_before_read, \
phydev, devaddr, regnum); \
- if (val < 0) \
- __ret = val; \
+ if (__val < 0) \
+ __ret = __val; \
if (__ret) \
phydev_err(phydev, "%s failed: %d\n", __func__, __ret); \
__ret; \
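Both poll macros follow the read_poll_timeout() calling convention; a typical use is waiting for a self-clearing reset bit (a sketch with illustrative timings):

	int val, err;

	/* poll BMCR every 1ms, give up after 100ms, sleep before first read */
	err = phy_read_poll_timeout(phydev, MII_BMCR, val, !(val & BMCR_RESET),
				    1000, 100000, true);
	if (err)
		return err;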
@@ -1483,9 +1563,11 @@ static inline bool phy_has_txtstamp(struct phy_device *phydev)
return phydev && phydev->mii_ts && phydev->mii_ts->txtstamp;
}
-static inline int phy_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
+static inline int phy_hwtstamp(struct phy_device *phydev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
- return phydev->mii_ts->hwtstamp(phydev->mii_ts, ifr);
+ return phydev->mii_ts->hwtstamp(phydev->mii_ts, cfg, extack);
}
static inline bool phy_rxtstamp(struct phy_device *phydev, struct sk_buff *skb,
@@ -1659,6 +1741,7 @@ void phy_detach(struct phy_device *phydev);
void phy_start(struct phy_device *phydev);
void phy_stop(struct phy_device *phydev);
int phy_config_aneg(struct phy_device *phydev);
+int _phy_start_aneg(struct phy_device *phydev);
int phy_start_aneg(struct phy_device *phydev);
int phy_aneg_done(struct phy_device *phydev);
int phy_speed_down(struct phy_device *phydev, bool sync);
@@ -1692,10 +1775,6 @@ int phy_start_cable_test_tdr(struct phy_device *phydev,
}
#endif
-int phy_cable_test_result(struct phy_device *phydev, u8 pair, u16 result);
-int phy_cable_test_fault_length(struct phy_device *phydev, u8 pair,
- u16 cm);
-
static inline void phy_device_reset(struct phy_device *phydev, int value)
{
mdio_device_reset(&phydev->mdio, value);
@@ -1787,6 +1866,8 @@ int genphy_c45_an_config_aneg(struct phy_device *phydev);
int genphy_c45_an_disable_aneg(struct phy_device *phydev);
int genphy_c45_read_mdix(struct phy_device *phydev);
int genphy_c45_pma_read_abilities(struct phy_device *phydev);
+int genphy_c45_pma_read_ext_abilities(struct phy_device *phydev);
+int genphy_c45_pma_baset1_read_abilities(struct phy_device *phydev);
int genphy_c45_read_eee_abilities(struct phy_device *phydev);
int genphy_c45_pma_baset1_read_master_slave(struct phy_device *phydev);
int genphy_c45_read_status(struct phy_device *phydev);
@@ -1895,10 +1976,10 @@ int phy_ethtool_get_link_ksettings(struct net_device *ndev,
int phy_ethtool_set_link_ksettings(struct net_device *ndev,
const struct ethtool_link_ksettings *cmd);
int phy_ethtool_nway_reset(struct net_device *ndev);
-int phy_package_join(struct phy_device *phydev, int addr, size_t priv_size);
+int phy_package_join(struct phy_device *phydev, int base_addr, size_t priv_size);
void phy_package_leave(struct phy_device *phydev);
int devm_phy_package_join(struct device *dev, struct phy_device *phydev,
- int addr, size_t priv_size);
+ int base_addr, size_t priv_size);
int __init mdio_bus_init(void);
void mdio_bus_exit(void);
@@ -1915,48 +1996,89 @@ int phy_ethtool_set_plca_cfg(struct phy_device *phydev,
int phy_ethtool_get_plca_status(struct phy_device *phydev,
struct phy_plca_status *plca_st);
-static inline int phy_package_read(struct phy_device *phydev, u32 regnum)
+int __phy_hwtstamp_get(struct phy_device *phydev,
+ struct kernel_hwtstamp_config *config);
+int __phy_hwtstamp_set(struct phy_device *phydev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+
+static inline int phy_package_address(struct phy_device *phydev,
+ unsigned int addr_offset)
{
struct phy_package_shared *shared = phydev->shared;
+ u8 base_addr = shared->base_addr;
- if (!shared)
+ if (addr_offset >= PHY_MAX_ADDR - base_addr)
return -EIO;
- return mdiobus_read(phydev->mdio.bus, shared->addr, regnum);
+ /* we know that addr will be in the range 0..31 and thus the
+ * implicit cast to a signed int is not a problem.
+ */
+ return base_addr + addr_offset;
}
-static inline int __phy_package_read(struct phy_device *phydev, u32 regnum)
+static inline int phy_package_read(struct phy_device *phydev,
+ unsigned int addr_offset, u32 regnum)
{
- struct phy_package_shared *shared = phydev->shared;
+ int addr = phy_package_address(phydev, addr_offset);
- if (!shared)
- return -EIO;
+ if (addr < 0)
+ return addr;
+
+ return mdiobus_read(phydev->mdio.bus, addr, regnum);
+}
+
+static inline int __phy_package_read(struct phy_device *phydev,
+ unsigned int addr_offset, u32 regnum)
+{
+ int addr = phy_package_address(phydev, addr_offset);
+
+ if (addr < 0)
+ return addr;
- return __mdiobus_read(phydev->mdio.bus, shared->addr, regnum);
+ return __mdiobus_read(phydev->mdio.bus, addr, regnum);
}
static inline int phy_package_write(struct phy_device *phydev,
- u32 regnum, u16 val)
+ unsigned int addr_offset, u32 regnum,
+ u16 val)
{
- struct phy_package_shared *shared = phydev->shared;
+ int addr = phy_package_address(phydev, addr_offset);
- if (!shared)
- return -EIO;
+ if (addr < 0)
+ return addr;
- return mdiobus_write(phydev->mdio.bus, shared->addr, regnum, val);
+ return mdiobus_write(phydev->mdio.bus, addr, regnum, val);
}
static inline int __phy_package_write(struct phy_device *phydev,
- u32 regnum, u16 val)
+ unsigned int addr_offset, u32 regnum,
+ u16 val)
{
- struct phy_package_shared *shared = phydev->shared;
+ int addr = phy_package_address(phydev, addr_offset);
- if (!shared)
- return -EIO;
+ if (addr < 0)
+ return addr;
- return __mdiobus_write(phydev->mdio.bus, shared->addr, regnum, val);
+ return __mdiobus_write(phydev->mdio.bus, addr, regnum, val);
}
+int __phy_package_read_mmd(struct phy_device *phydev,
+ unsigned int addr_offset, int devad,
+ u32 regnum);
+
+int phy_package_read_mmd(struct phy_device *phydev,
+ unsigned int addr_offset, int devad,
+ u32 regnum);
+
+int __phy_package_write_mmd(struct phy_device *phydev,
+ unsigned int addr_offset, int devad,
+ u32 regnum, u16 val);
+
+int phy_package_write_mmd(struct phy_device *phydev,
+ unsigned int addr_offset, int devad,
+ u32 regnum, u16 val);
+
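With the reworked API a PHY addresses its package siblings by offset from @base_addr rather than by absolute bus address. A sketch (REG_GLOBAL_CTRL is a placeholder register number):

	/* read a global register on the PHY at base_addr + 1 */
	int ret = phy_package_read(phydev, 1, REG_GLOBAL_CTRL);

	if (ret < 0)
		return ret;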
static inline bool __phy_package_set_once(struct phy_device *phydev,
unsigned int b)
{
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index 3a570bc59fc7..f6d607ef0e80 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -148,6 +148,7 @@ struct phy_attrs {
* @power_count: used to protect when the PHY is used by multiple consumers
* @attrs: used to specify PHY specific attributes
* @pwr: power regulator associated with the phy
+ * @debugfs: debugfs directory
*/
struct phy {
struct device dev;
@@ -158,6 +159,7 @@ struct phy {
int power_count;
struct phy_attrs attrs;
struct regulator *pwr;
+ struct dentry *debugfs;
};
/**
diff --git a/include/linux/phylib_stubs.h b/include/linux/phylib_stubs.h
new file mode 100644
index 000000000000..1279f48c8a70
--- /dev/null
+++ b/include/linux/phylib_stubs.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Stubs for the Network PHY library
+ */
+
+#include <linux/rtnetlink.h>
+
+struct kernel_hwtstamp_config;
+struct netlink_ext_ack;
+struct phy_device;
+
+#if IS_ENABLED(CONFIG_PHYLIB)
+
+extern const struct phylib_stubs *phylib_stubs;
+
+struct phylib_stubs {
+ int (*hwtstamp_get)(struct phy_device *phydev,
+ struct kernel_hwtstamp_config *config);
+ int (*hwtstamp_set)(struct phy_device *phydev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+};
+
+static inline int phy_hwtstamp_get(struct phy_device *phydev,
+ struct kernel_hwtstamp_config *config)
+{
+ /* phylib_register_stubs() and phylib_unregister_stubs()
+ * also run under rtnl_lock().
+ */
+ ASSERT_RTNL();
+
+ if (!phylib_stubs)
+ return -EOPNOTSUPP;
+
+ return phylib_stubs->hwtstamp_get(phydev, config);
+}
+
+static inline int phy_hwtstamp_set(struct phy_device *phydev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ /* phylib_register_stubs() and phylib_unregister_stubs()
+ * also run under rtnl_lock().
+ */
+ ASSERT_RTNL();
+
+ if (!phylib_stubs)
+ return -EOPNOTSUPP;
+
+ return phylib_stubs->hwtstamp_set(phydev, config, extack);
+}
+
+#else
+
+static inline int phy_hwtstamp_get(struct phy_device *phydev,
+ struct kernel_hwtstamp_config *config)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int phy_hwtstamp_set(struct phy_device *phydev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif
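Callers compile against the same interface whether or not phylib is built in; a netdev-side sketch (assuming rtnl is held and `dev->phydev`, `cfg` and `extack` come from the surrounding hwtstamp path):

	int err = phy_hwtstamp_set(dev->phydev, cfg, extack);

	if (err == -EOPNOTSUPP)
		/* no PHY timestamping available: fall back to the MAC */;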
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index 71755c66c162..d589f89c612c 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -9,6 +9,7 @@ struct device_node;
struct ethtool_cmd;
struct fwnode_handle;
struct net_device;
+struct phylink;
enum {
MLO_PAUSE_NONE,
@@ -21,6 +22,24 @@ enum {
MLO_AN_FIXED, /* Fixed-link mode */
MLO_AN_INBAND, /* In-band protocol */
+ /* PCS "negotiation" mode.
+ * PHYLINK_PCS_NEG_NONE - protocol has no inband capability
+ * PHYLINK_PCS_NEG_OUTBAND - some out of band or fixed link setting
+ * PHYLINK_PCS_NEG_INBAND_DISABLED - inband mode disabled, e.g.
+ * 1000base-X with autoneg off
+ * PHYLINK_PCS_NEG_INBAND_ENABLED - inband mode enabled
+ * Additionally, this can be tested using bitmasks:
+ * PHYLINK_PCS_NEG_INBAND - inband mode selected
+ * PHYLINK_PCS_NEG_ENABLED - negotiation mode enabled
+ */
+ PHYLINK_PCS_NEG_NONE = 0,
+ PHYLINK_PCS_NEG_ENABLED = BIT(4),
+ PHYLINK_PCS_NEG_OUTBAND = BIT(5),
+ PHYLINK_PCS_NEG_INBAND = BIT(6),
+ PHYLINK_PCS_NEG_INBAND_DISABLED = PHYLINK_PCS_NEG_INBAND,
+ PHYLINK_PCS_NEG_INBAND_ENABLED = PHYLINK_PCS_NEG_INBAND |
+ PHYLINK_PCS_NEG_ENABLED,
+
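A pcs_config() implementation that opts in by setting pcs->neg_mode can then decode its argument directly (a sketch):

	if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED)
		/* program the advertisement and enable in-band AN */;
	else if (neg_mode & PHYLINK_PCS_NEG_INBAND)
		/* in-band signalling with negotiation disabled */;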
/* MAC_SYM_PAUSE and MAC_ASYM_PAUSE are used when configuring our
* autonegotiation advertisement. They correspond to the PAUSE and
* ASM_DIR bits defined by 802.3, respectively.
@@ -116,8 +135,6 @@ enum phylink_op_type {
* struct phylink_config - PHYLINK configuration structure
* @dev: a pointer to a struct device associated with the MAC
* @type: operation type of PHYLINK instance
- * @legacy_pre_march2020: driver has not been updated for March 2020 updates
- * (See commit 7cceb599d15d ("net: phylink: avoid mac_config calls")
* @poll_fixed_state: if true, starts link_poll,
* if MAC link is at %MLO_AN_FIXED mode.
* @mac_managed_pm: if true, indicate the MAC driver is responsible for PHY PM.
@@ -131,7 +148,6 @@ enum phylink_op_type {
struct phylink_config {
struct device *dev;
enum phylink_op_type type;
- bool legacy_pre_march2020;
bool poll_fixed_state;
bool mac_managed_pm;
bool ovr_an_inband;
@@ -141,35 +157,31 @@ struct phylink_config {
unsigned long mac_capabilities;
};
+void phylink_limit_mac_speed(struct phylink_config *config, u32 max_speed);
+
/**
* struct phylink_mac_ops - MAC operations structure.
- * @validate: Validate and update the link configuration.
+ * @mac_get_caps: Get MAC capabilities for interface mode.
* @mac_select_pcs: Select a PCS for the interface mode.
- * @mac_pcs_get_state: Read the current link state from the hardware.
* @mac_prepare: prepare for a major reconfiguration of the interface.
* @mac_config: configure the MAC for the selected mode and state.
* @mac_finish: finish a major reconfiguration of the interface.
- * @mac_an_restart: restart 802.3z BaseX autonegotiation.
* @mac_link_down: take the link down.
* @mac_link_up: allow the link to come up.
*
* The individual methods are described more fully below.
*/
struct phylink_mac_ops {
- void (*validate)(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state);
+ unsigned long (*mac_get_caps)(struct phylink_config *config,
+ phy_interface_t interface);
struct phylink_pcs *(*mac_select_pcs)(struct phylink_config *config,
phy_interface_t interface);
- void (*mac_pcs_get_state)(struct phylink_config *config,
- struct phylink_link_state *state);
int (*mac_prepare)(struct phylink_config *config, unsigned int mode,
phy_interface_t iface);
void (*mac_config)(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state);
int (*mac_finish)(struct phylink_config *config, unsigned int mode,
phy_interface_t iface);
- void (*mac_an_restart)(struct phylink_config *config);
void (*mac_link_down)(struct phylink_config *config, unsigned int mode,
phy_interface_t interface);
void (*mac_link_up)(struct phylink_config *config,
@@ -180,39 +192,17 @@ struct phylink_mac_ops {
#if 0 /* For kernel-doc purposes only. */
/**
- * validate - Validate and update the link configuration
+ * mac_get_caps: Get MAC capabilities for interface mode.
* @config: a pointer to a &struct phylink_config.
- * @supported: ethtool bitmask for supported link modes.
- * @state: a pointer to a &struct phylink_link_state.
- *
- * Clear bits in the @supported and @state->advertising masks that
- * are not supportable by the MAC.
+ * @interface: PHY interface mode.
*
- * Note that the PHY may be able to transform from one connection
- * technology to another, so, eg, don't clear 1000BaseX just
- * because the MAC is unable to BaseX mode. This is more about
- * clearing unsupported speeds and duplex settings. The port modes
- * should not be cleared; phylink_set_port_modes() will help with this.
- *
- * When @config->supported_interfaces has been set, phylink will iterate
- * over the supported interfaces to determine the full capability of the
- * MAC. The validation function must not print errors if @state->interface
- * is set to an unexpected value.
- *
- * When @config->supported_interfaces is empty, phylink will call this
- * function with @state->interface set to %PHY_INTERFACE_MODE_NA, and
- * expects the MAC driver to return all supported link modes.
- *
- * If the @state->interface mode is not supported, then the @supported
- * mask must be cleared.
- *
- * This member is optional; if not set, the generic validator will be
- * used making use of @config->mac_capabilities and
- * @config->supported_interfaces to determine which link modes are
- * supported.
+ * Optional method. When not provided, config->mac_capabilities will be used.
+ * When implemented, this returns the MAC capabilities for the specified
+ * interface mode where there is some special handling required by the MAC
+ * driver (e.g. not supporting half-duplex in certain interface modes).
*/
-void validate(struct phylink_config *config, unsigned long *supported,
- struct phylink_link_state *state);
+unsigned long mac_get_caps(struct phylink_config *config,
+ phy_interface_t interface);
/**
* mac_select_pcs: Select a PCS for the interface mode.
* @config: a pointer to a &struct phylink_config.
@@ -230,25 +220,6 @@ struct phylink_pcs *mac_select_pcs(struct phylink_config *config,
phy_interface_t interface);
/**
- * mac_pcs_get_state() - Read the current inband link state from the hardware
- * @config: a pointer to a &struct phylink_config.
- * @state: a pointer to a &struct phylink_link_state.
- *
- * Read the current inband link state from the MAC PCS, reporting the
- * current speed in @state->speed, duplex mode in @state->duplex, pause
- * mode in @state->pause using the %MLO_PAUSE_RX and %MLO_PAUSE_TX bits,
- * negotiation completion state in @state->an_complete, and link up state
- * in @state->link. If possible, @state->lp_advertising should also be
- * populated.
- *
- * Note: This is a legacy method. This function will not be called unless
- * legacy_pre_march2020 is set in &struct phylink_config and there is no
- * PCS attached.
- */
-void mac_pcs_get_state(struct phylink_config *config,
- struct phylink_link_state *state);
-
-/**
* mac_prepare() - prepare to change the PHY interface mode
* @config: a pointer to a &struct phylink_config.
* @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND.
@@ -284,17 +255,9 @@ int mac_prepare(struct phylink_config *config, unsigned int mode,
* guaranteed to be correct, and so any mac_config() implementation must
* never reference these fields.
*
- * Note: For legacy March 2020 drivers (drivers with legacy_pre_march2020 set
- * in their &phylnk_config and which don't have a PCS), this function will be
- * called on each link up event, and to also change the in-band advert. For
- * non-legacy drivers, it will only be called to reconfigure the MAC for a
- * "major" change in e.g. interface mode. It will not be called for changes
- * in speed, duplex or pause modes or to change the in-band advertisement.
- * In any case, it is strongly preferred that speed, duplex and pause settings
- * are handled in the mac_link_up() method and not in this method.
- *
- * (this requires a rewrite - please refer to mac_link_up() for situations
- * where the PCS and MAC are not tightly integrated.)
+ * This will only be called to reconfigure the MAC for a "major" change in
+ * e.g. interface mode. It will not be called for changes in speed, duplex
+ * or pause modes or to change the in-band advertisement.
*
* In all negotiation modes, as defined by @mode, @state->pause indicates the
* pause settings which should be applied as follows. If %MLO_PAUSE_AN is not
@@ -326,7 +289,7 @@ int mac_prepare(struct phylink_config *config, unsigned int mode,
* 1000base-X or Cisco SGMII mode depending on the @state->interface
* mode). In both cases, link state management (whether the link
* is up or not) is performed by the MAC, and reported via the
- * mac_pcs_get_state() callback. Changes in link state must be made
+ * pcs_get_state() callback. Changes in link state must be made
* by calling phylink_mac_change().
*
* Interface mode specific details are mentioned below.
@@ -375,16 +338,6 @@ int mac_finish(struct phylink_config *config, unsigned int mode,
phy_interface_t iface);
/**
- * mac_an_restart() - restart 802.3z BaseX autonegotiation
- * @config: a pointer to a &struct phylink_config.
- *
- * Note: This is a legacy method. This function will not be called unless
- * legacy_pre_march2020 is set in &struct phylink_config and there is no
- * PCS attached.
- */
-void mac_an_restart(struct phylink_config *config);
-
-/**
* mac_link_down() - take the link down
* @config: a pointer to a &struct phylink_config.
* @mode: link autonegotiation mode
@@ -436,19 +389,30 @@ struct phylink_pcs_ops;
/**
* struct phylink_pcs - PHYLINK PCS instance
* @ops: a pointer to the &struct phylink_pcs_ops structure
+ * @phylink: pointer to the &struct phylink instance
+ * @neg_mode: provide PCS neg mode via "mode" argument
* @poll: poll the PCS for link changes
*
* This structure is designed to be embedded within the PCS private data,
* and will be passed between phylink and the PCS.
+ *
+ * The @phylink member is private to phylink and must not be touched by
+ * the PCS driver.
*/
struct phylink_pcs {
const struct phylink_pcs_ops *ops;
+ struct phylink *phylink;
+ bool neg_mode;
bool poll;
};
/**
* struct phylink_pcs_ops - MAC PCS operations structure.
* @pcs_validate: validate the link configuration.
+ * @pcs_enable: enable the PCS.
+ * @pcs_disable: disable the PCS.
+ * @pcs_pre_config: pre-mac_config method (for errata)
+ * @pcs_post_config: post-mac_config method (for errata)
* @pcs_get_state: read the current MAC PCS link state from the hardware.
* @pcs_config: configure the MAC PCS for the selected mode and state.
* @pcs_an_restart: restart 802.3z BaseX autonegotiation.
@@ -458,14 +422,20 @@ struct phylink_pcs {
struct phylink_pcs_ops {
int (*pcs_validate)(struct phylink_pcs *pcs, unsigned long *supported,
const struct phylink_link_state *state);
+ int (*pcs_enable)(struct phylink_pcs *pcs);
+ void (*pcs_disable)(struct phylink_pcs *pcs);
+ void (*pcs_pre_config)(struct phylink_pcs *pcs,
+ phy_interface_t interface);
+ int (*pcs_post_config)(struct phylink_pcs *pcs,
+ phy_interface_t interface);
void (*pcs_get_state)(struct phylink_pcs *pcs,
struct phylink_link_state *state);
- int (*pcs_config)(struct phylink_pcs *pcs, unsigned int mode,
+ int (*pcs_config)(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface,
const unsigned long *advertising,
bool permit_pause_to_mac);
void (*pcs_an_restart)(struct phylink_pcs *pcs);
- void (*pcs_link_up)(struct phylink_pcs *pcs, unsigned int mode,
+ void (*pcs_link_up)(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface, int speed, int duplex);
};
@@ -488,6 +458,18 @@ int pcs_validate(struct phylink_pcs *pcs, unsigned long *supported,
const struct phylink_link_state *state);
/**
+ * pcs_enable() - enable the PCS.
+ * @pcs: a pointer to a &struct phylink_pcs.
+ */
+int pcs_enable(struct phylink_pcs *pcs);
+
+/**
+ * pcs_disable() - disable the PCS.
+ * @pcs: a pointer to a &struct phylink_pcs.
+ */
+void pcs_disable(struct phylink_pcs *pcs);
+
+/**
* pcs_get_state() - Read the current inband link state from the hardware
* @pcs: a pointer to a &struct phylink_pcs.
* @state: a pointer to a &struct phylink_link_state.
@@ -499,8 +481,8 @@ int pcs_validate(struct phylink_pcs *pcs, unsigned long *supported,
* in @state->link. If possible, @state->lp_advertising should also be
* populated.
*
- * When present, this overrides mac_pcs_get_state() in &struct
- * phylink_mac_ops.
+ * When present, this overrides pcs_get_state() in &struct
+ * phylink_pcs_ops.
*/
void pcs_get_state(struct phylink_pcs *pcs,
struct phylink_link_state *state);
@@ -508,7 +490,7 @@ void pcs_get_state(struct phylink_pcs *pcs,
/**
* pcs_config() - Configure the PCS mode and advertisement
* @pcs: a pointer to a &struct phylink_pcs.
- * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND.
+ * @neg_mode: link negotiation mode (see below)
* @interface: interface mode to be used
* @advertising: adertisement ethtool link mode mask
* @permit_pause_to_mac: permit forwarding pause resolution to MAC
@@ -526,8 +508,12 @@ void pcs_get_state(struct phylink_pcs *pcs,
* For 1000BASE-X, the advertisement should be programmed into the PCS.
*
* For most 10GBASE-R, there is no advertisement.
+ *
+ * The %neg_mode argument should be tested via the phylink_mode_*() family of
+ * functions, or for a PCS that sets pcs->neg_mode true, should be tested
+ * against the PHYLINK_PCS_NEG_* definitions.
*/
-int pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+int pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface, const unsigned long *advertising,
bool permit_pause_to_mac);
@@ -543,7 +529,7 @@ void pcs_an_restart(struct phylink_pcs *pcs);
/**
* pcs_link_up() - program the PCS for the resolved link configuration
* @pcs: a pointer to a &struct phylink_pcs.
- * @mode: link autonegotiation mode
+ * @neg_mode: link negotiation mode (see below)
* @interface: link &typedef phy_interface_t mode
* @speed: link speed
* @duplex: link duplex
@@ -552,36 +538,31 @@ void pcs_an_restart(struct phylink_pcs *pcs);
* the resolved link parameters. For example, a PCS operating in SGMII
* mode without in-band AN needs to be manually configured for the link
* and duplex setting. Otherwise, this should be a no-op.
+ *
+ * The %neg_mode argument should be tested via the phylink_mode_*() family of
+ * functions, or for a PCS that sets pcs->neg_mode true, should be tested
+ * against the PHYLINK_PCS_NEG_* definitions.
*/
-void pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
+void pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface, int speed, int duplex);
#endif
-void phylink_caps_to_linkmodes(unsigned long *linkmodes, unsigned long caps);
-unsigned long phylink_get_capabilities(phy_interface_t interface,
- unsigned long mac_capabilities,
- int rate_matching);
-void phylink_validate_mask_caps(unsigned long *supported,
- struct phylink_link_state *state,
- unsigned long caps);
-void phylink_generic_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state);
-
-struct phylink *phylink_create(struct phylink_config *, struct fwnode_handle *,
- phy_interface_t iface,
- const struct phylink_mac_ops *mac_ops);
+struct phylink *phylink_create(struct phylink_config *,
+ const struct fwnode_handle *,
+ phy_interface_t,
+ const struct phylink_mac_ops *);
void phylink_destroy(struct phylink *);
bool phylink_expects_phy(struct phylink *pl);
int phylink_connect_phy(struct phylink *, struct phy_device *);
int phylink_of_phy_connect(struct phylink *, struct device_node *, u32 flags);
int phylink_fwnode_phy_connect(struct phylink *pl,
- struct fwnode_handle *fwnode,
+ const struct fwnode_handle *fwnode,
u32 flags);
void phylink_disconnect_phy(struct phylink *);
void phylink_mac_change(struct phylink *, bool up);
+void phylink_pcs_change(struct phylink_pcs *, bool up);
void phylink_start(struct phylink *);
void phylink_stop(struct phylink *);
@@ -650,11 +631,14 @@ void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs,
struct phylink_link_state *state);
int phylink_mii_c22_pcs_encode_advertisement(phy_interface_t interface,
const unsigned long *advertising);
-int phylink_mii_c22_pcs_config(struct mdio_device *pcs, unsigned int mode,
+int phylink_mii_c22_pcs_config(struct mdio_device *pcs,
phy_interface_t interface,
- const unsigned long *advertising);
+ const unsigned long *advertising,
+ unsigned int neg_mode);
void phylink_mii_c22_pcs_an_restart(struct mdio_device *pcs);
+void phylink_resolve_c73(struct phylink_link_state *state);
+
void phylink_mii_c45_pcs_get_state(struct mdio_device *pcs,
struct phylink_link_state *state);
diff --git a/include/linux/pid.h b/include/linux/pid.h
index b75de288a8c2..395cacce1179 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -2,18 +2,12 @@
#ifndef _LINUX_PID_H
#define _LINUX_PID_H
+#include <linux/pid_types.h>
#include <linux/rculist.h>
-#include <linux/wait.h>
+#include <linux/rcupdate.h>
#include <linux/refcount.h>
-
-enum pid_type
-{
- PIDTYPE_PID,
- PIDTYPE_TGID,
- PIDTYPE_PGID,
- PIDTYPE_SID,
- PIDTYPE_MAX,
-};
+#include <linux/sched.h>
+#include <linux/wait.h>
/*
* What is struct pid?
@@ -67,7 +61,7 @@ struct pid
/* wait queue for pidfd notifications */
wait_queue_head_t wait_pidfd;
struct rcu_head rcu;
- struct upid numbers[1];
+ struct upid numbers[];
};
extern struct pid init_struct_pid;
@@ -110,9 +104,6 @@ extern void exchange_tids(struct task_struct *task, struct task_struct *old);
extern void transfer_pid(struct task_struct *old, struct task_struct *new,
enum pid_type);
-struct pid_namespace;
-extern struct pid_namespace init_pid_ns;
-
extern int pid_max;
extern int pid_max_min, pid_max_max;
@@ -215,4 +206,127 @@ pid_t pid_vnr(struct pid *pid);
} \
task = tg___; \
} while_each_pid_task(pid, type, task)
+
+static inline struct pid *task_pid(struct task_struct *task)
+{
+ return task->thread_pid;
+}
+
+/*
+ * the helpers to get the task's different pids as they are seen
+ * from various namespaces
+ *
+ * task_xid_nr() : global id, i.e. the id seen from the init namespace;
+ * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of
+ * current.
+ * task_xid_nr_ns() : id seen from the ns specified;
+ *
+ * see also pid_nr() etc. earlier in this header
+ */
+pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);
+
+static inline pid_t task_pid_nr(struct task_struct *tsk)
+{
+ return tsk->pid;
+}
+
+static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
+}
+
+static inline pid_t task_pid_vnr(struct task_struct *tsk)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
+}
+
+
+static inline pid_t task_tgid_nr(struct task_struct *tsk)
+{
+ return tsk->tgid;
+}
+
+/**
+ * pid_alive - check that a task structure is not stale
+ * @p: Task structure to be checked.
+ *
+ * Test if a process is not yet dead (at most zombie state).
+ * If pid_alive fails, then pointers within the task structure
+ * can be stale and must not be dereferenced.
+ *
+ * Return: 1 if the process is alive. 0 otherwise.
+ */
+static inline int pid_alive(const struct task_struct *p)
+{
+ return p->thread_pid != NULL;
+}
+
+static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
+}
+
+static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
+}
+
+
+static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
+}
+
+static inline pid_t task_session_vnr(struct task_struct *tsk)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
+}
+
+static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns);
+}
+
+static inline pid_t task_tgid_vnr(struct task_struct *tsk)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL);
+}
+
+static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
+{
+ pid_t pid = 0;
+
+ rcu_read_lock();
+ if (pid_alive(tsk))
+ pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
+ rcu_read_unlock();
+
+ return pid;
+}
+
+static inline pid_t task_ppid_nr(const struct task_struct *tsk)
+{
+ return task_ppid_nr_ns(tsk, &init_pid_ns);
+}
+
+/* Obsolete, do not use: */
+static inline pid_t task_pgrp_nr(struct task_struct *tsk)
+{
+ return task_pgrp_nr_ns(tsk, &init_pid_ns);
+}
+
+/**
+ * is_global_init - check if a task structure is init. Since init
+ * is free to have sub-threads we need to check tgid.
+ * @tsk: Task structure to be checked.
+ *
+ * Check if a task structure is the first user space task the kernel created.
+ *
+ * Return: 1 if the task structure is init. 0 otherwise.
+ */
+static inline int is_global_init(struct task_struct *tsk)
+{
+ return task_tgid_nr(tsk) == 1;
+}
+
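The _nr/_vnr split matters once PID namespaces nest; a sketch contrasting the two views of the current task:

	/* e.g. prints "pid 42 (global 31337)" inside a container */
	pr_info("pid %d (global %d)\n",
		task_pid_vnr(current), task_pid_nr(current));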
#endif /* _LINUX_PID_H */
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index c758809d5bcf..f9f9931e02d6 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -17,18 +17,10 @@
struct fs_pin;
#if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE)
-/*
- * sysctl for vm.memfd_noexec
- * 0: memfd_create() without MFD_EXEC nor MFD_NOEXEC_SEAL
- * acts like MFD_EXEC was set.
- * 1: memfd_create() without MFD_EXEC nor MFD_NOEXEC_SEAL
- * acts like MFD_NOEXEC_SEAL was set.
- * 2: memfd_create() without MFD_NOEXEC_SEAL will be
- * rejected.
- */
-#define MEMFD_NOEXEC_SCOPE_EXEC 0
-#define MEMFD_NOEXEC_SCOPE_NOEXEC_SEAL 1
-#define MEMFD_NOEXEC_SCOPE_NOEXEC_ENFORCED 2
+/* modes for vm.memfd_noexec sysctl */
+#define MEMFD_NOEXEC_SCOPE_EXEC 0 /* MFD_EXEC implied if unset */
+#define MEMFD_NOEXEC_SCOPE_NOEXEC_SEAL 1 /* MFD_NOEXEC_SEAL implied if unset */
+#define MEMFD_NOEXEC_SCOPE_NOEXEC_ENFORCED 2 /* same as 1, except MFD_EXEC rejected */
#endif
struct pid_namespace {
@@ -47,7 +39,6 @@ struct pid_namespace {
int reboot; /* group exit code if this pidns was rebooted */
struct ns_common ns;
#if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE)
- /* sysctl for vm.memfd_noexec */
int memfd_noexec_scope;
#endif
} __randomize_layout;
@@ -64,6 +55,23 @@ static inline struct pid_namespace *get_pid_ns(struct pid_namespace *ns)
return ns;
}
+#if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE)
+static inline int pidns_memfd_noexec_scope(struct pid_namespace *ns)
+{
+ int scope = MEMFD_NOEXEC_SCOPE_EXEC;
+
+ for (; ns; ns = ns->parent)
+ scope = max(scope, READ_ONCE(ns->memfd_noexec_scope));
+
+ return scope;
+}
+#else
+static inline int pidns_memfd_noexec_scope(struct pid_namespace *ns)
+{
+ return 0;
+}
+#endif
+
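Because the walk takes the maximum over all ancestor namespaces, a child can only tighten the policy. A sketch of how a memfd_create() check might consult it (the flag handling and error value are illustrative):

	if ((flags & MFD_EXEC) &&
	    pidns_memfd_noexec_scope(task_active_pid_ns(current)) >=
	    MEMFD_NOEXEC_SCOPE_NOEXEC_ENFORCED)
		return -EACCES;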
extern struct pid_namespace *copy_pid_ns(unsigned long flags,
struct user_namespace *user_ns, struct pid_namespace *ns);
extern void zap_pid_ns_processes(struct pid_namespace *pid_ns);
@@ -78,6 +86,11 @@ static inline struct pid_namespace *get_pid_ns(struct pid_namespace *ns)
return ns;
}
+static inline int pidns_memfd_noexec_scope(struct pid_namespace *ns)
+{
+ return 0;
+}
+
static inline struct pid_namespace *copy_pid_ns(unsigned long flags,
struct user_namespace *user_ns, struct pid_namespace *ns)
{
diff --git a/include/linux/pid_types.h b/include/linux/pid_types.h
new file mode 100644
index 000000000000..c2aee1d91dcf
--- /dev/null
+++ b/include/linux/pid_types.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_PID_TYPES_H
+#define _LINUX_PID_TYPES_H
+
+enum pid_type {
+ PIDTYPE_PID,
+ PIDTYPE_TGID,
+ PIDTYPE_PGID,
+ PIDTYPE_SID,
+ PIDTYPE_MAX,
+};
+
+struct pid_namespace;
+extern struct pid_namespace init_pid_ns;
+
+#endif /* _LINUX_PID_TYPES_H */
diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h
index 4729d54e8995..73de70362b98 100644
--- a/include/linux/pinctrl/consumer.h
+++ b/include/linux/pinctrl/consumer.h
@@ -17,6 +17,7 @@
#include <linux/pinctrl/pinctrl-state.h>
struct device;
+struct gpio_chip;
/* This struct is private to the core and should be regarded as a cookie */
struct pinctrl;
@@ -25,27 +26,30 @@ struct pinctrl_state;
#ifdef CONFIG_PINCTRL
/* External interface to pin control */
-extern bool pinctrl_gpio_can_use_line(unsigned gpio);
-extern int pinctrl_gpio_request(unsigned gpio);
-extern void pinctrl_gpio_free(unsigned gpio);
-extern int pinctrl_gpio_direction_input(unsigned gpio);
-extern int pinctrl_gpio_direction_output(unsigned gpio);
-extern int pinctrl_gpio_set_config(unsigned gpio, unsigned long config);
-
-extern struct pinctrl * __must_check pinctrl_get(struct device *dev);
-extern void pinctrl_put(struct pinctrl *p);
-extern struct pinctrl_state * __must_check pinctrl_lookup_state(struct pinctrl *p,
- const char *name);
-extern int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *s);
-
-extern struct pinctrl * __must_check devm_pinctrl_get(struct device *dev);
-extern void devm_pinctrl_put(struct pinctrl *p);
-extern int pinctrl_select_default_state(struct device *dev);
+bool pinctrl_gpio_can_use_line(struct gpio_chip *gc, unsigned int offset);
+int pinctrl_gpio_request(struct gpio_chip *gc, unsigned int offset);
+void pinctrl_gpio_free(struct gpio_chip *gc, unsigned int offset);
+int pinctrl_gpio_direction_input(struct gpio_chip *gc,
+ unsigned int offset);
+int pinctrl_gpio_direction_output(struct gpio_chip *gc,
+ unsigned int offset);
+int pinctrl_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
+ unsigned long config);
+
+struct pinctrl * __must_check pinctrl_get(struct device *dev);
+void pinctrl_put(struct pinctrl *p);
+struct pinctrl_state * __must_check pinctrl_lookup_state(struct pinctrl *p,
+ const char *name);
+int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *s);
+
+struct pinctrl * __must_check devm_pinctrl_get(struct device *dev);
+void devm_pinctrl_put(struct pinctrl *p);
+int pinctrl_select_default_state(struct device *dev);
#ifdef CONFIG_PM
-extern int pinctrl_pm_select_default_state(struct device *dev);
-extern int pinctrl_pm_select_sleep_state(struct device *dev);
-extern int pinctrl_pm_select_idle_state(struct device *dev);
+int pinctrl_pm_select_default_state(struct device *dev);
+int pinctrl_pm_select_sleep_state(struct device *dev);
+int pinctrl_pm_select_idle_state(struct device *dev);
#else
static inline int pinctrl_pm_select_default_state(struct device *dev)
{
@@ -63,31 +67,38 @@ static inline int pinctrl_pm_select_idle_state(struct device *dev)
#else /* !CONFIG_PINCTRL */
-static inline bool pinctrl_gpio_can_use_line(unsigned gpio)
+static inline bool
+pinctrl_gpio_can_use_line(struct gpio_chip *gc, unsigned int offset)
{
return true;
}
-static inline int pinctrl_gpio_request(unsigned gpio)
+static inline int
+pinctrl_gpio_request(struct gpio_chip *gc, unsigned int offset)
{
return 0;
}
-static inline void pinctrl_gpio_free(unsigned gpio)
+static inline void
+pinctrl_gpio_free(struct gpio_chip *gc, unsigned int offset)
{
}
-static inline int pinctrl_gpio_direction_input(unsigned gpio)
+static inline int
+pinctrl_gpio_direction_input(struct gpio_chip *gc, unsigned int offset)
{
return 0;
}
-static inline int pinctrl_gpio_direction_output(unsigned gpio)
+static inline int
+pinctrl_gpio_direction_output(struct gpio_chip *gc, unsigned int offset)
{
return 0;
}
-static inline int pinctrl_gpio_set_config(unsigned gpio, unsigned long config)
+static inline int
+pinctrl_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
+ unsigned long config)
{
return 0;
}
diff --git a/include/linux/pinctrl/machine.h b/include/linux/pinctrl/machine.h
index 0639b36f43c5..673e96df453b 100644
--- a/include/linux/pinctrl/machine.h
+++ b/include/linux/pinctrl/machine.h
@@ -11,7 +11,7 @@
#ifndef __LINUX_PINCTRL_MACHINE_H
#define __LINUX_PINCTRL_MACHINE_H
-#include <linux/kernel.h> /* ARRAY_SIZE() */
+#include <linux/array_size.h>
#include <linux/pinctrl/pinctrl-state.h>
@@ -47,7 +47,7 @@ struct pinctrl_map_mux {
struct pinctrl_map_configs {
const char *group_or_pin;
unsigned long *configs;
- unsigned num_configs;
+ unsigned int num_configs;
};
/**
@@ -154,13 +154,13 @@ struct pinctrl_map;
#ifdef CONFIG_PINCTRL
extern int pinctrl_register_mappings(const struct pinctrl_map *map,
- unsigned num_maps);
+ unsigned int num_maps);
extern void pinctrl_unregister_mappings(const struct pinctrl_map *map);
extern void pinctrl_provide_dummies(void);
#else
static inline int pinctrl_register_mappings(const struct pinctrl_map *map,
- unsigned num_maps)
+ unsigned int num_maps)
{
return 0;
}
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index d74b7a4ea154..a65d3d078e58 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -193,17 +193,17 @@ struct pinconf_generic_params {
int pinconf_generic_dt_subnode_to_map(struct pinctrl_dev *pctldev,
struct device_node *np, struct pinctrl_map **map,
- unsigned *reserved_maps, unsigned *num_maps,
+ unsigned int *reserved_maps, unsigned int *num_maps,
enum pinctrl_map_type type);
int pinconf_generic_dt_node_to_map(struct pinctrl_dev *pctldev,
struct device_node *np_config, struct pinctrl_map **map,
- unsigned *num_maps, enum pinctrl_map_type type);
+ unsigned int *num_maps, enum pinctrl_map_type type);
void pinconf_generic_dt_free_map(struct pinctrl_dev *pctldev,
- struct pinctrl_map *map, unsigned num_maps);
+ struct pinctrl_map *map, unsigned int num_maps);
static inline int pinconf_generic_dt_node_to_map_group(struct pinctrl_dev *pctldev,
struct device_node *np_config, struct pinctrl_map **map,
- unsigned *num_maps)
+ unsigned int *num_maps)
{
return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps,
PIN_MAP_TYPE_CONFIGS_GROUP);
@@ -211,7 +211,7 @@ static inline int pinconf_generic_dt_node_to_map_group(struct pinctrl_dev *pctld
static inline int pinconf_generic_dt_node_to_map_pin(struct pinctrl_dev *pctldev,
struct device_node *np_config, struct pinctrl_map **map,
- unsigned *num_maps)
+ unsigned int *num_maps)
{
return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps,
PIN_MAP_TYPE_CONFIGS_PIN);
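
Editor's note: a sketch, not from this patch, of how a driver typically plugs these generic DT-mapping helpers into its pinctrl_ops now that the map counts are unsigned int (the foo_* names are hypothetical):

static int foo_dt_node_to_map(struct pinctrl_dev *pctldev,
			      struct device_node *np_config,
			      struct pinctrl_map **map,
			      unsigned int *num_maps)
{
	/* map each DT config node onto group-level config entries */
	return pinconf_generic_dt_node_to_map_group(pctldev, np_config,
						    map, num_maps);
}

static const struct pinctrl_ops foo_pctl_ops = {
	/* group enumeration callbacks omitted for brevity */
	.dt_node_to_map = foo_dt_node_to_map,
	.dt_free_map = pinconf_generic_dt_free_map,
};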
diff --git a/include/linux/pinctrl/pinconf.h b/include/linux/pinctrl/pinconf.h
index f8a8215e9021..770ec2221156 100644
--- a/include/linux/pinctrl/pinconf.h
+++ b/include/linux/pinctrl/pinconf.h
@@ -40,25 +40,25 @@ struct pinconf_ops {
bool is_generic;
#endif
int (*pin_config_get) (struct pinctrl_dev *pctldev,
- unsigned pin,
+ unsigned int pin,
unsigned long *config);
int (*pin_config_set) (struct pinctrl_dev *pctldev,
- unsigned pin,
+ unsigned int pin,
unsigned long *configs,
- unsigned num_configs);
+ unsigned int num_configs);
int (*pin_config_group_get) (struct pinctrl_dev *pctldev,
- unsigned selector,
+ unsigned int selector,
unsigned long *config);
int (*pin_config_group_set) (struct pinctrl_dev *pctldev,
- unsigned selector,
+ unsigned int selector,
unsigned long *configs,
- unsigned num_configs);
+ unsigned int num_configs);
void (*pin_config_dbg_show) (struct pinctrl_dev *pctldev,
struct seq_file *s,
- unsigned offset);
+ unsigned int offset);
void (*pin_config_group_dbg_show) (struct pinctrl_dev *pctldev,
struct seq_file *s,
- unsigned selector);
+ unsigned int selector);
void (*pin_config_config_dbg_show) (struct pinctrl_dev *pctldev,
struct seq_file *s,
unsigned long config);
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
index 4d252ea00ed1..9a8189ffd0f2 100644
--- a/include/linux/pinctrl/pinctrl.h
+++ b/include/linux/pinctrl/pinctrl.h
@@ -54,7 +54,7 @@ struct pingroup {
* @drv_data: driver-defined per-pin data. pinctrl core does not touch this
*/
struct pinctrl_pin_desc {
- unsigned number;
+ unsigned int number;
const char *name;
void *drv_data;
};
@@ -82,7 +82,7 @@ struct pinctrl_gpio_range {
unsigned int base;
unsigned int pin_base;
unsigned int npins;
- unsigned const *pins;
+ unsigned int const *pins;
struct gpio_chip *gc;
};
@@ -108,18 +108,18 @@ struct pinctrl_gpio_range {
struct pinctrl_ops {
int (*get_groups_count) (struct pinctrl_dev *pctldev);
const char *(*get_group_name) (struct pinctrl_dev *pctldev,
- unsigned selector);
+ unsigned int selector);
int (*get_group_pins) (struct pinctrl_dev *pctldev,
- unsigned selector,
- const unsigned **pins,
- unsigned *num_pins);
+ unsigned int selector,
+ const unsigned int **pins,
+ unsigned int *num_pins);
void (*pin_dbg_show) (struct pinctrl_dev *pctldev, struct seq_file *s,
- unsigned offset);
+ unsigned int offset);
int (*dt_node_to_map) (struct pinctrl_dev *pctldev,
struct device_node *np_config,
- struct pinctrl_map **map, unsigned *num_maps);
+ struct pinctrl_map **map, unsigned int *num_maps);
void (*dt_free_map) (struct pinctrl_dev *pctldev,
- struct pinctrl_map *map, unsigned num_maps);
+ struct pinctrl_map *map, unsigned int num_maps);
};
/**
@@ -193,7 +193,7 @@ extern void pinctrl_add_gpio_range(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range);
extern void pinctrl_add_gpio_ranges(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *ranges,
- unsigned nranges);
+ unsigned int nranges);
extern void pinctrl_remove_gpio_range(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range);
@@ -203,8 +203,8 @@ extern struct pinctrl_gpio_range *
pinctrl_find_gpio_range_from_pin(struct pinctrl_dev *pctldev,
unsigned int pin);
extern int pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
- const char *pin_group, const unsigned **pins,
- unsigned *num_pins);
+ const char *pin_group, const unsigned int **pins,
+ unsigned int *num_pins);
/**
* struct pinfunction - Description about a function
diff --git a/include/linux/pinctrl/pinmux.h b/include/linux/pinctrl/pinmux.h
index a7e370965c53..d6f7b58d6ad0 100644
--- a/include/linux/pinctrl/pinmux.h
+++ b/include/linux/pinctrl/pinmux.h
@@ -57,26 +57,26 @@ struct pinctrl_gpio_range;
* the pin request.
*/
struct pinmux_ops {
- int (*request) (struct pinctrl_dev *pctldev, unsigned offset);
- int (*free) (struct pinctrl_dev *pctldev, unsigned offset);
+ int (*request) (struct pinctrl_dev *pctldev, unsigned int offset);
+ int (*free) (struct pinctrl_dev *pctldev, unsigned int offset);
int (*get_functions_count) (struct pinctrl_dev *pctldev);
const char *(*get_function_name) (struct pinctrl_dev *pctldev,
- unsigned selector);
+ unsigned int selector);
int (*get_function_groups) (struct pinctrl_dev *pctldev,
- unsigned selector,
- const char * const **groups,
- unsigned *num_groups);
- int (*set_mux) (struct pinctrl_dev *pctldev, unsigned func_selector,
- unsigned group_selector);
+ unsigned int selector,
+ const char * const **groups,
+ unsigned int *num_groups);
+ int (*set_mux) (struct pinctrl_dev *pctldev, unsigned int func_selector,
+ unsigned int group_selector);
int (*gpio_request_enable) (struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
- unsigned offset);
+ unsigned int offset);
void (*gpio_disable_free) (struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
- unsigned offset);
+ unsigned int offset);
int (*gpio_set_direction) (struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
- unsigned offset,
+ unsigned int offset,
bool input);
bool strict;
};
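
Editor's note: the selector and offset arguments throughout pinmux_ops are now unsigned int. A minimal sketch of an ops instance with the new prototypes (all foo_* names are hypothetical, not from this patch):

#include <linux/array_size.h>
#include <linux/pinctrl/pinmux.h>

static int foo_get_functions_count(struct pinctrl_dev *pctldev)
{
	return 1;
}

static const char *foo_get_function_name(struct pinctrl_dev *pctldev,
					 unsigned int selector)
{
	return "gpio";
}

static int foo_get_function_groups(struct pinctrl_dev *pctldev,
				   unsigned int selector,
				   const char * const **groups,
				   unsigned int *num_groups)
{
	static const char * const foo_groups[] = { "grp0" };

	*groups = foo_groups;
	*num_groups = ARRAY_SIZE(foo_groups);
	return 0;
}

static int foo_set_mux(struct pinctrl_dev *pctldev,
		       unsigned int func_selector,
		       unsigned int group_selector)
{
	/* program the mux register for the selected function/group here */
	return 0;
}

static const struct pinmux_ops foo_pmx_ops = {
	.get_functions_count = foo_get_functions_count,
	.get_function_name = foo_get_function_name,
	.get_function_groups = foo_get_function_groups,
	.set_mux = foo_set_mux,
	.strict = true,
};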
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index d2c3f16cf6b1..8ff23bf5a819 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -62,9 +62,6 @@ struct pipe_inode_info {
unsigned int tail;
unsigned int max_usage;
unsigned int ring_size;
-#ifdef CONFIG_WATCH_QUEUE
- bool note_loss;
-#endif
unsigned int nr_accounted;
unsigned int readers;
unsigned int writers;
@@ -72,6 +69,9 @@ struct pipe_inode_info {
unsigned int r_counter;
unsigned int w_counter;
bool poll_usage;
+#ifdef CONFIG_WATCH_QUEUE
+ bool note_loss;
+#endif
struct page *tmp_page;
struct fasync_struct *fasync_readers;
struct fasync_struct *fasync_writers;
@@ -125,6 +125,22 @@ struct pipe_buf_operations {
};
/**
+ * pipe_has_watch_queue - Check whether the pipe is a watch_queue,
+ * i.e. it was created with O_NOTIFICATION_PIPE
+ * @pipe: The pipe to check
+ *
+ * Return: true if pipe is a watch queue, false otherwise.
+ */
+static inline bool pipe_has_watch_queue(const struct pipe_inode_info *pipe)
+{
+#ifdef CONFIG_WATCH_QUEUE
+ return pipe->watch_queue != NULL;
+#else
+ return false;
+#endif
+}
+
+/**
* pipe_empty - Return true if the pipe is empty
* @head: The pipe ring head pointer
* @tail: The pipe ring tail pointer
@@ -261,22 +277,18 @@ void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
extern const struct pipe_buf_operations nosteal_pipe_buf_ops;
-#ifdef CONFIG_WATCH_QUEUE
unsigned long account_pipe_buffers(struct user_struct *user,
unsigned long old, unsigned long new);
bool too_many_pipe_buffers_soft(unsigned long user_bufs);
bool too_many_pipe_buffers_hard(unsigned long user_bufs);
bool pipe_is_unprivileged_user(void);
-#endif
/* for F_SETPIPE_SZ and F_GETPIPE_SZ */
-#ifdef CONFIG_WATCH_QUEUE
int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots);
-#endif
-long pipe_fcntl(struct file *, unsigned int, unsigned long arg);
+long pipe_fcntl(struct file *, unsigned int, unsigned int arg);
struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice);
int create_pipe_files(struct file **, int);
-unsigned int round_pipe_size(unsigned long size);
+unsigned int round_pipe_size(unsigned int size);
#endif
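
Editor's note: one hedged usage sketch for the new pipe_has_watch_queue() helper (the foo_* function is hypothetical): callers can reject operations that are invalid on notification pipes without CONFIG_WATCH_QUEUE ifdefs, e.g. refusing a splice target with -EXDEV:

static int foo_check_splice_target(struct pipe_inode_info *pipe)
{
	if (pipe_has_watch_queue(pipe))
		return -EXDEV;	/* no splicing into a notification pipe */
	return 0;
}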
diff --git a/include/linux/pktcdvd.h b/include/linux/pktcdvd.h
index f9c5ac80d59b..79594aeb160d 100644
--- a/include/linux/pktcdvd.h
+++ b/include/linux/pktcdvd.h
@@ -154,9 +154,10 @@ struct packet_stacked_data
struct pktcdvd_device
{
- struct block_device *bdev; /* dev attached */
+ struct bdev_handle *bdev_handle; /* dev attached */
+ /* handle acquired for bdev during pkt_open_dev() */
+ struct bdev_handle *open_bdev_handle;
dev_t pkt_dev; /* our dev */
- char name[20];
struct packet_settings settings;
struct packet_stats stats;
int refcnt; /* Open count */
diff --git a/include/linux/platform_data/asoc-pxa.h b/include/linux/platform_data/asoc-pxa.h
index 327454cd8246..7b5b9e20fbf5 100644
--- a/include/linux/platform_data/asoc-pxa.h
+++ b/include/linux/platform_data/asoc-pxa.h
@@ -27,5 +27,6 @@ typedef struct {
} pxa2xx_audio_ops_t;
extern void pxa_set_ac97_info(pxa2xx_audio_ops_t *ops);
+extern void pxa27x_configure_ac97reset(int reset_gpio, bool to_gpio);
#endif
diff --git a/include/linux/platform_data/bd6107.h b/include/linux/platform_data/bd6107.h
index 54a06a4d2618..596ca4f95cfa 100644
--- a/include/linux/platform_data/bd6107.h
+++ b/include/linux/platform_data/bd6107.h
@@ -8,7 +8,7 @@
struct device;
struct bd6107_platform_data {
- struct device *fbdev;
+ struct device *dev;
unsigned int def_value;
};
diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h
index ab721cf13a98..7dae17b62a4d 100644
--- a/include/linux/platform_data/cros_ec_commands.h
+++ b/include/linux/platform_data/cros_ec_commands.h
@@ -4436,8 +4436,20 @@ struct ec_response_i2c_passthru_protect {
* These commands are for sending and receiving message via HDMI CEC
*/
+#define EC_CEC_MAX_PORTS 16
+
#define MAX_CEC_MSG_LEN 16
+/*
+ * Helper macros for packing/unpacking cec_events.
+ * bits[27:0] : bitmask of events from enum mkbp_cec_event
+ * bits[31:28]: port number
+ */
+#define EC_MKBP_EVENT_CEC_PACK(events, port) \
+ (((events) & GENMASK(27, 0)) | (((port) & 0xf) << 28))
+#define EC_MKBP_EVENT_CEC_GET_EVENTS(event) ((event) & GENMASK(27, 0))
+#define EC_MKBP_EVENT_CEC_GET_PORT(event) (((event) >> 28) & 0xf)
+
/* CEC message from the AP to be written on the CEC bus */
#define EC_CMD_CEC_WRITE_MSG 0x00B8
@@ -4449,19 +4461,54 @@ struct ec_params_cec_write {
uint8_t msg[MAX_CEC_MSG_LEN];
} __ec_align1;
+/**
+ * struct ec_params_cec_write_v1 - Message to write to the CEC bus
+ * @port: CEC port to write the message on
+ * @msg_len: length of msg in bytes
+ * @msg: message content to write to the CEC bus
+ */
+struct ec_params_cec_write_v1 {
+ uint8_t port;
+ uint8_t msg_len;
+ uint8_t msg[MAX_CEC_MSG_LEN];
+} __ec_align1;
+
+/* CEC message read from a CEC bus reported back to the AP */
+#define EC_CMD_CEC_READ_MSG 0x00B9
+
+/**
+ * struct ec_params_cec_read - Read a message from the CEC bus
+ * @port: CEC port to read a message on
+ */
+struct ec_params_cec_read {
+ uint8_t port;
+} __ec_align1;
+
+/**
+ * struct ec_response_cec_read - Message read from the CEC bus
+ * @msg_len: length of msg in bytes
+ * @msg: message content read from the CEC bus
+ */
+struct ec_response_cec_read {
+ uint8_t msg_len;
+ uint8_t msg[MAX_CEC_MSG_LEN];
+} __ec_align1;
+
/* Set various CEC parameters */
#define EC_CMD_CEC_SET 0x00BA
/**
* struct ec_params_cec_set - CEC parameters set
* @cmd: parameter type, can be CEC_CMD_ENABLE or CEC_CMD_LOGICAL_ADDRESS
+ * @port: CEC port to set the parameter on
* @val: in case cmd is CEC_CMD_ENABLE, this field can be 0 to disable CEC
* or 1 to enable CEC functionality, in case cmd is
* CEC_CMD_LOGICAL_ADDRESS, this field encodes the requested logical
* address between 0 and 15 or 0xff to unregister
*/
struct ec_params_cec_set {
- uint8_t cmd; /* enum cec_command */
+ uint8_t cmd : 4; /* enum cec_command */
+ uint8_t port : 4;
uint8_t val;
} __ec_align1;
@@ -4471,9 +4518,11 @@ struct ec_params_cec_set {
/**
* struct ec_params_cec_get - CEC parameters get
* @cmd: parameter type, can be CEC_CMD_ENABLE or CEC_CMD_LOGICAL_ADDRESS
+ * @port: CEC port to get the parameter on
*/
struct ec_params_cec_get {
- uint8_t cmd; /* enum cec_command */
+ uint8_t cmd : 4; /* enum cec_command */
+ uint8_t port : 4;
} __ec_align1;
/**
@@ -4487,6 +4536,17 @@ struct ec_response_cec_get {
uint8_t val;
} __ec_align1;
+/* Get the number of CEC ports */
+#define EC_CMD_CEC_PORT_COUNT 0x00C1
+
+/**
+ * struct ec_response_cec_port_count - CEC port count response
+ * @port_count: number of CEC ports
+ */
+struct ec_response_cec_port_count {
+ uint8_t port_count;
+} __ec_align1;
+
/* CEC parameters command */
enum cec_command {
/* CEC reading, writing and events enable */
@@ -4501,6 +4561,8 @@ enum mkbp_cec_event {
EC_MKBP_CEC_SEND_OK = BIT(0),
/* Outgoing message was not acknowledged */
EC_MKBP_CEC_SEND_FAILED = BIT(1),
+ /* Incoming message can be read out by AP */
+ EC_MKBP_CEC_HAVE_DATA = BIT(2),
};
/*****************************************************************************/
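
Editor's note: a small self-contained sketch of the new multi-port packing scheme (the foo_* name is hypothetical): bits [27:0] of an MKBP CEC payload carry the event mask, bits [31:28] the port:

static bool foo_cec_port_has_data(uint32_t event, int port)
{
	/* unpack the payload with the new helper macros */
	return EC_MKBP_EVENT_CEC_GET_PORT(event) == port &&
	       (EC_MKBP_EVENT_CEC_GET_EVENTS(event) & EC_MKBP_CEC_HAVE_DATA);
}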
diff --git a/include/linux/platform_data/cros_ec_proto.h b/include/linux/platform_data/cros_ec_proto.h
index 4f9f756bc17c..8865e350c12a 100644
--- a/include/linux/platform_data/cros_ec_proto.h
+++ b/include/linux/platform_data/cros_ec_proto.h
@@ -258,7 +258,7 @@ bool cros_ec_check_features(struct cros_ec_dev *ec, int feature);
int cros_ec_get_sensor_count(struct cros_ec_dev *ec);
-int cros_ec_cmd(struct cros_ec_device *ec_dev, unsigned int version, int command, void *outdata,
+int cros_ec_cmd(struct cros_ec_device *ec_dev, unsigned int version, int command, const void *outdata,
size_t outsize, void *indata, size_t insize);
/**
diff --git a/include/linux/platform_data/davinci-cpufreq.h b/include/linux/platform_data/davinci-cpufreq.h
index bc208c64e3d7..1ef91c36f609 100644
--- a/include/linux/platform_data/davinci-cpufreq.h
+++ b/include/linux/platform_data/davinci-cpufreq.h
@@ -16,4 +16,10 @@ struct davinci_cpufreq_config {
int (*init)(void);
};
+#ifdef CONFIG_CPU_FREQ
+int davinci_cpufreq_init(void);
+#else
+static inline int davinci_cpufreq_init(void) { return 0; }
+#endif
+
#endif /* _MACH_DAVINCI_CPUFREQ_H */
diff --git a/include/linux/platform_data/dma-ste-dma40.h b/include/linux/platform_data/dma-ste-dma40.h
deleted file mode 100644
index 10641633facc..000000000000
--- a/include/linux/platform_data/dma-ste-dma40.h
+++ /dev/null
@@ -1,209 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson SA 2007-2010
- * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
- * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
- */
-
-
-#ifndef STE_DMA40_H
-#define STE_DMA40_H
-
-#include <linux/dmaengine.h>
-#include <linux/scatterlist.h>
-#include <linux/workqueue.h>
-#include <linux/interrupt.h>
-
-/*
- * Maxium size for a single dma descriptor
- * Size is limited to 16 bits.
- * Size is in the units of addr-widths (1,2,4,8 bytes)
- * Larger transfers will be split up to multiple linked desc
- */
-#define STEDMA40_MAX_SEG_SIZE 0xFFFF
-
-/* dev types for memcpy */
-#define STEDMA40_DEV_DST_MEMORY (-1)
-#define STEDMA40_DEV_SRC_MEMORY (-1)
-
-enum stedma40_mode {
- STEDMA40_MODE_LOGICAL = 0,
- STEDMA40_MODE_PHYSICAL,
- STEDMA40_MODE_OPERATION,
-};
-
-enum stedma40_mode_opt {
- STEDMA40_PCHAN_BASIC_MODE = 0,
- STEDMA40_LCHAN_SRC_LOG_DST_LOG = 0,
- STEDMA40_PCHAN_MODULO_MODE,
- STEDMA40_PCHAN_DOUBLE_DST_MODE,
- STEDMA40_LCHAN_SRC_PHY_DST_LOG,
- STEDMA40_LCHAN_SRC_LOG_DST_PHY,
-};
-
-#define STEDMA40_ESIZE_8_BIT 0x0
-#define STEDMA40_ESIZE_16_BIT 0x1
-#define STEDMA40_ESIZE_32_BIT 0x2
-#define STEDMA40_ESIZE_64_BIT 0x3
-
-/* The value 4 indicates that PEN-reg shall be set to 0 */
-#define STEDMA40_PSIZE_PHY_1 0x4
-#define STEDMA40_PSIZE_PHY_2 0x0
-#define STEDMA40_PSIZE_PHY_4 0x1
-#define STEDMA40_PSIZE_PHY_8 0x2
-#define STEDMA40_PSIZE_PHY_16 0x3
-
-/*
- * The number of elements differ in logical and
- * physical mode
- */
-#define STEDMA40_PSIZE_LOG_1 STEDMA40_PSIZE_PHY_2
-#define STEDMA40_PSIZE_LOG_4 STEDMA40_PSIZE_PHY_4
-#define STEDMA40_PSIZE_LOG_8 STEDMA40_PSIZE_PHY_8
-#define STEDMA40_PSIZE_LOG_16 STEDMA40_PSIZE_PHY_16
-
-/* Maximum number of possible physical channels */
-#define STEDMA40_MAX_PHYS 32
-
-enum stedma40_flow_ctrl {
- STEDMA40_NO_FLOW_CTRL,
- STEDMA40_FLOW_CTRL,
-};
-
-/**
- * struct stedma40_half_channel_info - dst/src channel configuration
- *
- * @big_endian: true if the src/dst should be read as big endian
- * @data_width: Data width of the src/dst hardware
- * @p_size: Burst size
- * @flow_ctrl: Flow control on/off.
- */
-struct stedma40_half_channel_info {
- bool big_endian;
- enum dma_slave_buswidth data_width;
- int psize;
- enum stedma40_flow_ctrl flow_ctrl;
-};
-
-/**
- * struct stedma40_chan_cfg - Structure to be filled by client drivers.
- *
- * @dir: MEM 2 MEM, PERIPH 2 MEM , MEM 2 PERIPH, PERIPH 2 PERIPH
- * @high_priority: true if high-priority
- * @realtime: true if realtime mode is to be enabled. Only available on DMA40
- * version 3+, i.e DB8500v2+
- * @mode: channel mode: physical, logical, or operation
- * @mode_opt: options for the chosen channel mode
- * @dev_type: src/dst device type (driver uses dir to figure out which)
- * @src_info: Parameters for dst half channel
- * @dst_info: Parameters for dst half channel
- * @use_fixed_channel: if true, use physical channel specified by phy_channel
- * @phy_channel: physical channel to use, only if use_fixed_channel is true
- *
- * This structure has to be filled by the client drivers.
- * It is recommended to do all dma configurations for clients in the machine.
- *
- */
-struct stedma40_chan_cfg {
- enum dma_transfer_direction dir;
- bool high_priority;
- bool realtime;
- enum stedma40_mode mode;
- enum stedma40_mode_opt mode_opt;
- int dev_type;
- struct stedma40_half_channel_info src_info;
- struct stedma40_half_channel_info dst_info;
-
- bool use_fixed_channel;
- int phy_channel;
-};
-
-/**
- * struct stedma40_platform_data - Configuration struct for the dma device.
- *
- * @dev_tx: mapping between destination event line and io address
- * @dev_rx: mapping between source event line and io address
- * @disabled_channels: A vector, ending with -1, that marks physical channels
- * that are for different reasons not available for the driver.
- * @soft_lli_chans: A vector, that marks physical channels will use LLI by SW
- * which avoids HW bug that exists in some versions of the controller.
- * SoftLLI introduces relink overhead that could impact performace for
- * certain use cases.
- * @num_of_soft_lli_chans: The number of channels that needs to be configured
- * to use SoftLLI.
- * @use_esram_lcla: flag for mapping the lcla into esram region
- * @num_of_memcpy_chans: The number of channels reserved for memcpy.
- * @num_of_phy_chans: The number of physical channels implemented in HW.
- * 0 means reading the number of channels from DMA HW but this is only valid
- * for 'multiple of 4' channels, like 8.
- */
-struct stedma40_platform_data {
- int disabled_channels[STEDMA40_MAX_PHYS];
- int *soft_lli_chans;
- int num_of_soft_lli_chans;
- bool use_esram_lcla;
- int num_of_memcpy_chans;
- int num_of_phy_chans;
-};
-
-#ifdef CONFIG_STE_DMA40
-
-/**
- * stedma40_filter() - Provides stedma40_chan_cfg to the
- * ste_dma40 dma driver via the dmaengine framework.
- * does some checking of what's provided.
- *
- * Never directly called by client. It used by dmaengine.
- * @chan: dmaengine handle.
- * @data: Must be of type: struct stedma40_chan_cfg and is
- * the configuration of the framework.
- *
- *
- */
-
-bool stedma40_filter(struct dma_chan *chan, void *data);
-
-/**
- * stedma40_slave_mem() - Transfers a raw data buffer to or from a slave
- * (=device)
- *
- * @chan: dmaengine handle
- * @addr: source or destination physicall address.
- * @size: bytes to transfer
- * @direction: direction of transfer
- * @flags: is actually enum dma_ctrl_flags. See dmaengine.h
- */
-
-static inline struct
-dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
- dma_addr_t addr,
- unsigned int size,
- enum dma_transfer_direction direction,
- unsigned long flags)
-{
- struct scatterlist sg;
- sg_init_table(&sg, 1);
- sg.dma_address = addr;
- sg.length = size;
-
- return dmaengine_prep_slave_sg(chan, &sg, 1, direction, flags);
-}
-
-#else
-static inline bool stedma40_filter(struct dma_chan *chan, void *data)
-{
- return false;
-}
-
-static inline struct
-dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
- dma_addr_t addr,
- unsigned int size,
- enum dma_transfer_direction direction,
- unsigned long flags)
-{
- return NULL;
-}
-#endif
-
-#endif
diff --git a/include/linux/platform_data/gpio-omap.h b/include/linux/platform_data/gpio-omap.h
index f377817ce75c..cdd8cfb424f5 100644
--- a/include/linux/platform_data/gpio-omap.h
+++ b/include/linux/platform_data/gpio-omap.h
@@ -144,9 +144,6 @@
#define OMAP_MAX_GPIO_LINES 192
-#define OMAP_MPUIO(nr) (OMAP_MAX_GPIO_LINES + (nr))
-#define OMAP_GPIO_IS_MPUIO(nr) ((nr) >= OMAP_MAX_GPIO_LINES)
-
#ifndef __ASSEMBLER__
struct omap_gpio_reg_offs {
u16 revision;
diff --git a/include/linux/platform_data/gpio_backlight.h b/include/linux/platform_data/gpio_backlight.h
index 1a8b5b1946fe..323fbf5f7613 100644
--- a/include/linux/platform_data/gpio_backlight.h
+++ b/include/linux/platform_data/gpio_backlight.h
@@ -8,7 +8,7 @@
struct device;
struct gpio_backlight_platform_data {
- struct device *fbdev;
+ struct device *dev;
};
#endif
diff --git a/include/linux/platform_data/gsc_hwmon.h b/include/linux/platform_data/gsc_hwmon.h
index f2781aa7eff8..70e8a6bec0f6 100644
--- a/include/linux/platform_data/gsc_hwmon.h
+++ b/include/linux/platform_data/gsc_hwmon.h
@@ -40,6 +40,6 @@ struct gsc_hwmon_platform_data {
unsigned int resolution;
unsigned int vreference;
unsigned int fan_base;
- struct gsc_hwmon_channel channels[];
+ struct gsc_hwmon_channel channels[] __counted_by(nchannels);
};
#endif
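
Editor's note: with the __counted_by(nchannels) annotation, the compiler and FORTIFY can bounds-check the flexible array against the struct's nchannels member. A sketch of the matching allocation pattern (foo_alloc_pdata is hypothetical):

#include <linux/overflow.h>
#include <linux/slab.h>

static struct gsc_hwmon_platform_data *
foo_alloc_pdata(unsigned int nchannels)
{
	struct gsc_hwmon_platform_data *pdata;

	pdata = kzalloc(struct_size(pdata, channels, nchannels), GFP_KERNEL);
	if (pdata)
		pdata->nchannels = nchannels;	/* must match the array bound */
	return pdata;
}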
diff --git a/include/linux/platform_data/hirschmann-hellcreek.h b/include/linux/platform_data/hirschmann-hellcreek.h
index 6a000df5541f..8748680e9e3c 100644
--- a/include/linux/platform_data/hirschmann-hellcreek.h
+++ b/include/linux/platform_data/hirschmann-hellcreek.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
/*
* Hirschmann Hellcreek TSN switch platform data.
*
diff --git a/include/linux/platform_data/i2c-mux-reg.h b/include/linux/platform_data/i2c-mux-reg.h
index 2543c2a1c9ae..e2e895768311 100644
--- a/include/linux/platform_data/i2c-mux-reg.h
+++ b/include/linux/platform_data/i2c-mux-reg.h
@@ -17,7 +17,6 @@
* @n_values: Number of multiplexer channels
* @little_endian: Indicating if the register is in little endian
* @write_only: Reading the register is not allowed by hardware
- * @classes: Optional I2C auto-detection classes
* @idle: Value to write to mux when idle
* @idle_in_use: indicate if idle value is in use
* @reg: Virtual address of the register to switch channel
@@ -30,7 +29,6 @@ struct i2c_mux_reg_platform_data {
int n_values;
bool little_endian;
bool write_only;
- const unsigned int *classes;
u32 idle;
bool idle_in_use;
void __iomem *reg;
diff --git a/include/linux/platform_data/keypad-omap.h b/include/linux/platform_data/keypad-omap.h
index 3e7c64c854f4..f3f1311cdf3a 100644
--- a/include/linux/platform_data/keypad-omap.h
+++ b/include/linux/platform_data/keypad-omap.h
@@ -19,9 +19,6 @@ struct omap_kp_platform_data {
bool rep;
unsigned long delay;
bool dbounce;
- /* specific to OMAP242x*/
- unsigned int *row_gpios;
- unsigned int *col_gpios;
};
/* Group (0..3) -- when multiple keys are pressed, only the
diff --git a/include/linux/platform_data/lcd-mipid.h b/include/linux/platform_data/lcd-mipid.h
index 63f05eb23827..4927cfc5158c 100644
--- a/include/linux/platform_data/lcd-mipid.h
+++ b/include/linux/platform_data/lcd-mipid.h
@@ -15,10 +15,8 @@ enum mipid_test_result {
#ifdef __KERNEL__
struct mipid_platform_data {
- int nreset_gpio;
int data_lines;
- void (*shutdown)(struct mipid_platform_data *pdata);
void (*set_bklight_level)(struct mipid_platform_data *pdata,
int level);
int (*get_bklight_level)(struct mipid_platform_data *pdata);
diff --git a/include/linux/platform_data/leds-lp55xx.h b/include/linux/platform_data/leds-lp55xx.h
index 3441064713a3..3cc8db0b12b5 100644
--- a/include/linux/platform_data/leds-lp55xx.h
+++ b/include/linux/platform_data/leds-lp55xx.h
@@ -73,6 +73,9 @@ struct lp55xx_platform_data {
/* Clock configuration */
u8 clock_mode;
+ /* Charge pump mode */
+ u32 charge_pump_mode;
+
/* optional enable GPIO */
struct gpio_desc *enable_gpiod;
diff --git a/include/linux/platform_data/lv5207lp.h b/include/linux/platform_data/lv5207lp.h
index c9da8d402750..95d85c1394bc 100644
--- a/include/linux/platform_data/lv5207lp.h
+++ b/include/linux/platform_data/lv5207lp.h
@@ -8,7 +8,7 @@
struct device;
struct lv5207lp_platform_data {
- struct device *fbdev;
+ struct device *dev;
unsigned int max_value;
unsigned int def_value;
};
diff --git a/include/linux/platform_data/microchip-ksz.h b/include/linux/platform_data/microchip-ksz.h
index ea1cc6d829e9..f177416635a2 100644
--- a/include/linux/platform_data/microchip-ksz.h
+++ b/include/linux/platform_data/microchip-ksz.h
@@ -20,10 +20,31 @@
#define __MICROCHIP_KSZ_H
#include <linux/types.h>
+#include <linux/platform_data/dsa.h>
+
+enum ksz_chip_id {
+ KSZ8563_CHIP_ID = 0x8563,
+ KSZ8795_CHIP_ID = 0x8795,
+ KSZ8794_CHIP_ID = 0x8794,
+ KSZ8765_CHIP_ID = 0x8765,
+ KSZ8830_CHIP_ID = 0x8830,
+ KSZ9477_CHIP_ID = 0x00947700,
+ KSZ9896_CHIP_ID = 0x00989600,
+ KSZ9897_CHIP_ID = 0x00989700,
+ KSZ9893_CHIP_ID = 0x00989300,
+ KSZ9563_CHIP_ID = 0x00956300,
+ KSZ9567_CHIP_ID = 0x00956700,
+ LAN9370_CHIP_ID = 0x00937000,
+ LAN9371_CHIP_ID = 0x00937100,
+ LAN9372_CHIP_ID = 0x00937200,
+ LAN9373_CHIP_ID = 0x00937300,
+ LAN9374_CHIP_ID = 0x00937400,
+};
struct ksz_platform_data {
+ /* Must be first such that dsa_register_switch() can access it */
+ struct dsa_chip_data cd;
u32 chip_id;
- u16 enabled_ports;
};
#endif
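
Editor's note: a hedged sketch of board code filling the reworked platform data; because struct dsa_chip_data is the first member, dsa_register_switch() can reach it through the platform_data pointer (the foo_* name and chip choice are illustrative only):

static struct ksz_platform_data foo_ksz_pdata = {
	/* .cd (struct dsa_chip_data) stays first; filled in by board code */
	.chip_id = KSZ9477_CHIP_ID,
};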
diff --git a/include/linux/platform_data/mmc-omap.h b/include/linux/platform_data/mmc-omap.h
index 91051e9907f3..054d0c3c5ec5 100644
--- a/include/linux/platform_data/mmc-omap.h
+++ b/include/linux/platform_data/mmc-omap.h
@@ -20,8 +20,6 @@ struct omap_mmc_platform_data {
* maximum frequency on the MMC bus */
unsigned int max_freq;
- /* switch the bus to a new slot */
- int (*switch_slot)(struct device *dev, int slot);
/* initialize board-specific MMC functionality, can be NULL if
* not supported */
int (*init)(struct device *dev);
diff --git a/include/linux/platform_data/omap-twl4030.h b/include/linux/platform_data/omap-twl4030.h
index 0dd851ea1c72..7fcb55fe21c9 100644
--- a/include/linux/platform_data/omap-twl4030.h
+++ b/include/linux/platform_data/omap-twl4030.h
@@ -37,9 +37,6 @@ struct omap_tw4030_pdata {
bool has_digimic0;
bool has_digimic1;
u8 has_linein;
-
- /* Jack detect GPIO or <= 0 if it is not implemented */
- int jack_detect;
};
#endif /* _OMAP_TWL4030_H_ */
diff --git a/include/linux/platform_data/pca953x.h b/include/linux/platform_data/pca953x.h
index 96c1a14ab365..3c3787c4d96c 100644
--- a/include/linux/platform_data/pca953x.h
+++ b/include/linux/platform_data/pca953x.h
@@ -11,21 +11,8 @@ struct pca953x_platform_data {
/* number of the first GPIO */
unsigned gpio_base;
- /* initial polarity inversion setting */
- u32 invert;
-
/* interrupt base */
int irq_base;
-
- void *context; /* param to setup/teardown */
-
- int (*setup)(struct i2c_client *client,
- unsigned gpio, unsigned ngpio,
- void *context);
- void (*teardown)(struct i2c_client *client,
- unsigned gpio, unsigned ngpio,
- void *context);
- const char *const *names;
};
#endif /* _LINUX_PCA953X_H */
diff --git a/include/linux/platform_data/pxa2xx_udc.h b/include/linux/platform_data/pxa2xx_udc.h
index ff9c35dca59d..bc99cc6a3c5f 100644
--- a/include/linux/platform_data/pxa2xx_udc.h
+++ b/include/linux/platform_data/pxa2xx_udc.h
@@ -25,4 +25,10 @@ struct pxa2xx_udc_mach_info {
int gpio_pullup; /* high == pullup activated */
};
+#ifdef CONFIG_PXA27x
+extern void pxa27x_clear_otgph(void);
+#else
+#define pxa27x_clear_otgph() do {} while (0)
+#endif
+
#endif
diff --git a/include/linux/platform_data/rtc-ds2404.h b/include/linux/platform_data/rtc-ds2404.h
deleted file mode 100644
index 22c53825528f..000000000000
--- a/include/linux/platform_data/rtc-ds2404.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * ds2404.h - platform data structure for the DS2404 RTC.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 Sven Schnelle <svens@stackframe.org>
- */
-
-#ifndef __LINUX_DS2404_H
-#define __LINUX_DS2404_H
-
-struct ds2404_platform_data {
-
- unsigned int gpio_rst;
- unsigned int gpio_clk;
- unsigned int gpio_dq;
-};
-#endif
diff --git a/include/linux/platform_data/shmob_drm.h b/include/linux/platform_data/shmob_drm.h
index d661399b217d..6c19d4fbbe39 100644
--- a/include/linux/platform_data/shmob_drm.h
+++ b/include/linux/platform_data/shmob_drm.h
@@ -10,7 +10,7 @@
#ifndef __SHMOB_DRM_H__
#define __SHMOB_DRM_H__
-#include <drm/drm_mode.h>
+#include <video/videomode.h>
enum shmob_drm_clk_source {
SHMOB_DRM_CLK_BUS,
@@ -18,72 +18,21 @@ enum shmob_drm_clk_source {
SHMOB_DRM_CLK_EXTERNAL,
};
-enum shmob_drm_interface {
- SHMOB_DRM_IFACE_RGB8, /* 24bpp, 8:8:8 */
- SHMOB_DRM_IFACE_RGB9, /* 18bpp, 9:9 */
- SHMOB_DRM_IFACE_RGB12A, /* 24bpp, 12:12 */
- SHMOB_DRM_IFACE_RGB12B, /* 12bpp */
- SHMOB_DRM_IFACE_RGB16, /* 16bpp */
- SHMOB_DRM_IFACE_RGB18, /* 18bpp */
- SHMOB_DRM_IFACE_RGB24, /* 24bpp */
- SHMOB_DRM_IFACE_YUV422, /* 16bpp */
- SHMOB_DRM_IFACE_SYS8A, /* 24bpp, 8:8:8 */
- SHMOB_DRM_IFACE_SYS8B, /* 18bpp, 8:8:2 */
- SHMOB_DRM_IFACE_SYS8C, /* 18bpp, 2:8:8 */
- SHMOB_DRM_IFACE_SYS8D, /* 16bpp, 8:8 */
- SHMOB_DRM_IFACE_SYS9, /* 18bpp, 9:9 */
- SHMOB_DRM_IFACE_SYS12, /* 24bpp, 12:12 */
- SHMOB_DRM_IFACE_SYS16A, /* 16bpp */
- SHMOB_DRM_IFACE_SYS16B, /* 18bpp, 16:2 */
- SHMOB_DRM_IFACE_SYS16C, /* 18bpp, 2:16 */
- SHMOB_DRM_IFACE_SYS18, /* 18bpp */
- SHMOB_DRM_IFACE_SYS24, /* 24bpp */
-};
-
-struct shmob_drm_backlight_data {
- const char *name;
- int max_brightness;
- int (*get_brightness)(void);
- int (*set_brightness)(int brightness);
-};
-
struct shmob_drm_panel_data {
unsigned int width_mm; /* Panel width in mm */
unsigned int height_mm; /* Panel height in mm */
- struct drm_mode_modeinfo mode;
+ struct videomode mode;
};
-struct shmob_drm_sys_interface_data {
- unsigned int read_latch:6;
- unsigned int read_setup:8;
- unsigned int read_cycle:8;
- unsigned int read_strobe:8;
- unsigned int write_setup:8;
- unsigned int write_cycle:8;
- unsigned int write_strobe:8;
- unsigned int cs_setup:3;
- unsigned int vsync_active_high:1;
- unsigned int vsync_dir_input:1;
-};
-
-#define SHMOB_DRM_IFACE_FL_DWPOL (1 << 0) /* Rising edge dot clock data latch */
-#define SHMOB_DRM_IFACE_FL_DIPOL (1 << 1) /* Active low display enable */
-#define SHMOB_DRM_IFACE_FL_DAPOL (1 << 2) /* Active low display data */
-#define SHMOB_DRM_IFACE_FL_HSCNT (1 << 3) /* Disable HSYNC during VBLANK */
-#define SHMOB_DRM_IFACE_FL_DWCNT (1 << 4) /* Disable dotclock during blanking */
-
struct shmob_drm_interface_data {
- enum shmob_drm_interface interface;
- struct shmob_drm_sys_interface_data sys;
+ unsigned int bus_fmt; /* MEDIA_BUS_FMT_* */
unsigned int clk_div;
- unsigned int flags;
};
struct shmob_drm_platform_data {
enum shmob_drm_clk_source clk_source;
struct shmob_drm_interface_data iface;
struct shmob_drm_panel_data panel;
- struct shmob_drm_backlight_data backlight;
};
#endif /* __SHMOB_DRM_H__ */
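
Editor's note: with the switch from drm_mode_modeinfo to struct videomode, panel timings in board files are expressed in videomode terms. A sketch with made-up 800x480 timings (foo_panel is hypothetical):

#include <video/videomode.h>

static const struct shmob_drm_panel_data foo_panel = {
	.width_mm = 152,
	.height_mm = 91,
	.mode = {
		.pixelclock = 33300000,
		.hactive = 800,
		.hfront_porch = 40,
		.hback_porch = 88,
		.hsync_len = 128,
		.vactive = 480,
		.vfront_porch = 13,
		.vback_porch = 32,
		.vsync_len = 2,
	},
};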
diff --git a/include/linux/platform_data/sht3x.h b/include/linux/platform_data/sht3x.h
deleted file mode 100644
index 14680d2a98f7..000000000000
--- a/include/linux/platform_data/sht3x.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2016 Sensirion AG, Switzerland
- * Author: David Frey <david.frey@sensirion.com>
- * Author: Pascal Sachs <pascal.sachs@sensirion.com>
- */
-
-#ifndef __SHT3X_H_
-#define __SHT3X_H_
-
-struct sht3x_platform_data {
- bool blocking_io;
- bool high_precision;
-};
-#endif /* __SHT3X_H_ */
diff --git a/include/linux/platform_data/si5351.h b/include/linux/platform_data/si5351.h
index c71a2dd66143..5f412a615532 100644
--- a/include/linux/platform_data/si5351.h
+++ b/include/linux/platform_data/si5351.h
@@ -105,10 +105,12 @@ struct si5351_clkout_config {
* @clk_xtal: xtal input clock
* @clk_clkin: clkin input clock
* @pll_src: array of pll source clock setting
+ * @pll_reset: array indicating if plls should be reset after setting the rate
* @clkout: array of clkout configuration
*/
struct si5351_platform_data {
enum si5351_pll_src pll_src[2];
+ bool pll_reset[2];
struct si5351_clkout_config clkout[8];
};
diff --git a/include/linux/platform_data/spi-s3c64xx.h b/include/linux/platform_data/spi-s3c64xx.h
index 3101152ce449..1d6e6c424fc6 100644
--- a/include/linux/platform_data/spi-s3c64xx.h
+++ b/include/linux/platform_data/spi-s3c64xx.h
@@ -36,6 +36,7 @@ struct s3c64xx_spi_info {
int src_clk_nr;
int num_cs;
bool no_cs;
+ bool polling;
int (*cfg_gpio)(void);
};
diff --git a/include/linux/platform_data/st_sensors_pdata.h b/include/linux/platform_data/st_sensors_pdata.h
index 897051e51b78..a657830232ae 100644
--- a/include/linux/platform_data/st_sensors_pdata.h
+++ b/include/linux/platform_data/st_sensors_pdata.h
@@ -15,7 +15,7 @@
* @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2).
* Available only for accelerometer, magnetometer and pressure sensors.
* Accelerometer DRDY on LSM330 available only on pin 1 (see datasheet).
- * Magnetometer DRDY is supported only on LSM9DS0.
+ * Magnetometer DRDY is supported only on LSM9DS0 and LSM303D.
* @open_drain: set the interrupt line to be open drain if possible.
* @spi_3wire: enable spi-3wire mode.
* @pullups: enable/disable i2c controller pullup resistors.
diff --git a/include/linux/platform_data/video-mx3fb.h b/include/linux/platform_data/video-mx3fb.h
deleted file mode 100644
index d03dc322a616..000000000000
--- a/include/linux/platform_data/video-mx3fb.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2008
- * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
- */
-
-#ifndef __ASM_ARCH_MX3FB_H__
-#define __ASM_ARCH_MX3FB_H__
-
-#include <linux/device.h>
-#include <linux/fb.h>
-
-/* Proprietary FB_SYNC_ flags */
-#define FB_SYNC_OE_ACT_HIGH 0x80000000
-#define FB_SYNC_CLK_INVERT 0x40000000
-#define FB_SYNC_DATA_INVERT 0x20000000
-#define FB_SYNC_CLK_IDLE_EN 0x10000000
-#define FB_SYNC_SHARP_MODE 0x08000000
-#define FB_SYNC_SWAP_RGB 0x04000000
-#define FB_SYNC_CLK_SEL_EN 0x02000000
-
-/*
- * Specify the way your display is connected. The IPU can arbitrarily
- * map the internal colors to the external data lines. We only support
- * the following mappings at the moment.
- */
-enum disp_data_mapping {
- /* blue -> d[0..5], green -> d[6..11], red -> d[12..17] */
- IPU_DISP_DATA_MAPPING_RGB666,
- /* blue -> d[0..4], green -> d[5..10], red -> d[11..15] */
- IPU_DISP_DATA_MAPPING_RGB565,
- /* blue -> d[0..7], green -> d[8..15], red -> d[16..23] */
- IPU_DISP_DATA_MAPPING_RGB888,
-};
-
-/**
- * struct mx3fb_platform_data - mx3fb platform data
- *
- * @dma_dev: pointer to the dma-device, used for dma-slave connection
- * @mode: pointer to a platform-provided per mxc_register_fb() videomode
- */
-struct mx3fb_platform_data {
- struct device *dma_dev;
- const char *name;
- const struct fb_videomode *mode;
- int num_modes;
- enum disp_data_mapping disp_data_fmt;
-};
-
-#endif
diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h
index 28234dc9fa6a..ab1c7deff118 100644
--- a/include/linux/platform_data/x86/asus-wmi.h
+++ b/include/linux/platform_data/x86/asus-wmi.h
@@ -58,6 +58,10 @@
#define ASUS_WMI_DEVID_KBD_BACKLIGHT 0x00050021
#define ASUS_WMI_DEVID_LIGHT_SENSOR 0x00050022 /* ?? */
#define ASUS_WMI_DEVID_LIGHTBAR 0x00050025
+/* This can only be used to disable the screen, not to re-enable it */
+#define ASUS_WMI_DEVID_SCREENPAD_POWER 0x00050031
+/* Writing a brightness re-enables the screen if disabled */
+#define ASUS_WMI_DEVID_SCREENPAD_LIGHT 0x00050032
#define ASUS_WMI_DEVID_FAN_BOOST_MODE 0x00110018
#define ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY 0x00120075
@@ -66,6 +70,7 @@
#define ASUS_WMI_DEVID_CAMERA 0x00060013
#define ASUS_WMI_DEVID_LID_FLIP 0x00060062
#define ASUS_WMI_DEVID_LID_FLIP_ROG 0x00060077
+#define ASUS_WMI_DEVID_MINI_LED_MODE 0x0005001E
/* Storage */
#define ASUS_WMI_DEVID_CARDREADER 0x00080013
@@ -80,8 +85,19 @@
#define ASUS_WMI_DEVID_FAN_CTRL 0x00110012 /* deprecated */
#define ASUS_WMI_DEVID_CPU_FAN_CTRL 0x00110013
#define ASUS_WMI_DEVID_GPU_FAN_CTRL 0x00110014
+#define ASUS_WMI_DEVID_MID_FAN_CTRL 0x00110031
#define ASUS_WMI_DEVID_CPU_FAN_CURVE 0x00110024
#define ASUS_WMI_DEVID_GPU_FAN_CURVE 0x00110025
+#define ASUS_WMI_DEVID_MID_FAN_CURVE 0x00110032
+
+/* Tunables for ASUS ROG laptops */
+#define ASUS_WMI_DEVID_PPT_PL2_SPPT 0x001200A0
+#define ASUS_WMI_DEVID_PPT_PL1_SPL 0x001200A3
+#define ASUS_WMI_DEVID_PPT_APU_SPPT 0x001200B0
+#define ASUS_WMI_DEVID_PPT_PLAT_SPPT 0x001200B1
+#define ASUS_WMI_DEVID_PPT_FPPT 0x001200C1
+#define ASUS_WMI_DEVID_NV_DYN_BOOST 0x001200C0
+#define ASUS_WMI_DEVID_NV_THERM_TARGET 0x001200C2
/* Power */
#define ASUS_WMI_DEVID_PROCESSOR_STATE 0x00120012
@@ -95,7 +111,15 @@
/* Keyboard dock */
#define ASUS_WMI_DEVID_KBD_DOCK 0x00120063
-/* dgpu on/off */
+/* Charging mode - 1=Barrel, 2=USB */
+#define ASUS_WMI_DEVID_CHARGE_MODE 0x0012006C
+
+/* MCU powersave mode */
+#define ASUS_WMI_DEVID_MCU_POWERSAVE 0x001200E2
+
+/* egpu is connected? 1 == true */
+#define ASUS_WMI_DEVID_EGPU_CONNECTED 0x00090018
+/* egpu on/off */
#define ASUS_WMI_DEVID_EGPU 0x00090019
/* dgpu on/off */
diff --git a/include/linux/platform_data/x86/clk-lpss.h b/include/linux/platform_data/x86/clk-lpss.h
index 41df326583f9..7f132029316a 100644
--- a/include/linux/platform_data/x86/clk-lpss.h
+++ b/include/linux/platform_data/x86/clk-lpss.h
@@ -15,6 +15,6 @@ struct lpss_clk_data {
struct clk *clk;
};
-extern int lpss_atom_clk_init(void);
+int lpss_atom_clk_init(void);
#endif /* __CLK_LPSS_H */
diff --git a/include/linux/platform_data/x86/simatic-ipc-base.h b/include/linux/platform_data/x86/simatic-ipc-base.h
index 57d6a10dfc9e..2d7f7120ba6b 100644
--- a/include/linux/platform_data/x86/simatic-ipc-base.h
+++ b/include/linux/platform_data/x86/simatic-ipc-base.h
@@ -2,7 +2,7 @@
/*
* Siemens SIMATIC IPC drivers
*
- * Copyright (c) Siemens AG, 2018-2021
+ * Copyright (c) Siemens AG, 2018-2023
*
* Authors:
* Henning Schild <henning.schild@siemens.com>
@@ -20,6 +20,9 @@
#define SIMATIC_IPC_DEVICE_127E 3
#define SIMATIC_IPC_DEVICE_227E 4
#define SIMATIC_IPC_DEVICE_227G 5
+#define SIMATIC_IPC_DEVICE_BX_21A 6
+#define SIMATIC_IPC_DEVICE_BX_39A 7
+#define SIMATIC_IPC_DEVICE_BX_59A 8
struct simatic_ipc_platform {
u8 devmode;
diff --git a/include/linux/platform_data/x86/simatic-ipc.h b/include/linux/platform_data/x86/simatic-ipc.h
index a48bb5240977..8d8b3b919674 100644
--- a/include/linux/platform_data/x86/simatic-ipc.h
+++ b/include/linux/platform_data/x86/simatic-ipc.h
@@ -2,7 +2,7 @@
/*
* Siemens SIMATIC IPC drivers
*
- * Copyright (c) Siemens AG, 2018-2021
+ * Copyright (c) Siemens AG, 2018-2023
*
* Authors:
* Henning Schild <henning.schild@siemens.com>
@@ -32,8 +32,12 @@ enum simatic_ipc_station_ids {
SIMATIC_IPC_IPC477E = 0x00000A02,
SIMATIC_IPC_IPC127E = 0x00000D01,
SIMATIC_IPC_IPC227G = 0x00000F01,
+ SIMATIC_IPC_IPC277G = 0x00000F02,
SIMATIC_IPC_IPCBX_39A = 0x00001001,
SIMATIC_IPC_IPCPX_39A = 0x00001002,
+ SIMATIC_IPC_IPCBX_21A = 0x00001101,
+ SIMATIC_IPC_IPCBX_56A = 0x00001201,
+ SIMATIC_IPC_IPCBX_59A = 0x00001202,
};
static inline u32 simatic_ipc_get_station_id(u8 *data, int max_len)
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index b845fd83f429..7a41c72c1959 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -63,6 +63,8 @@ extern struct resource *platform_get_mem_or_io(struct platform_device *,
extern struct device *
platform_find_device_by_driver(struct device *start,
const struct device_driver *drv);
+
+#ifdef CONFIG_HAS_IOMEM
extern void __iomem *
devm_platform_get_and_ioremap_resource(struct platform_device *pdev,
unsigned int index, struct resource **res);
@@ -72,6 +74,32 @@ devm_platform_ioremap_resource(struct platform_device *pdev,
extern void __iomem *
devm_platform_ioremap_resource_byname(struct platform_device *pdev,
const char *name);
+#else
+
+static inline void __iomem *
+devm_platform_get_and_ioremap_resource(struct platform_device *pdev,
+ unsigned int index, struct resource **res)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+
+static inline void __iomem *
+devm_platform_ioremap_resource(struct platform_device *pdev,
+ unsigned int index)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline void __iomem *
+devm_platform_ioremap_resource_byname(struct platform_device *pdev,
+ const char *name)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+#endif
+
extern int platform_get_irq(struct platform_device *, unsigned int);
extern int platform_get_irq_optional(struct platform_device *, unsigned int);
extern int platform_irq_count(struct platform_device *);
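
Editor's note: the new !CONFIG_HAS_IOMEM stubs return ERR_PTR(-EINVAL), so a standard probe sequence keeps working unchanged. A minimal sketch (foo_probe is hypothetical):

static int foo_probe(struct platform_device *pdev)
{
	void __iomem *base;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* -EINVAL when HAS_IOMEM is unset */

	return 0;
}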
diff --git a/include/linux/plist.h b/include/linux/plist.h
index 0f352c1d3c80..8c1c8adf7fe9 100644
--- a/include/linux/plist.h
+++ b/include/linux/plist.h
@@ -75,20 +75,10 @@
#include <linux/container_of.h>
#include <linux/list.h>
-#include <linux/types.h>
+#include <linux/plist_types.h>
#include <asm/bug.h>
-struct plist_head {
- struct list_head node_list;
-};
-
-struct plist_node {
- int prio;
- struct list_head prio_list;
- struct list_head node_list;
-};
-
/**
* PLIST_HEAD_INIT - static struct plist_head initializer
* @head: struct plist_head variable name
diff --git a/include/linux/plist_types.h b/include/linux/plist_types.h
new file mode 100644
index 000000000000..c37e784330af
--- /dev/null
+++ b/include/linux/plist_types.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _LINUX_PLIST_TYPES_H
+#define _LINUX_PLIST_TYPES_H
+
+#include <linux/types.h>
+
+struct plist_head {
+ struct list_head node_list;
+};
+
+struct plist_node {
+ int prio;
+ struct list_head prio_list;
+ struct list_head node_list;
+};
+
+#endif /* _LINUX_PLIST_TYPES_H */
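
Editor's note: the point of the split is header weight; code that only embeds the types can now include the lighter plist_types.h instead of all of plist.h. A sketch (foo_waiter is hypothetical):

#include <linux/plist_types.h>

/* a waiter kept in priority order */
struct foo_waiter {
	struct plist_node node;
	int id;
};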
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 035d9649eba4..a2f3e53a8196 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -374,24 +374,39 @@ const struct dev_pm_ops name = { \
RUNTIME_PM_OPS(runtime_suspend_fn, runtime_resume_fn, idle_fn) \
}
-#ifdef CONFIG_PM
-#define _EXPORT_DEV_PM_OPS(name, sec, ns) \
+#define _EXPORT_PM_OPS(name, license, ns) \
const struct dev_pm_ops name; \
- __EXPORT_SYMBOL(name, sec, ns); \
+ __EXPORT_SYMBOL(name, license, ns); \
const struct dev_pm_ops name
-#define EXPORT_PM_FN_GPL(name) EXPORT_SYMBOL_GPL(name)
-#define EXPORT_PM_FN_NS_GPL(name, ns) EXPORT_SYMBOL_NS_GPL(name, ns)
-#else
-#define _EXPORT_DEV_PM_OPS(name, sec, ns) \
+
+#define _DISCARD_PM_OPS(name, license, ns) \
static __maybe_unused const struct dev_pm_ops __static_##name
+
+#ifdef CONFIG_PM
+#define _EXPORT_DEV_PM_OPS(name, license, ns) _EXPORT_PM_OPS(name, license, ns)
+#define EXPORT_PM_FN_GPL(name) EXPORT_SYMBOL_GPL(name)
+#define EXPORT_PM_FN_NS_GPL(name, ns) EXPORT_SYMBOL_NS_GPL(name, ns)
+#else
+#define _EXPORT_DEV_PM_OPS(name, license, ns) _DISCARD_PM_OPS(name, license, ns)
#define EXPORT_PM_FN_GPL(name)
#define EXPORT_PM_FN_NS_GPL(name, ns)
#endif
-#define EXPORT_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "", "")
-#define EXPORT_GPL_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "_gpl", "")
-#define EXPORT_NS_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "", #ns)
-#define EXPORT_NS_GPL_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "_gpl", #ns)
+#ifdef CONFIG_PM_SLEEP
+#define _EXPORT_DEV_SLEEP_PM_OPS(name, license, ns) _EXPORT_PM_OPS(name, license, ns)
+#else
+#define _EXPORT_DEV_SLEEP_PM_OPS(name, license, ns) _DISCARD_PM_OPS(name, license, ns)
+#endif
+
+#define EXPORT_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "", "")
+#define EXPORT_GPL_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "GPL", "")
+#define EXPORT_NS_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "", #ns)
+#define EXPORT_NS_GPL_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "GPL", #ns)
+
+#define EXPORT_DEV_SLEEP_PM_OPS(name) _EXPORT_DEV_SLEEP_PM_OPS(name, "", "")
+#define EXPORT_GPL_DEV_SLEEP_PM_OPS(name) _EXPORT_DEV_SLEEP_PM_OPS(name, "GPL", "")
+#define EXPORT_NS_DEV_SLEEP_PM_OPS(name, ns) _EXPORT_DEV_SLEEP_PM_OPS(name, "", #ns)
+#define EXPORT_NS_GPL_DEV_SLEEP_PM_OPS(name, ns) _EXPORT_DEV_SLEEP_PM_OPS(name, "GPL", #ns)
/*
* Use this if you want to use the same suspend and resume callbacks for suspend
@@ -404,19 +419,19 @@ const struct dev_pm_ops name = { \
_DEFINE_DEV_PM_OPS(name, suspend_fn, resume_fn, NULL, NULL, NULL)
#define EXPORT_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
- EXPORT_DEV_PM_OPS(name) = { \
+ EXPORT_DEV_SLEEP_PM_OPS(name) = { \
SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
}
#define EXPORT_GPL_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
- EXPORT_GPL_DEV_PM_OPS(name) = { \
+ EXPORT_GPL_DEV_SLEEP_PM_OPS(name) = { \
SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
}
#define EXPORT_NS_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn, ns) \
- EXPORT_NS_DEV_PM_OPS(name, ns) = { \
+ EXPORT_NS_DEV_SLEEP_PM_OPS(name, ns) = { \
SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
}
#define EXPORT_NS_GPL_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn, ns) \
- EXPORT_NS_GPL_DEV_PM_OPS(name, ns) = { \
+ EXPORT_NS_GPL_DEV_SLEEP_PM_OPS(name, ns) = { \
SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
}
@@ -448,6 +463,15 @@ const struct dev_pm_ops __maybe_unused name = { \
SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
}
+/*
+ * Use this if you want the suspend and resume callbacks to be called
+ * with IRQs disabled.
+ */
+#define DEFINE_NOIRQ_DEV_PM_OPS(name, suspend_fn, resume_fn) \
+const struct dev_pm_ops name = { \
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+}
+
#define pm_ptr(_ptr) PTR_IF(IS_ENABLED(CONFIG_PM), (_ptr))
#define pm_sleep_ptr(_ptr) PTR_IF(IS_ENABLED(CONFIG_PM_SLEEP), (_ptr))
@@ -657,6 +681,7 @@ struct dev_pm_info {
bool wakeup_path:1;
bool syscore:1;
bool no_pm_callbacks:1; /* Owned by the PM core */
+ bool async_in_progress:1; /* Owned by the PM core */
unsigned int must_resume:1; /* Owned by the PM core */
unsigned int may_skip_resume:1; /* Set by subsystems */
#else
@@ -710,6 +735,7 @@ extern void dev_pm_put_subsys_data(struct device *dev);
* @activate: Called before executing probe routines for bus types and drivers.
* @sync: Called after successful driver probe.
* @dismiss: Called after unsuccessful driver probe and after driver removal.
+ * @set_performance_state: Called to request a new performance state.
*
* Power domains provide callbacks that are executed during system suspend,
* hibernation, system resume and during runtime PM transitions instead of
@@ -722,6 +748,7 @@ struct dev_pm_domain {
int (*activate)(struct device *dev);
void (*sync)(struct device *dev);
void (*dismiss)(struct device *dev);
+ int (*set_performance_state)(struct device *dev, unsigned int state);
};
/*
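
Editor's note: a hedged usage sketch for the new noirq helper together with the sleep-only export split (the foo_* names are hypothetical); with CONFIG_PM_SLEEP unset, the EXPORT_*_SLEEP_PM_OPS variants now discard the ops instead of exporting dead weight:

static int foo_suspend_noirq(struct device *dev)
{
	/* runs with IRQs disabled */
	return 0;
}

static int foo_resume_noirq(struct device *dev)
{
	return 0;
}

static DEFINE_NOIRQ_DEV_PM_OPS(foo_pm_ops, foo_suspend_noirq,
			       foo_resume_noirq);

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		.pm = pm_sleep_ptr(&foo_pm_ops),
	},
};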
diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h
index ada3a0ab10bf..68669ce18720 100644
--- a/include/linux/pm_clock.h
+++ b/include/linux/pm_clock.h
@@ -91,10 +91,10 @@ static inline int devm_pm_clk_create(struct device *dev)
#endif
#ifdef CONFIG_HAVE_CLK
-extern void pm_clk_add_notifier(struct bus_type *bus,
+extern void pm_clk_add_notifier(const struct bus_type *bus,
struct pm_clk_notifier_block *clknb);
#else
-static inline void pm_clk_add_notifier(struct bus_type *bus,
+static inline void pm_clk_add_notifier(const struct bus_type *bus,
struct pm_clk_notifier_block *clknb)
{
}
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index f776fb93eaa0..b97c5e9820f9 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -61,6 +61,10 @@
* GENPD_FLAG_MIN_RESIDENCY: Enable the genpd governor to consider its
* components' next wakeup when determining the
* optimal idle state.
+ *
+ * GENPD_FLAG_OPP_TABLE_FW: The genpd provider supports performance states,
+ * but its corresponding OPP tables are not
+ * described in DT; they are provided directly by FW.
*/
#define GENPD_FLAG_PM_CLK (1U << 0)
#define GENPD_FLAG_IRQ_SAFE (1U << 1)
@@ -69,6 +73,7 @@
#define GENPD_FLAG_CPU_DOMAIN (1U << 4)
#define GENPD_FLAG_RPM_ALWAYS_ON (1U << 5)
#define GENPD_FLAG_MIN_RESIDENCY (1U << 6)
+#define GENPD_FLAG_OPP_TABLE_FW (1U << 7)
enum gpd_status {
GENPD_STATE_ON = 0, /* PM domain is on */
@@ -113,7 +118,6 @@ struct genpd_power_state {
};
struct genpd_lock_ops;
-struct dev_pm_opp;
struct opp_table;
struct generic_pm_domain {
@@ -141,8 +145,6 @@ struct generic_pm_domain {
int (*power_on)(struct generic_pm_domain *domain);
struct raw_notifier_head power_notifiers; /* Power on/off notifiers */
struct opp_table *opp_table; /* OPP table of the genpd */
- unsigned int (*opp_to_performance_state)(struct generic_pm_domain *genpd,
- struct dev_pm_opp *opp);
int (*set_performance_state)(struct generic_pm_domain *genpd,
unsigned int state);
struct gpd_dev_ops dev_ops;
@@ -343,8 +345,6 @@ int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
struct generic_pm_domain *of_genpd_remove_last(struct device_node *np);
int of_genpd_parse_idle_states(struct device_node *dn,
struct genpd_power_state **states, int *n);
-unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
- struct dev_pm_opp *opp);
int genpd_dev_pm_attach(struct device *dev);
struct device *genpd_dev_pm_attach_by_id(struct device *dev,
@@ -390,13 +390,6 @@ static inline int of_genpd_parse_idle_states(struct device_node *dn,
return -ENODEV;
}
-static inline unsigned int
-pm_genpd_opp_to_performance_state(struct device *genpd_dev,
- struct dev_pm_opp *opp)
-{
- return 0;
-}
-
static inline int genpd_dev_pm_attach(struct device *dev)
{
return 0;
@@ -430,6 +423,7 @@ struct device *dev_pm_domain_attach_by_name(struct device *dev,
void dev_pm_domain_detach(struct device *dev, bool power_off);
int dev_pm_domain_start(struct device *dev);
void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd);
+int dev_pm_domain_set_performance_state(struct device *dev, unsigned int state);
#else
static inline int dev_pm_domain_attach(struct device *dev, bool power_on)
{
@@ -452,6 +446,11 @@ static inline int dev_pm_domain_start(struct device *dev)
}
static inline void dev_pm_domain_set(struct device *dev,
struct dev_pm_domain *pd) {}
+static inline int dev_pm_domain_set_performance_state(struct device *dev,
+ unsigned int state)
+{
+ return 0;
+}
#endif
#endif /* _LINUX_PM_DOMAIN_H */
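
Editor's note: a sketch of a consumer requesting a performance state through the new genpd-agnostic wrapper (foo_boost and the state value are made up); the stub in the !PM case returns 0, so callers need no ifdefs:

static int foo_boost(struct device *dev)
{
	int ret;

	ret = dev_pm_domain_set_performance_state(dev, 3);
	if (ret)
		dev_err(dev, "failed to set performance state: %d\n", ret);
	return ret;
}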
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index dc1fb5890792..76dcb7f37bcd 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -45,18 +45,6 @@ struct dev_pm_opp_supply {
unsigned long u_watt;
};
-/**
- * struct dev_pm_opp_icc_bw - Interconnect bandwidth values
- * @avg: Average bandwidth corresponding to this OPP (in icc units)
- * @peak: Peak bandwidth corresponding to this OPP (in icc units)
- *
- * This structure stores the bandwidth values for a single interconnect path.
- */
-struct dev_pm_opp_icc_bw {
- u32 avg;
- u32 peak;
-};
-
typedef int (*config_regulators_t)(struct device *dev,
struct dev_pm_opp *old_opp, struct dev_pm_opp *new_opp,
struct regulator **regulators, unsigned int count);
@@ -74,8 +62,10 @@ typedef int (*config_clks_t)(struct device *dev, struct opp_table *opp_table,
* @supported_hw_count: Number of elements in the array.
* @regulator_names: Array of pointers to the names of the regulator, NULL terminated.
* @genpd_names: Null terminated array of pointers containing names of genpd to
- * attach.
- * @virt_devs: Pointer to return the array of virtual devices.
+ * attach. Mutually exclusive with required_devs.
+ * @virt_devs: Pointer to return the array of genpd virtual devices. Mutually
+ * exclusive with required_devs.
+ * @required_devs: Required OPP devices. Mutually exclusive with genpd_names/virt_devs.
*
* This structure contains platform specific OPP configurations for the device.
*/
@@ -90,6 +80,22 @@ struct dev_pm_opp_config {
const char * const *regulator_names;
const char * const *genpd_names;
struct device ***virt_devs;
+ struct device **required_devs;
+};
+
+#define OPP_LEVEL_UNSET U32_MAX
+
+/**
+ * struct dev_pm_opp_data - The data to use to initialize an OPP.
+ * @level: The performance level for the OPP. Set level to OPP_LEVEL_UNSET if
+ * level field isn't used.
+ * @freq: The clock rate in Hz for the OPP.
+ * @u_volt: The voltage in uV for the OPP.
+ */
+struct dev_pm_opp_data {
+ unsigned int level;
+ unsigned long freq;
+ unsigned long u_volt;
};
#if defined(CONFIG_PM_OPP)
@@ -103,7 +109,7 @@ int dev_pm_opp_get_supplies(struct dev_pm_opp *opp, struct dev_pm_opp_supply *su
unsigned long dev_pm_opp_get_power(struct dev_pm_opp *opp);
-unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp);
+unsigned long dev_pm_opp_get_freq_indexed(struct dev_pm_opp *opp, u32 index);
unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp);
@@ -121,16 +127,31 @@ unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev);
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
unsigned long freq,
bool available);
+
+struct dev_pm_opp *
+dev_pm_opp_find_freq_exact_indexed(struct device *dev, unsigned long freq,
+ u32 index, bool available);
+
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
unsigned long *freq);
+struct dev_pm_opp *dev_pm_opp_find_freq_floor_indexed(struct device *dev,
+ unsigned long *freq, u32 index);
+
+struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
+ unsigned long *freq);
+
+struct dev_pm_opp *dev_pm_opp_find_freq_ceil_indexed(struct device *dev,
+ unsigned long *freq, u32 index);
+
struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
unsigned int level);
+
struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
unsigned int *level);
-struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
- unsigned long *freq);
+struct dev_pm_opp *dev_pm_opp_find_level_floor(struct device *dev,
+ unsigned int *level);
struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev,
unsigned int *bw, int index);
@@ -140,8 +161,8 @@ struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
void dev_pm_opp_put(struct dev_pm_opp *opp);
-int dev_pm_opp_add(struct device *dev, unsigned long freq,
- unsigned long u_volt);
+int dev_pm_opp_add_dynamic(struct device *dev, struct dev_pm_opp_data *opp);
+
void dev_pm_opp_remove(struct device *dev, unsigned long freq);
void dev_pm_opp_remove_all_dynamic(struct device *dev);
@@ -200,7 +221,7 @@ static inline unsigned long dev_pm_opp_get_power(struct dev_pm_opp *opp)
return 0;
}
-static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
+static inline unsigned long dev_pm_opp_get_freq_indexed(struct dev_pm_opp *opp, u32 index)
{
return 0;
}
@@ -247,26 +268,27 @@ static inline unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev)
return 0;
}
-static inline struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
- unsigned int level)
+static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
+ unsigned long freq, bool available)
{
return ERR_PTR(-EOPNOTSUPP);
}
-static inline struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
- unsigned int *level)
+static inline struct dev_pm_opp *
+dev_pm_opp_find_freq_exact_indexed(struct device *dev, unsigned long freq,
+ u32 index, bool available)
{
return ERR_PTR(-EOPNOTSUPP);
}
-static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
- unsigned long freq, bool available)
+static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
+ unsigned long *freq)
{
return ERR_PTR(-EOPNOTSUPP);
}
-static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
- unsigned long *freq)
+static inline struct dev_pm_opp *
+dev_pm_opp_find_freq_floor_indexed(struct device *dev, unsigned long *freq, u32 index)
{
return ERR_PTR(-EOPNOTSUPP);
}
@@ -277,6 +299,30 @@ static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
return ERR_PTR(-EOPNOTSUPP);
}
+static inline struct dev_pm_opp *
+dev_pm_opp_find_freq_ceil_indexed(struct device *dev, unsigned long *freq, u32 index)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
+ unsigned int level)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
+ unsigned int *level)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline struct dev_pm_opp *dev_pm_opp_find_level_floor(struct device *dev,
+ unsigned int *level)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
static inline struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev,
unsigned int *bw, int index)
{
@@ -291,8 +337,8 @@ static inline struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
static inline void dev_pm_opp_put(struct dev_pm_opp *opp) {}
-static inline int dev_pm_opp_add(struct device *dev, unsigned long freq,
- unsigned long u_volt)
+static inline int
+dev_pm_opp_add_dynamic(struct device *dev, struct dev_pm_opp_data *opp)
{
return -EOPNOTSUPP;
}
@@ -488,6 +534,17 @@ static inline int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_ta
/* OPP Configuration helpers */
+static inline int dev_pm_opp_add(struct device *dev, unsigned long freq,
+ unsigned long u_volt)
+{
+ struct dev_pm_opp_data data = {
+ .freq = freq,
+ .u_volt = u_volt,
+ };
+
+ return dev_pm_opp_add_dynamic(dev, &data);
+}
+
/* Regulators helpers */
static inline int dev_pm_opp_set_regulators(struct device *dev,
const char * const names[])
@@ -631,4 +688,9 @@ static inline void dev_pm_opp_put_prop_name(int token)
dev_pm_opp_clear_config(token);
}
+static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
+{
+ return dev_pm_opp_get_freq_indexed(opp, 0);
+}
+
#endif /* __LINUX_OPP_H__ */
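
The dev_pm_opp_add() wrapper above shows the intended migration path: the legacy
freq/voltage pair is packed into a struct dev_pm_opp_data and handed to
dev_pm_opp_add_dynamic(). A minimal sketch of a driver registering one OPP with
the new interface; the frequency and voltage values are illustrative:

#include <linux/pm_opp.h>

static int example_register_opp(struct device *dev)
{
	struct dev_pm_opp_data data = {
		.level = OPP_LEVEL_UNSET,	/* level is unused here, per the kernel-doc */
		.freq = 800000000,		/* 800 MHz, in Hz */
		.u_volt = 1100000,		/* 1.1 V, in uV */
	};

	/* 0 on success, negative errno (-EOPNOTSUPP without CONFIG_PM_OPP). */
	return dev_pm_opp_add_dynamic(dev, &data);
}
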
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 9a8151a2bdea..7c9b35448563 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -85,8 +85,6 @@ extern void pm_runtime_irq_safe(struct device *dev);
extern void __pm_runtime_use_autosuspend(struct device *dev, bool use);
extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay);
extern u64 pm_runtime_autosuspend_expiration(struct device *dev);
-extern void pm_runtime_update_max_time_suspended(struct device *dev,
- s64 delta_ns);
extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable);
extern void pm_runtime_get_suppliers(struct device *dev);
extern void pm_runtime_put_suppliers(struct device *dev);
diff --git a/include/linux/pm_wakeirq.h b/include/linux/pm_wakeirq.h
index dd42d16945d0..d9642c6cf852 100644
--- a/include/linux/pm_wakeirq.h
+++ b/include/linux/pm_wakeirq.h
@@ -10,8 +10,6 @@ extern int dev_pm_set_wake_irq(struct device *dev, int irq);
extern int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq);
extern int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq);
extern void dev_pm_clear_wake_irq(struct device *dev);
-extern void dev_pm_enable_wake_irq(struct device *dev);
-extern void dev_pm_disable_wake_irq(struct device *dev);
#else /* !CONFIG_PM */
@@ -34,13 +32,5 @@ static inline void dev_pm_clear_wake_irq(struct device *dev)
{
}
-static inline void dev_pm_enable_wake_irq(struct device *dev)
-{
-}
-
-static inline void dev_pm_disable_wake_irq(struct device *dev)
-{
-}
-
#endif /* CONFIG_PM */
#endif /* _LINUX_PM_WAKEIRQ_H */
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h
index 77f4849e3418..6eb9adaef52b 100644
--- a/include/linux/pm_wakeup.h
+++ b/include/linux/pm_wakeup.h
@@ -194,6 +194,16 @@ static inline void pm_wakeup_dev_event(struct device *dev, unsigned int msec,
#endif /* !CONFIG_PM_SLEEP */
+static inline bool device_awake_path(struct device *dev)
+{
+ return device_wakeup_path(dev);
+}
+
+static inline void device_set_awake_path(struct device *dev)
+{
+ device_set_wakeup_path(dev);
+}
+
static inline void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
{
return pm_wakeup_ws_event(ws, msec, false);
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
index c2a7cfbca713..ddbe7c3ca4ce 100644
--- a/include/linux/pnp.h
+++ b/include/linux/pnp.h
@@ -291,7 +291,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
struct pnp_fixup {
char id[7];
- void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
+ void (*quirk_function) (struct pnp_dev *dev); /* fixup function */
};
/* config parameters */
@@ -419,8 +419,8 @@ struct pnp_protocol {
/* protocol specific suspend/resume */
bool (*can_wakeup) (struct pnp_dev *dev);
- int (*suspend) (struct pnp_dev * dev, pm_message_t state);
- int (*resume) (struct pnp_dev * dev);
+ int (*suspend) (struct pnp_dev *dev, pm_message_t state);
+ int (*resume) (struct pnp_dev *dev);
/* used by pnp layer only (look but don't touch) */
unsigned char number; /* protocol number */
@@ -435,7 +435,7 @@ struct pnp_protocol {
#define protocol_for_each_dev(protocol, dev) \
list_for_each_entry(dev, &(protocol)->devices, protocol_list)
-extern struct bus_type pnp_bus_type;
+extern const struct bus_type pnp_bus_type;
#if defined(CONFIG_PNP)
@@ -492,7 +492,7 @@ static inline int pnp_start_dev(struct pnp_dev *dev) { return -ENODEV; }
static inline int pnp_stop_dev(struct pnp_dev *dev) { return -ENODEV; }
static inline int pnp_activate_dev(struct pnp_dev *dev) { return -ENODEV; }
static inline int pnp_disable_dev(struct pnp_dev *dev) { return -ENODEV; }
-static inline int pnp_range_reserved(resource_size_t start, resource_size_t end) { return 0;}
+static inline int pnp_range_reserved(resource_size_t start, resource_size_t end) { return 0; }
/* protocol helpers */
static inline int pnp_is_active(struct pnp_dev *dev) { return 0; }
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 851a855d3868..1f0ee2459f2a 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -83,6 +83,8 @@
/********** net/core/skbuff.c **********/
#define SKB_LIST_POISON_NEXT ((void *)(0x800 + POISON_POINTER_DELTA))
+/********** net/ **********/
+#define NET_PTR_POISON ((void *)(0x801 + POISON_POINTER_DELTA))
/********** kernel/bpf/ **********/
#define BPF_PTR_POISON ((void *)(0xeB9FUL + POISON_POINTER_DELTA))
@@ -90,4 +92,7 @@
/********** VFS **********/
#define VFS_PTR_POISON ((void *)(0xF5 + POISON_POINTER_DELTA))
+/********** lib/stackdepot.c **********/
+#define STACK_DEPOT_POISON ((void *)(0xD390 + POISON_POINTER_DELTA))
+
#endif
diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h
index 468328b1e1dd..ef8619f48920 100644
--- a/include/linux/posix-clock.h
+++ b/include/linux/posix-clock.h
@@ -14,6 +14,7 @@
#include <linux/rwsem.h>
struct posix_clock;
+struct posix_clock_context;
/**
* struct posix_clock_operations - functional interface to the clock
@@ -50,18 +51,18 @@ struct posix_clock_operations {
/*
* Optional character device methods:
*/
- long (*ioctl) (struct posix_clock *pc,
- unsigned int cmd, unsigned long arg);
+ long (*ioctl)(struct posix_clock_context *pccontext, unsigned int cmd,
+ unsigned long arg);
- int (*open) (struct posix_clock *pc, fmode_t f_mode);
+ int (*open)(struct posix_clock_context *pccontext, fmode_t f_mode);
- __poll_t (*poll) (struct posix_clock *pc,
- struct file *file, poll_table *wait);
+ __poll_t (*poll)(struct posix_clock_context *pccontext, struct file *file,
+ poll_table *wait);
- int (*release) (struct posix_clock *pc);
+ int (*release)(struct posix_clock_context *pccontext);
- ssize_t (*read) (struct posix_clock *pc,
- uint flags, char __user *buf, size_t cnt);
+ ssize_t (*read)(struct posix_clock_context *pccontext, uint flags,
+ char __user *buf, size_t cnt);
};
/**
@@ -91,6 +92,24 @@ struct posix_clock {
};
/**
+ * struct posix_clock_context - represents clock file operations context
+ *
+ * @clk: Pointer to the clock
+ * @private_clkdata: Pointer to user data
+ *
+ * Drivers should use struct posix_clock_context in their character device
+ * file operation methods to access the posix clock.
+ *
+ * Drivers can store a private data structure during the open operation
+ * if they have specific information that is required in other file
+ * operations.
+ */
+struct posix_clock_context {
+ struct posix_clock *clk;
+ void *private_clkdata;
+};
+
+/**
* posix_clock_register() - register a new clock
* @clk: Pointer to the clock. Caller must provide 'ops' field
* @dev: Pointer to the initialized device. Caller must provide
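
The new context structure gives each open file its own private slot. A sketch of
an open/release pair under that model; struct my_clk_data is a hypothetical
driver-private type, not part of this patch:

struct my_clk_data {		/* hypothetical driver-private type */
	u32 last_cmd;
};

static int my_clock_open(struct posix_clock_context *pccontext, fmode_t f_mode)
{
	struct my_clk_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	/* Per-open state; the other fops read it back from the same context. */
	pccontext->private_clkdata = data;
	return 0;
}

static int my_clock_release(struct posix_clock_context *pccontext)
{
	kfree(pccontext->private_clkdata);
	pccontext->private_clkdata = NULL;
	return 0;
}
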
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index d607f51404fc..dc7b738de299 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -2,40 +2,16 @@
#ifndef _linux_POSIX_TIMERS_H
#define _linux_POSIX_TIMERS_H
-#include <linux/spinlock.h>
+#include <linux/alarmtimer.h>
#include <linux/list.h>
#include <linux/mutex.h>
-#include <linux/alarmtimer.h>
+#include <linux/posix-timers_types.h>
+#include <linux/spinlock.h>
#include <linux/timerqueue.h>
struct kernel_siginfo;
struct task_struct;
-/*
- * Bit fields within a clockid:
- *
- * The most significant 29 bits hold either a pid or a file descriptor.
- *
- * Bit 2 indicates whether a cpu clock refers to a thread or a process.
- *
- * Bits 1 and 0 give the type: PROF=0, VIRT=1, SCHED=2, or FD=3.
- *
- * A clockid is invalid if bits 2, 1, and 0 are all set.
- */
-#define CPUCLOCK_PID(clock) ((pid_t) ~((clock) >> 3))
-#define CPUCLOCK_PERTHREAD(clock) \
- (((clock) & (clockid_t) CPUCLOCK_PERTHREAD_MASK) != 0)
-
-#define CPUCLOCK_PERTHREAD_MASK 4
-#define CPUCLOCK_WHICH(clock) ((clock) & (clockid_t) CPUCLOCK_CLOCK_MASK)
-#define CPUCLOCK_CLOCK_MASK 3
-#define CPUCLOCK_PROF 0
-#define CPUCLOCK_VIRT 1
-#define CPUCLOCK_SCHED 2
-#define CPUCLOCK_MAX 3
-#define CLOCKFD CPUCLOCK_MAX
-#define CLOCKFD_MASK (CPUCLOCK_PERTHREAD_MASK|CPUCLOCK_CLOCK_MASK)
-
static inline clockid_t make_process_cpuclock(const unsigned int pid,
const clockid_t clock)
{
@@ -109,44 +85,6 @@ static inline void cpu_timer_setexpires(struct cpu_timer *ctmr, u64 exp)
ctmr->node.expires = exp;
}
-/**
- * posix_cputimer_base - Container per posix CPU clock
- * @nextevt: Earliest-expiration cache
- * @tqhead: timerqueue head for cpu_timers
- */
-struct posix_cputimer_base {
- u64 nextevt;
- struct timerqueue_head tqhead;
-};
-
-/**
- * posix_cputimers - Container for posix CPU timer related data
- * @bases: Base container for posix CPU clocks
- * @timers_active: Timers are queued.
- * @expiry_active: Timer expiry is active. Used for
- * process wide timers to avoid multiple
- * task trying to handle expiry concurrently
- *
- * Used in task_struct and signal_struct
- */
-struct posix_cputimers {
- struct posix_cputimer_base bases[CPUCLOCK_MAX];
- unsigned int timers_active;
- unsigned int expiry_active;
-};
-
-/**
- * posix_cputimers_work - Container for task work based posix CPU timer expiry
- * @work: The task work to be scheduled
- * @mutex: Mutex held around expiry in context of this task work
- * @scheduled: @work has been scheduled already, no further processing
- */
-struct posix_cputimers_work {
- struct callback_head work;
- struct mutex mutex;
- unsigned int scheduled;
-};
-
static inline void posix_cputimers_init(struct posix_cputimers *pct)
{
memset(pct, 0, sizeof(*pct));
@@ -179,7 +117,6 @@ static inline void posix_cputimers_rt_watchdog(struct posix_cputimers *pct,
.bases = INIT_CPU_TIMERBASES(s.posix_cputimers.bases), \
},
#else
-struct posix_cputimers { };
struct cpu_timer { };
#define INIT_CPU_TIMERS(s)
static inline void posix_cputimers_init(struct posix_cputimers *pct) { }
diff --git a/include/linux/posix-timers_types.h b/include/linux/posix-timers_types.h
new file mode 100644
index 000000000000..a4712c1008c9
--- /dev/null
+++ b/include/linux/posix-timers_types.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _linux_POSIX_TIMERS_TYPES_H
+#define _linux_POSIX_TIMERS_TYPES_H
+
+#include <linux/mutex_types.h>
+#include <linux/timerqueue_types.h>
+#include <linux/types.h>
+
+/*
+ * Bit fields within a clockid:
+ *
+ * The most significant 29 bits hold either a pid or a file descriptor.
+ *
+ * Bit 2 indicates whether a cpu clock refers to a thread or a process.
+ *
+ * Bits 1 and 0 give the type: PROF=0, VIRT=1, SCHED=2, or FD=3.
+ *
+ * A clockid is invalid if bits 2, 1, and 0 are all set.
+ */
+#define CPUCLOCK_PID(clock) ((pid_t) ~((clock) >> 3))
+#define CPUCLOCK_PERTHREAD(clock) \
+ (((clock) & (clockid_t) CPUCLOCK_PERTHREAD_MASK) != 0)
+
+#define CPUCLOCK_PERTHREAD_MASK 4
+#define CPUCLOCK_WHICH(clock) ((clock) & (clockid_t) CPUCLOCK_CLOCK_MASK)
+#define CPUCLOCK_CLOCK_MASK 3
+#define CPUCLOCK_PROF 0
+#define CPUCLOCK_VIRT 1
+#define CPUCLOCK_SCHED 2
+#define CPUCLOCK_MAX 3
+#define CLOCKFD CPUCLOCK_MAX
+#define CLOCKFD_MASK (CPUCLOCK_PERTHREAD_MASK|CPUCLOCK_CLOCK_MASK)
+
+#ifdef CONFIG_POSIX_TIMERS
+
+/**
+ * posix_cputimer_base - Container per posix CPU clock
+ * @nextevt: Earliest-expiration cache
+ * @tqhead: timerqueue head for cpu_timers
+ */
+struct posix_cputimer_base {
+ u64 nextevt;
+ struct timerqueue_head tqhead;
+};
+
+/**
+ * posix_cputimers - Container for posix CPU timer related data
+ * @bases: Base container for posix CPU clocks
+ * @timers_active: Timers are queued.
+ * @expiry_active: Timer expiry is active. Used for
+ * process-wide timers to avoid multiple
+ * tasks trying to handle expiry concurrently
+ *
+ * Used in task_struct and signal_struct
+ */
+struct posix_cputimers {
+ struct posix_cputimer_base bases[CPUCLOCK_MAX];
+ unsigned int timers_active;
+ unsigned int expiry_active;
+};
+
+/**
+ * posix_cputimers_work - Container for task work based posix CPU timer expiry
+ * @work: The task work to be scheduled
+ * @mutex: Mutex held around expiry in context of this task work
+ * @scheduled: @work has been scheduled already, no further processing
+ */
+struct posix_cputimers_work {
+ struct callback_head work;
+ struct mutex mutex;
+ unsigned int scheduled;
+};
+
+#else /* CONFIG_POSIX_TIMERS */
+
+struct posix_cputimers { };
+
+#endif /* CONFIG_POSIX_TIMERS */
+
+#endif /* _linux_POSIX_TIMERS_TYPES_H */
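
To make the relocated bit layout concrete, a short sketch decoding a clockid
with the macros above (the pr_info() output is purely illustrative):

static void example_decode_clockid(clockid_t clock)
{
	pid_t pid = CPUCLOCK_PID(clock);	/* upper 29 bits, negated */
	int which = CPUCLOCK_WHICH(clock);	/* PROF=0, VIRT=1, SCHED=2, FD=3 */

	/* Bits 2, 1 and 0 all set marks an invalid clockid. */
	if ((clock & CLOCKFD_MASK) == CLOCKFD_MASK) {
		pr_info("invalid clockid\n");
		return;
	}

	pr_info("pid=%d type=%d perthread=%d\n",
		pid, which, CPUCLOCK_PERTHREAD(clock));
}
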
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
index 7c8d65414a70..7d8025fb74b7 100644
--- a/include/linux/power/bq27xxx_battery.h
+++ b/include/linux/power/bq27xxx_battery.h
@@ -83,5 +83,6 @@ struct bq27xxx_device_info {
void bq27xxx_battery_update(struct bq27xxx_device_info *di);
int bq27xxx_battery_setup(struct bq27xxx_device_info *di);
void bq27xxx_battery_teardown(struct bq27xxx_device_info *di);
+extern const struct dev_pm_ops bq27xxx_battery_battery_pm_ops;
#endif
diff --git a/include/linux/power/power_on_reason.h b/include/linux/power/power_on_reason.h
new file mode 100644
index 000000000000..95a1ec0c403c
--- /dev/null
+++ b/include/linux/power/power_on_reason.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Author: Kamel Bouhra <kamel.bouhara@bootlin.com>
+ */
+
+#ifndef POWER_ON_REASON_H
+#define POWER_ON_REASON_H
+
+#define POWER_ON_REASON_REGULAR "regular power-up"
+#define POWER_ON_REASON_RTC "RTC wakeup"
+#define POWER_ON_REASON_WATCHDOG "watchdog timeout"
+#define POWER_ON_REASON_SOFTWARE "software reset"
+#define POWER_ON_REASON_RST_BTN "reset button action"
+#define POWER_ON_REASON_CPU_CLK_FAIL "CPU clock failure"
+#define POWER_ON_REASON_XTAL_FAIL "crystal oscillator failure"
+#define POWER_ON_REASON_BROWN_OUT "brown-out reset"
+#define POWER_ON_REASON_UNKNOWN "unknown reason"
+
+#endif /* POWER_ON_REASON_H */
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index a427f13c757f..c0992a77feea 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -767,7 +767,6 @@ struct power_supply_battery_info {
int bti_resistance_tolerance;
};
-extern struct atomic_notifier_head power_supply_notifier;
extern int power_supply_reg_notifier(struct notifier_block *nb);
extern void power_supply_unreg_notifier(struct notifier_block *nb);
#if IS_ENABLED(CONFIG_POWER_SUPPLY)
diff --git a/include/linux/pr.h b/include/linux/pr.h
index 94ceec713afe..3003daec28a5 100644
--- a/include/linux/pr.h
+++ b/include/linux/pr.h
@@ -4,6 +4,18 @@
#include <uapi/linux/pr.h>
+struct pr_keys {
+ u32 generation;
+ u32 num_keys;
+ u64 keys[];
+};
+
+struct pr_held_reservation {
+ u64 key;
+ u32 generation;
+ enum pr_type type;
+};
+
struct pr_ops {
int (*pr_register)(struct block_device *bdev, u64 old_key, u64 new_key,
u32 flags);
@@ -14,6 +26,19 @@ struct pr_ops {
int (*pr_preempt)(struct block_device *bdev, u64 old_key, u64 new_key,
enum pr_type type, bool abort);
int (*pr_clear)(struct block_device *bdev, u64 key);
+ /*
+ * pr_read_keys - Read the registered keys and return them in the
+ * pr_keys->keys array. The keys array will have been allocated at the
+ * end of the pr_keys struct, and pr_keys->num_keys must be set to the
+ * number of keys the array can hold. If there are more than can fit
+ * in the array, success will still be returned and pr_keys->num_keys
+ * will reflect the total number of keys the device contains, so the
+ * caller can retry with a larger array.
+ */
+ int (*pr_read_keys)(struct block_device *bdev,
+ struct pr_keys *keys_info);
+ int (*pr_read_reservation)(struct block_device *bdev,
+ struct pr_held_reservation *rsv);
};
#endif /* LINUX_PR_H */
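
The pr_read_keys() contract implies an allocate/read/retry loop in callers. A
sketch under that assumption; real users normally reach these ops through
block-layer wrappers rather than calling the method directly:

static struct pr_keys *example_read_all_keys(struct block_device *bdev,
					     const struct pr_ops *ops)
{
	u32 num_keys = 16;	/* initial guess */
	struct pr_keys *keys;
	int ret;

retry:
	keys = kzalloc(struct_size(keys, keys, num_keys), GFP_KERNEL);
	if (!keys)
		return ERR_PTR(-ENOMEM);
	keys->num_keys = num_keys;

	ret = ops->pr_read_keys(bdev, keys);
	if (ret) {
		kfree(keys);
		return ERR_PTR(ret);
	}

	/* Success with a larger count means the array was too small: retry. */
	if (keys->num_keys > num_keys) {
		num_keys = keys->num_keys;
		kfree(keys);
		goto retry;
	}

	return keys;
}
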
diff --git a/include/linux/prandom.h b/include/linux/prandom.h
index f2ed5b72b3d6..f7f1e5251c67 100644
--- a/include/linux/prandom.h
+++ b/include/linux/prandom.h
@@ -10,7 +10,6 @@
#include <linux/types.h>
#include <linux/once.h>
-#include <linux/percpu.h>
#include <linux/random.h>
struct rnd_state {
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 0df425bf9bd7..7233e9cf1bab 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -8,7 +8,8 @@
*/
#include <linux/linkage.h>
-#include <linux/list.h>
+#include <linux/cleanup.h>
+#include <linux/types.h>
/*
* We put the hardirq and softirq counter into the preemption
@@ -98,14 +99,21 @@ static __always_inline unsigned char interrupt_context_level(void)
return level;
}
+/*
+ * These macro definitions avoid redundant invocations of preempt_count()
+ * because such invocations would result in redundant loads given that
+ * preempt_count() is commonly implemented with READ_ONCE().
+ */
+
#define nmi_count() (preempt_count() & NMI_MASK)
#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
#ifdef CONFIG_PREEMPT_RT
# define softirq_count() (current->softirq_disable_cnt & SOFTIRQ_MASK)
+# define irq_count() ((preempt_count() & (NMI_MASK | HARDIRQ_MASK)) | softirq_count())
#else
# define softirq_count() (preempt_count() & SOFTIRQ_MASK)
+# define irq_count() (preempt_count() & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_MASK))
#endif
-#define irq_count() (nmi_count() | hardirq_count() | softirq_count())
/*
* Macros to retrieve the current execution context:
@@ -118,7 +126,11 @@ static __always_inline unsigned char interrupt_context_level(void)
#define in_nmi() (nmi_count())
#define in_hardirq() (hardirq_count())
#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
-#define in_task() (!(in_nmi() | in_hardirq() | in_serving_softirq()))
+#ifdef CONFIG_PREEMPT_RT
+# define in_task() (!((preempt_count() & (NMI_MASK | HARDIRQ_MASK)) | in_serving_softirq()))
+#else
+# define in_task() (!(preempt_count() & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
+#endif
/*
* The following macros are deprecated and should not be used in new code:
@@ -348,7 +360,9 @@ void preempt_notifier_unregister(struct preempt_notifier *notifier);
static inline void preempt_notifier_init(struct preempt_notifier *notifier,
struct preempt_ops *ops)
{
- INIT_HLIST_NODE(&notifier->link);
+ /* INIT_HLIST_NODE() open coded, to avoid dependency on list.h */
+ notifier->link.next = NULL;
+ notifier->link.pprev = NULL;
notifier->ops = ops;
}
@@ -463,4 +477,8 @@ static __always_inline void preempt_enable_nested(void)
preempt_enable();
}
+DEFINE_LOCK_GUARD_0(preempt, preempt_disable(), preempt_enable())
+DEFINE_LOCK_GUARD_0(preempt_notrace, preempt_disable_notrace(), preempt_enable_notrace())
+DEFINE_LOCK_GUARD_0(migrate, migrate_disable(), migrate_enable())
+
#endif /* __LINUX_PREEMPT_H */
diff --git a/include/linux/prefetch.h b/include/linux/prefetch.h
index b83a3f944f28..b068e2e60939 100644
--- a/include/linux/prefetch.h
+++ b/include/linux/prefetch.h
@@ -25,11 +25,10 @@ struct page;
prefetch() should be defined by the architecture, if not, the
#define below provides a no-op define.
- There are 3 prefetch() macros:
+ There are 2 prefetch() macros:
prefetch(x) - prefetches the cacheline at "x" for read
prefetchw(x) - prefetches the cacheline at "x" for write
- spin_lock_prefetch(x) - prefetches the spinlock *x for taking
 there is also PREFETCH_STRIDE which is the architecture-preferred
"lookahead" size for prefetching streamed operations.
@@ -44,10 +43,6 @@ struct page;
#define prefetchw(x) __builtin_prefetch(x,1)
#endif
-#ifndef ARCH_HAS_SPINLOCK_PREFETCH
-#define spin_lock_prefetch(x) prefetchw(x)
-#endif
-
#ifndef PREFETCH_STRIDE
#define PREFETCH_STRIDE (4*L1_CACHE_BYTES)
#endif
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 0260f5ea98fe..0b2a89854440 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -65,6 +65,7 @@ struct proc_fs_info {
kgid_t pid_gid;
enum proc_hidepid hide_pid;
enum proc_pidonly pidonly;
+ struct rcu_head rcu;
};
static inline struct proc_fs_info *proc_sb_info(struct super_block *sb)
@@ -158,6 +159,9 @@ int proc_pid_arch_status(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task);
#endif /* CONFIG_PROC_PID_ARCH_STATUS */
+void arch_report_meminfo(struct seq_file *m);
+void arch_proc_pid_thread_features(struct seq_file *m, struct task_struct *task);
+
#else /* CONFIG_PROC_FS */
static inline void proc_root_init(void)
diff --git a/include/linux/property.h b/include/linux/property.h
index 66df1a15d518..e6516d0b7d52 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -10,6 +10,7 @@
#ifndef _LINUX_PROPERTY_H_
#define _LINUX_PROPERTY_H_
+#include <linux/args.h>
#include <linux/bits.h>
#include <linux/fwnode.h>
#include <linux/stddef.h>
@@ -79,12 +80,62 @@ int fwnode_property_match_string(const struct fwnode_handle *fwnode,
bool fwnode_device_is_available(const struct fwnode_handle *fwnode);
+static inline bool fwnode_device_is_big_endian(const struct fwnode_handle *fwnode)
+{
+ if (fwnode_property_present(fwnode, "big-endian"))
+ return true;
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) &&
+ fwnode_property_present(fwnode, "native-endian"))
+ return true;
+ return false;
+}
+
static inline
bool fwnode_device_is_compatible(const struct fwnode_handle *fwnode, const char *compat)
{
return fwnode_property_match_string(fwnode, "compatible", compat) >= 0;
}
+/**
+ * device_is_big_endian - check if a device has BE registers
+ * @dev: Pointer to the struct device
+ *
+ * Returns: true if the device has a "big-endian" property, or if the kernel
+ * was compiled for BE *and* the device has a "native-endian" property.
+ * Returns false otherwise.
+ *
+ * Callers would nominally use ioread32be/iowrite32be if
+ * device_is_big_endian() == true, or readl/writel otherwise.
+ */
+static inline bool device_is_big_endian(const struct device *dev)
+{
+ return fwnode_device_is_big_endian(dev_fwnode(dev));
+}
+
+/**
+ * device_is_compatible - match 'compatible' property of the device with a given string
+ * @dev: Pointer to the struct device
+ * @compat: The string to match 'compatible' property with
+ *
+ * Returns: true if matches, otherwise false.
+ */
+static inline bool device_is_compatible(const struct device *dev, const char *compat)
+{
+ return fwnode_device_is_compatible(dev_fwnode(dev), compat);
+}
+
+int fwnode_property_match_property_string(const struct fwnode_handle *fwnode,
+ const char *propname,
+ const char * const *array, size_t n);
+
+static inline
+int device_property_match_property_string(const struct device *dev,
+ const char *propname,
+ const char * const *array, size_t n)
+{
+ return fwnode_property_match_property_string(dev_fwnode(dev), propname, array, n);
+}
+
int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode,
const char *prop, const char *nargs_prop,
unsigned int nargs, unsigned int index,
@@ -96,6 +147,7 @@ struct fwnode_handle *fwnode_find_reference(const struct fwnode_handle *fwnode,
const char *fwnode_get_name(const struct fwnode_handle *fwnode);
const char *fwnode_get_name_prefix(const struct fwnode_handle *fwnode);
+bool fwnode_name_eq(const struct fwnode_handle *fwnode, const char *name);
struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode);
struct fwnode_handle *fwnode_get_next_parent(struct fwnode_handle *fwnode);
@@ -276,7 +328,7 @@ struct software_node_ref_args {
#define SOFTWARE_NODE_REFERENCE(_ref_, ...) \
(const struct software_node_ref_args) { \
.node = _ref_, \
- .nargs = ARRAY_SIZE(((u64[]){ 0, ##__VA_ARGS__ })) - 1, \
+ .nargs = COUNT_ARGS(__VA_ARGS__), \
.args = { __VA_ARGS__ }, \
}
@@ -476,6 +528,13 @@ struct software_node {
const struct property_entry *properties;
};
+#define SOFTWARE_NODE(_name_, _properties_, _parent_) \
+ (struct software_node) { \
+ .name = _name_, \
+ .properties = _properties_, \
+ .parent = _parent_, \
+ }
+
bool is_software_node(const struct fwnode_handle *fwnode);
const struct software_node *
to_software_node(const struct fwnode_handle *fwnode);
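
device_is_big_endian() is meant to be sampled once and used to pick accessors,
as its kernel-doc says. A sketch; example_priv and the register layout are
illustrative:

struct example_priv {
	void __iomem *regs;
	bool be;	/* latched once from device_is_big_endian() at probe */
};

static u32 example_read_reg(struct example_priv *priv, unsigned int offset)
{
	if (priv->be)
		return ioread32be(priv->regs + offset);

	return readl(priv->regs + offset);
}
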
diff --git a/include/linux/pruss_driver.h b/include/linux/pruss_driver.h
index ecfded30ed05..c9a31c567e85 100644
--- a/include/linux/pruss_driver.h
+++ b/include/linux/pruss_driver.h
@@ -9,7 +9,55 @@
#ifndef _PRUSS_DRIVER_H_
#define _PRUSS_DRIVER_H_
+#include <linux/mutex.h>
+#include <linux/remoteproc/pruss.h>
#include <linux/types.h>
+#include <linux/err.h>
+
+/*
+ * enum pruss_gp_mux_sel - PRUSS GPI/O Mux modes for the
+ * PRUSS_GPCFG0/1 registers
+ *
+ * NOTE: The below defines are the most common values, but there
+ * are some exceptions like on 66AK2G, where the RESERVED and MII2
+ * values are interchanged. Also, this bit-field does not exist on
+ * AM335x SoCs
+ */
+enum pruss_gp_mux_sel {
+ PRUSS_GP_MUX_SEL_GP,
+ PRUSS_GP_MUX_SEL_ENDAT,
+ PRUSS_GP_MUX_SEL_RESERVED,
+ PRUSS_GP_MUX_SEL_SD,
+ PRUSS_GP_MUX_SEL_MII2,
+ PRUSS_GP_MUX_SEL_MAX,
+};
+
+/*
+ * enum pruss_gpi_mode - PRUSS GPI configuration modes, used
+ * to program the PRUSS_GPCFG0/1 registers
+ */
+enum pruss_gpi_mode {
+ PRUSS_GPI_MODE_DIRECT,
+ PRUSS_GPI_MODE_PARALLEL,
+ PRUSS_GPI_MODE_28BIT_SHIFT,
+ PRUSS_GPI_MODE_MII,
+ PRUSS_GPI_MODE_MAX,
+};
+
+/**
+ * enum pru_type - PRU core type identifier
+ *
+ * @PRU_TYPE_PRU: Programmable Real-time Unit
+ * @PRU_TYPE_RTU: Auxiliary Programmable Real-Time Unit
+ * @PRU_TYPE_TX_PRU: Transmit Programmable Real-Time Unit
+ * @PRU_TYPE_MAX: just keep this one at the end
+ */
+enum pru_type {
+ PRU_TYPE_PRU,
+ PRU_TYPE_RTU,
+ PRU_TYPE_TX_PRU,
+ PRU_TYPE_MAX,
+};
/*
* enum pruss_mem - PRUSS memory range identifiers
@@ -39,6 +87,8 @@ struct pruss_mem_region {
* @cfg_base: base iomap for CFG region
* @cfg_regmap: regmap for config region
* @mem_regions: data for each of the PRUSS memory regions
+ * @mem_in_use: to indicate if memory resource is in use
+ * @lock: mutex to serialize access to resources
* @core_clk_mux: clk handle for PRUSS CORE_CLK_MUX
* @iep_clk_mux: clk handle for PRUSS IEP_CLK_MUX
*/
@@ -47,8 +97,81 @@ struct pruss {
void __iomem *cfg_base;
struct regmap *cfg_regmap;
struct pruss_mem_region mem_regions[PRUSS_MEM_MAX];
+ struct pruss_mem_region *mem_in_use[PRUSS_MEM_MAX];
+ struct mutex lock; /* PRU resource lock */
struct clk *core_clk_mux;
struct clk *iep_clk_mux;
};
+#if IS_ENABLED(CONFIG_TI_PRUSS)
+
+struct pruss *pruss_get(struct rproc *rproc);
+void pruss_put(struct pruss *pruss);
+int pruss_request_mem_region(struct pruss *pruss, enum pruss_mem mem_id,
+ struct pruss_mem_region *region);
+int pruss_release_mem_region(struct pruss *pruss,
+ struct pruss_mem_region *region);
+int pruss_cfg_get_gpmux(struct pruss *pruss, enum pruss_pru_id pru_id, u8 *mux);
+int pruss_cfg_set_gpmux(struct pruss *pruss, enum pruss_pru_id pru_id, u8 mux);
+int pruss_cfg_gpimode(struct pruss *pruss, enum pruss_pru_id pru_id,
+ enum pruss_gpi_mode mode);
+int pruss_cfg_miirt_enable(struct pruss *pruss, bool enable);
+int pruss_cfg_xfr_enable(struct pruss *pruss, enum pru_type pru_type,
+ bool enable);
+
+#else
+
+static inline struct pruss *pruss_get(struct rproc *rproc)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void pruss_put(struct pruss *pruss) { }
+
+static inline int pruss_request_mem_region(struct pruss *pruss,
+ enum pruss_mem mem_id,
+ struct pruss_mem_region *region)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int pruss_release_mem_region(struct pruss *pruss,
+ struct pruss_mem_region *region)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int pruss_cfg_get_gpmux(struct pruss *pruss,
+				      enum pruss_pru_id pru_id, u8 *mux)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int pruss_cfg_set_gpmux(struct pruss *pruss,
+				      enum pruss_pru_id pru_id, u8 mux)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int pruss_cfg_gpimode(struct pruss *pruss,
+				    enum pruss_pru_id pru_id,
+				    enum pruss_gpi_mode mode)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int pruss_cfg_miirt_enable(struct pruss *pruss, bool enable)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int pruss_cfg_xfr_enable(struct pruss *pruss,
+				       enum pru_type pru_type,
+				       bool enable)
+{
+	return -EOPNOTSUPP;
+}
+
+#endif /* CONFIG_TI_PRUSS */
+
#endif /* _PRUSS_DRIVER_H_ */
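
Putting the new exports together, a PRU client driver looks up the PRUSS from
its rproc handle, claims a memory region, then configures the GP mux. A sketch;
PRUSS_MEM_DRAM0 is assumed from enum pruss_mem in this header, and PRUSS_PRU0
from linux/remoteproc/pruss.h:

static int example_pruss_setup(struct rproc *rproc)
{
	struct pruss_mem_region region;
	struct pruss *pruss;
	int ret;

	pruss = pruss_get(rproc);
	if (IS_ERR(pruss))
		return PTR_ERR(pruss);

	ret = pruss_request_mem_region(pruss, PRUSS_MEM_DRAM0, &region);
	if (ret)
		goto err_put;

	ret = pruss_cfg_set_gpmux(pruss, PRUSS_PRU0, PRUSS_GP_MUX_SEL_GP);
	if (ret)
		goto err_release;

	return 0;

err_release:
	pruss_release_mem_region(pruss, &region);
err_put:
	pruss_put(pruss);
	return ret;
}
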
diff --git a/include/linux/pseudo_fs.h b/include/linux/pseudo_fs.h
index eceda1d1407a..730f77381d55 100644
--- a/include/linux/pseudo_fs.h
+++ b/include/linux/pseudo_fs.h
@@ -5,7 +5,7 @@
struct pseudo_fs_context {
const struct super_operations *ops;
- const struct xattr_handler **xattr;
+ const struct xattr_handler * const *xattr;
const struct dentry_operations *dops;
unsigned long magic;
};
diff --git a/include/linux/psi.h b/include/linux/psi.h
index ab26200c2803..e0745873e3f2 100644
--- a/include/linux/psi.h
+++ b/include/linux/psi.h
@@ -23,8 +23,9 @@ void psi_memstall_enter(unsigned long *flags);
void psi_memstall_leave(unsigned long *flags);
int psi_show(struct seq_file *s, struct psi_group *group, enum psi_res res);
-struct psi_trigger *psi_trigger_create(struct psi_group *group,
- char *buf, enum psi_res res, struct file *file);
+struct psi_trigger *psi_trigger_create(struct psi_group *group, char *buf,
+ enum psi_res res, struct file *file,
+ struct kernfs_open_file *of);
void psi_trigger_destroy(struct psi_trigger *t);
__poll_t psi_trigger_poll(void **trigger_ptr, struct file *file,
diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h
index 040c089581c6..f1fd3a8044e0 100644
--- a/include/linux/psi_types.h
+++ b/include/linux/psi_types.h
@@ -137,6 +137,9 @@ struct psi_trigger {
/* Wait queue for polling */
wait_queue_head_t event_wait;
+ /* Kernfs file for cgroup triggers */
+ struct kernfs_open_file *of;
+
/* Pending event flag */
int event;
diff --git a/include/linux/psp-platform-access.h b/include/linux/psp-platform-access.h
index 75da8f5f7ad8..c1dc87fc536b 100644
--- a/include/linux/psp-platform-access.h
+++ b/include/linux/psp-platform-access.h
@@ -8,6 +8,10 @@
enum psp_platform_access_msg {
PSP_CMD_NONE = 0x0,
PSP_I2C_REQ_BUS_CMD = 0x64,
+ PSP_DYNAMIC_BOOST_GET_NONCE,
+ PSP_DYNAMIC_BOOST_SET_UID,
+ PSP_DYNAMIC_BOOST_GET_PARAMETER,
+ PSP_DYNAMIC_BOOST_SET_PARAMETER,
};
struct psp_req_buffer_hdr {
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index fdffa6a98d79..1ef4e0f9bd2a 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -77,8 +77,14 @@ struct ptp_system_timestamp {
* nominal frequency in parts per million, but with a
* 16 bit binary fractional field.
*
- * @adjphase: Adjusts the phase offset of the hardware clock.
- * parameter delta: Desired change in nanoseconds.
+ * @adjphase: Indicates that the PHC should use an internal servo
+ * algorithm to correct the provided phase offset.
+ * parameter delta: PHC servo phase adjustment target
+ * in nanoseconds.
+ *
+ * @getmaxphase: Advertises maximum offset that can be provided
+ * to the hardware clock's phase control functionality
+ * through adjphase.
*
* @adjtime: Shifts the time of the hardware clock.
* parameter delta: Desired change in nanoseconds.
@@ -169,6 +175,7 @@ struct ptp_clock_info {
struct ptp_pin_desc *pin_config;
int (*adjfine)(struct ptp_clock_info *ptp, long scaled_ppm);
int (*adjphase)(struct ptp_clock_info *ptp, s32 phase);
+ s32 (*getmaxphase)(struct ptp_clock_info *ptp);
int (*adjtime)(struct ptp_clock_info *ptp, s64 delta);
int (*gettime64)(struct ptp_clock_info *ptp, struct timespec64 *ts);
int (*gettimex64)(struct ptp_clock_info *ptp, struct timespec64 *ts,
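
A driver exposing phase control would now pair adjphase with getmaxphase so the
core can validate offsets up front. A hedged sketch; the +/-500 us limit is
illustrative, not from this patch:

#define EXAMPLE_MAX_PHASE_NS	500000	/* illustrative hardware limit */

static s32 example_getmaxphase(struct ptp_clock_info *ptp)
{
	return EXAMPLE_MAX_PHASE_NS;
}

static int example_adjphase(struct ptp_clock_info *ptp, s32 phase)
{
	/* Defensive recheck; the core is expected to clamp via getmaxphase. */
	if (phase > EXAMPLE_MAX_PHASE_NS || phase < -EXAMPLE_MAX_PHASE_NS)
		return -ERANGE;

	/* ...hand the target offset to the PHC's internal servo... */
	return 0;
}
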
diff --git a/include/linux/ptp_mock.h b/include/linux/ptp_mock.h
new file mode 100644
index 000000000000..72eb401034d9
--- /dev/null
+++ b/include/linux/ptp_mock.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Mock-up PTP Hardware Clock driver for virtual network devices
+ *
+ * Copyright 2023 NXP
+ */
+
+#ifndef _PTP_MOCK_H_
+#define _PTP_MOCK_H_
+
+struct device;
+struct mock_phc;
+
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK_MOCK)
+
+struct mock_phc *mock_phc_create(struct device *dev);
+void mock_phc_destroy(struct mock_phc *phc);
+int mock_phc_index(struct mock_phc *phc);
+
+#else
+
+static inline struct mock_phc *mock_phc_create(struct device *dev)
+{
+ return NULL;
+}
+
+static inline void mock_phc_destroy(struct mock_phc *phc)
+{
+}
+
+static inline int mock_phc_index(struct mock_phc *phc)
+{
+ return -1;
+}
+
+#endif
+
+#endif /* _PTP_MOCK_H_ */
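
Because the stubs degrade gracefully, callers can use the mock PHC
unconditionally. A sketch that tolerates both the stubbed and error cases:

static int example_attach_mock_phc(struct device *dev)
{
	struct mock_phc *phc = mock_phc_create(dev);

	if (IS_ERR_OR_NULL(phc))
		return 0;	/* stub returns NULL; treat creation errors as non-fatal here */

	dev_info(dev, "mock PHC index %d\n", mock_phc_index(phc));
	mock_phc_destroy(phc);
	return 0;
}
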
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index eaaef3ffec22..90507d4afcd6 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -393,6 +393,10 @@ static inline void user_single_step_report(struct pt_regs *regs)
#define current_user_stack_pointer() user_stack_pointer(current_pt_regs())
#endif
+#ifndef exception_ip
+#define exception_ip(x) instruction_pointer(x)
+#endif
+
extern int task_current_syscall(struct task_struct *target, struct syscall_info *info);
extern void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact);
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 04ae1d9073a7..fcc2c4496f73 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -41,8 +41,8 @@ struct pwm_args {
};
enum {
- PWMF_REQUESTED = 1 << 0,
- PWMF_EXPORTED = 1 << 1,
+ PWMF_REQUESTED = 0,
+ PWMF_EXPORTED = 1,
};
/*
@@ -69,9 +69,7 @@ struct pwm_state {
* @label: name of the PWM device
* @flags: flags associated with the PWM device
* @hwpwm: per-chip relative index of the PWM device
- * @pwm: global index of the PWM device
* @chip: PWM chip providing this PWM device
- * @chip_data: chip-private data associated with the PWM device
* @args: PWM arguments
* @state: last applied state
* @last: last implemented state (for PWM_DEBUG)
@@ -80,9 +78,7 @@ struct pwm_device {
const char *label;
unsigned long flags;
unsigned int hwpwm;
- unsigned int pwm;
struct pwm_chip *chip;
- void *chip_data;
struct pwm_args args;
struct pwm_state state;
@@ -95,8 +91,8 @@ struct pwm_device {
* @state: state to fill with the current PWM state
*
* The returned PWM state represents the state that was applied by a previous call to
- * pwm_apply_state(). Drivers may have to slightly tweak that state before programming it to
- * hardware. If pwm_apply_state() was never called, this returns either the current hardware
+ * pwm_apply_might_sleep(). Drivers may have to slightly tweak that state before programming it to
+ * hardware. If pwm_apply_might_sleep() was never called, this returns either the current hardware
* state (if supported) or the default settings.
*/
static inline void pwm_get_state(const struct pwm_device *pwm,
@@ -114,12 +110,6 @@ static inline bool pwm_is_enabled(const struct pwm_device *pwm)
return state.enabled;
}
-static inline void pwm_set_period(struct pwm_device *pwm, u64 period)
-{
- if (pwm)
- pwm->state.period = period;
-}
-
static inline u64 pwm_get_period(const struct pwm_device *pwm)
{
struct pwm_state state;
@@ -129,12 +119,6 @@ static inline u64 pwm_get_period(const struct pwm_device *pwm)
return state.period;
}
-static inline void pwm_set_duty_cycle(struct pwm_device *pwm, unsigned int duty)
-{
- if (pwm)
- pwm->state.duty_cycle = duty;
-}
-
static inline u64 pwm_get_duty_cycle(const struct pwm_device *pwm)
{
struct pwm_state state;
@@ -160,20 +144,20 @@ static inline void pwm_get_args(const struct pwm_device *pwm,
}
/**
- * pwm_init_state() - prepare a new state to be applied with pwm_apply_state()
+ * pwm_init_state() - prepare a new state to be applied with pwm_apply_might_sleep()
* @pwm: PWM device
* @state: state to fill with the prepared PWM state
*
 * This function prepares a state that can later be tweaked and applied
- * to the PWM device with pwm_apply_state(). This is a convenient function
+ * to the PWM device with pwm_apply_might_sleep(). This is a convenient function
 * that first retrieves the current PWM state and then replaces the period
* and polarity fields with the reference values defined in pwm->args.
* Once the function returns, you can adjust the ->enabled and ->duty_cycle
- * fields according to your needs before calling pwm_apply_state().
+ * fields according to your needs before calling pwm_apply_might_sleep().
*
* ->duty_cycle is initially set to zero to avoid cases where the current
 * ->duty_cycle value exceeds the pwm_args->period one, which would trigger
- * an error if the user calls pwm_apply_state() without adjusting ->duty_cycle
+ * an error if the user calls pwm_apply_might_sleep() without adjusting ->duty_cycle
* first.
*/
static inline void pwm_init_state(const struct pwm_device *pwm,
@@ -229,7 +213,7 @@ pwm_get_relative_duty_cycle(const struct pwm_state *state, unsigned int scale)
*
* pwm_init_state(pwm, &state);
* pwm_set_relative_duty_cycle(&state, 50, 100);
- * pwm_apply_state(pwm, &state);
+ * pwm_apply_might_sleep(pwm, &state);
*
 * This function returns -EINVAL if @duty_cycle and/or @scale are
* inconsistent (@scale == 0 or @duty_cycle > @scale).
@@ -267,7 +251,6 @@ struct pwm_capture {
* @get_state: get the current PWM state. This function is only
* called once per PWM device when the PWM chip is
* registered.
- * @owner: helps prevent removal of modules exporting active PWMs
*/
struct pwm_ops {
int (*request)(struct pwm_chip *chip, struct pwm_device *pwm);
@@ -278,38 +261,40 @@ struct pwm_ops {
const struct pwm_state *state);
int (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_state *state);
- struct module *owner;
};
/**
* struct pwm_chip - abstract a PWM controller
* @dev: device providing the PWMs
* @ops: callbacks for this PWM controller
- * @base: number of first PWM controlled by this chip
+ * @owner: module providing this chip
+ * @id: unique number of this PWM chip
* @npwm: number of PWMs controlled by this chip
* @of_xlate: request a PWM device given a device tree PWM specifier
* @of_pwm_n_cells: number of cells expected in the device tree PWM specifier
- * @list: list node for internal use
+ * @atomic: can the driver's ->apply() be called in atomic context
* @pwms: array of PWM devices allocated by the framework
*/
struct pwm_chip {
struct device *dev;
const struct pwm_ops *ops;
- int base;
+ struct module *owner;
+ unsigned int id;
unsigned int npwm;
- struct pwm_device * (*of_xlate)(struct pwm_chip *pc,
+ struct pwm_device * (*of_xlate)(struct pwm_chip *chip,
const struct of_phandle_args *args);
unsigned int of_pwm_n_cells;
+ bool atomic;
/* only used internally by the PWM framework */
- struct list_head list;
struct pwm_device *pwms;
};
#if IS_ENABLED(CONFIG_PWM)
/* PWM user APIs */
-int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state);
+int pwm_apply_might_sleep(struct pwm_device *pwm, const struct pwm_state *state);
+int pwm_apply_atomic(struct pwm_device *pwm, const struct pwm_state *state);
int pwm_adjust_config(struct pwm_device *pwm);
/**
@@ -337,7 +322,7 @@ static inline int pwm_config(struct pwm_device *pwm, int duty_ns,
state.duty_cycle = duty_ns;
state.period = period_ns;
- return pwm_apply_state(pwm, &state);
+ return pwm_apply_might_sleep(pwm, &state);
}
/**
@@ -358,7 +343,7 @@ static inline int pwm_enable(struct pwm_device *pwm)
return 0;
state.enabled = true;
- return pwm_apply_state(pwm, &state);
+ return pwm_apply_might_sleep(pwm, &state);
}
/**
@@ -377,27 +362,38 @@ static inline void pwm_disable(struct pwm_device *pwm)
return;
state.enabled = false;
- pwm_apply_state(pwm, &state);
+ pwm_apply_might_sleep(pwm, &state);
+}
+
+/**
+ * pwm_might_sleep() - is pwm_apply_atomic() supported?
+ * @pwm: PWM device
+ *
+ * Returns: false if pwm_apply_atomic() can be called from atomic context.
+ */
+static inline bool pwm_might_sleep(struct pwm_device *pwm)
+{
+ return !pwm->chip->atomic;
}
/* PWM provider APIs */
int pwm_capture(struct pwm_device *pwm, struct pwm_capture *result,
unsigned long timeout);
-int pwm_set_chip_data(struct pwm_device *pwm, void *data);
-void *pwm_get_chip_data(struct pwm_device *pwm);
-int pwmchip_add(struct pwm_chip *chip);
+int __pwmchip_add(struct pwm_chip *chip, struct module *owner);
+#define pwmchip_add(chip) __pwmchip_add(chip, THIS_MODULE)
void pwmchip_remove(struct pwm_chip *chip);
-int devm_pwmchip_add(struct device *dev, struct pwm_chip *chip);
+int __devm_pwmchip_add(struct device *dev, struct pwm_chip *chip, struct module *owner);
+#define devm_pwmchip_add(dev, chip) __devm_pwmchip_add(dev, chip, THIS_MODULE)
struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
unsigned int index,
const char *label);
-struct pwm_device *of_pwm_xlate_with_flags(struct pwm_chip *pc,
+struct pwm_device *of_pwm_xlate_with_flags(struct pwm_chip *chip,
const struct of_phandle_args *args);
-struct pwm_device *of_pwm_single_xlate(struct pwm_chip *pc,
+struct pwm_device *of_pwm_single_xlate(struct pwm_chip *chip,
const struct of_phandle_args *args);
struct pwm_device *pwm_get(struct device *dev, const char *con_id);
@@ -408,16 +404,27 @@ struct pwm_device *devm_fwnode_pwm_get(struct device *dev,
struct fwnode_handle *fwnode,
const char *con_id);
#else
-static inline int pwm_apply_state(struct pwm_device *pwm,
- const struct pwm_state *state)
+static inline bool pwm_might_sleep(struct pwm_device *pwm)
+{
+ return true;
+}
+
+static inline int pwm_apply_might_sleep(struct pwm_device *pwm,
+ const struct pwm_state *state)
{
might_sleep();
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
+}
+
+static inline int pwm_apply_atomic(struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ return -EOPNOTSUPP;
}
static inline int pwm_adjust_config(struct pwm_device *pwm)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int pwm_config(struct pwm_device *pwm, int duty_ns,
@@ -445,16 +452,6 @@ static inline int pwm_capture(struct pwm_device *pwm,
return -EINVAL;
}
-static inline int pwm_set_chip_data(struct pwm_device *pwm, void *data)
-{
- return -EINVAL;
-}
-
-static inline void *pwm_get_chip_data(struct pwm_device *pwm)
-{
- return NULL;
-}
-
static inline int pwmchip_add(struct pwm_chip *chip)
{
return -EINVAL;
@@ -536,7 +533,14 @@ static inline void pwm_apply_args(struct pwm_device *pwm)
state.period = pwm->args.period;
state.usage_power = false;
- pwm_apply_state(pwm, &state);
+ pwm_apply_might_sleep(pwm, &state);
+}
+
+/* only for backwards-compatibility, new code should not use this */
+static inline int pwm_apply_state(struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ return pwm_apply_might_sleep(pwm, state);
}
struct pwm_lookup {
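
With the apply path split, a consumer checks pwm_might_sleep() once and picks
the variant matching its context. A sketch:

static int example_pwm_update(struct pwm_device *pwm, u64 duty_ns)
{
	struct pwm_state state;

	pwm_get_state(pwm, &state);
	state.duty_cycle = duty_ns;
	state.enabled = true;

	if (pwm_might_sleep(pwm))
		return pwm_apply_might_sleep(pwm, &state);	/* process context only */

	return pwm_apply_atomic(pwm, &state);	/* chip->atomic: safe in IRQ context */
}
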
diff --git a/include/linux/qed/qed_fcoe_if.h b/include/linux/qed/qed_fcoe_if.h
index 90e3045b2dcb..0d3b6ed21628 100644
--- a/include/linux/qed/qed_fcoe_if.h
+++ b/include/linux/qed/qed_fcoe_if.h
@@ -67,9 +67,6 @@ struct qed_fcoe_cb_ops {
u32 (*get_login_failures)(void *cookie);
};
-void qed_fcoe_set_pf_params(struct qed_dev *cdev,
- struct qed_fcoe_pf_params *params);
-
/**
* struct qed_fcoe_ops - qed FCoE operations.
* @common: common operations pointer
diff --git a/include/linux/quota.h b/include/linux/quota.h
index fd692b4a41d5..07071e64abf3 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -285,7 +285,9 @@ static inline void dqstats_dec(unsigned int type)
#define DQ_FAKE_B 3 /* no limits only usage */
#define DQ_READ_B 4 /* dquot was read into memory */
#define DQ_ACTIVE_B 5 /* dquot is active (dquot_release not called) */
-#define DQ_LASTSET_B 6 /* Following 6 bits (see QIF_) are reserved\
+#define DQ_RELEASING_B 6 /* dquot is in releasing_dquots list waiting
+ * to be cleaned up */
+#define DQ_LASTSET_B 7 /* Following 6 bits (see QIF_) are reserved\
* for the mask of entries set via SETQUOTA\
* quotactl. They are set under dq_data_lock\
* and the quota format handling dquot can\
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 11a4becff3a9..06cc8888199e 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -57,7 +57,7 @@ static inline bool dquot_is_busy(struct dquot *dquot)
{
if (test_bit(DQ_MOD_B, &dquot->dq_flags))
return true;
- if (atomic_read(&dquot->dq_count) > 1)
+ if (atomic_read(&dquot->dq_count) > 0)
return true;
return false;
}
@@ -74,7 +74,7 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags);
int dquot_alloc_inode(struct inode *inode);
-int dquot_claim_space_nodirty(struct inode *inode, qsize_t number);
+void dquot_claim_space_nodirty(struct inode *inode, qsize_t number);
void dquot_free_inode(struct inode *inode);
void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number);
@@ -257,10 +257,9 @@ static inline void __dquot_free_space(struct inode *inode, qsize_t number,
inode_sub_bytes(inode, number);
}
-static inline int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
+static inline void dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
{
inode_add_bytes(inode, number);
- return 0;
}
static inline int dquot_reclaim_space_nodirty(struct inode *inode,
@@ -358,14 +357,10 @@ static inline int dquot_reserve_block(struct inode *inode, qsize_t nr)
DQUOT_SPACE_WARN|DQUOT_SPACE_RESERVE);
}
-static inline int dquot_claim_block(struct inode *inode, qsize_t nr)
+static inline void dquot_claim_block(struct inode *inode, qsize_t nr)
{
- int ret;
-
- ret = dquot_claim_space_nodirty(inode, nr << inode->i_blkbits);
- if (!ret)
- mark_inode_dirty_sync(inode);
- return ret;
+ dquot_claim_space_nodirty(inode, nr << inode->i_blkbits);
+ mark_inode_dirty_sync(inode);
}
static inline void dquot_reclaim_block(struct inode *inode, qsize_t nr)
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index f29aaaf2eb21..98030accf641 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -84,8 +84,6 @@ extern const struct raid6_calls raid6_intx1;
extern const struct raid6_calls raid6_intx2;
extern const struct raid6_calls raid6_intx4;
extern const struct raid6_calls raid6_intx8;
-extern const struct raid6_calls raid6_intx16;
-extern const struct raid6_calls raid6_intx32;
extern const struct raid6_calls raid6_mmxx1;
extern const struct raid6_calls raid6_mmxx2;
extern const struct raid6_calls raid6_sse1x1;
@@ -108,6 +106,8 @@ extern const struct raid6_calls raid6_vpermxor1;
extern const struct raid6_calls raid6_vpermxor2;
extern const struct raid6_calls raid6_vpermxor4;
extern const struct raid6_calls raid6_vpermxor8;
+extern const struct raid6_calls raid6_lsx;
+extern const struct raid6_calls raid6_lasx;
struct raid6_recov_calls {
void (*data2)(int, size_t, int, int, void **);
@@ -123,6 +123,8 @@ extern const struct raid6_recov_calls raid6_recov_avx2;
extern const struct raid6_recov_calls raid6_recov_avx512;
extern const struct raid6_recov_calls raid6_recov_s390xc;
extern const struct raid6_recov_calls raid6_recov_neon;
+extern const struct raid6_recov_calls raid6_recov_lsx;
+extern const struct raid6_recov_calls raid6_recov_lasx;
extern const struct raid6_calls raid6_neonx1;
extern const struct raid6_calls raid6_neonx2;
diff --git a/include/linux/raid_class.h b/include/linux/raid_class.h
index 6a9b177d5c41..e50416ba9cd9 100644
--- a/include/linux/raid_class.h
+++ b/include/linux/raid_class.h
@@ -77,7 +77,3 @@ DEFINE_RAID_ATTRIBUTE(enum raid_state, state)
struct raid_template *raid_class_attach(struct raid_function_template *);
void raid_class_release(struct raid_template *);
-
-int __must_check raid_component_add(struct raid_template *, struct device *,
- struct device *);
-
diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h
index 917528d102c4..d506dc63dd47 100644
--- a/include/linux/ramfs.h
+++ b/include/linux/ramfs.h
@@ -7,6 +7,7 @@
struct inode *ramfs_get_inode(struct super_block *sb, const struct inode *dir,
umode_t mode, dev_t dev);
extern int ramfs_init_fs_context(struct fs_context *fc);
+extern void ramfs_kill_sb(struct super_block *sb);
#ifdef CONFIG_MMU
static inline int
diff --git a/include/linux/range.h b/include/linux/range.h
index 7efb6a9b069b..6ad0b73cb7ad 100644
--- a/include/linux/range.h
+++ b/include/linux/range.h
@@ -31,12 +31,4 @@ int clean_sort_range(struct range *range, int az);
void sort_range(struct range *range, int nr_range);
-#define MAX_RESOURCE ((resource_size_t)~0)
-static inline resource_size_t cap_resource(u64 val)
-{
- if (val > MAX_RESOURCE)
- return MAX_RESOURCE;
-
- return val;
-}
#endif
diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
index 7ee7ed5de722..6dbc5a1bf6a8 100644
--- a/include/linux/rbtree_augmented.h
+++ b/include/linux/rbtree_augmented.h
@@ -60,6 +60,32 @@ rb_insert_augmented_cached(struct rb_node *node,
rb_insert_augmented(node, &root->rb_root, augment);
}
+static __always_inline struct rb_node *
+rb_add_augmented_cached(struct rb_node *node, struct rb_root_cached *tree,
+ bool (*less)(struct rb_node *, const struct rb_node *),
+ const struct rb_augment_callbacks *augment)
+{
+ struct rb_node **link = &tree->rb_root.rb_node;
+ struct rb_node *parent = NULL;
+ bool leftmost = true;
+
+ while (*link) {
+ parent = *link;
+ if (less(node, parent)) {
+ link = &parent->rb_left;
+ } else {
+ link = &parent->rb_right;
+ leftmost = false;
+ }
+ }
+
+ rb_link_node(node, parent, link);
+ augment->propagate(parent, NULL); /* suboptimal */
+ rb_insert_augmented_cached(node, tree, leftmost, augment);
+
+ return leftmost ? node : NULL;
+}
+
/*
* Template for declaring augmented rbtree callbacks (generic case)
*
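
rb_add_augmented_cached() folds the insertion walk, the augmented fixup, and
the leftmost-cache update into one call. A sketch; example_node and example_cb
(e.g. as generated by RB_DECLARE_CALLBACKS_MAX()) are illustrative:

struct example_node {
	struct rb_node rb;
	u64 key;
	u64 subtree_max;	/* augmented value, maintained by the callbacks */
};

static bool example_less(struct rb_node *a, const struct rb_node *b)
{
	return rb_entry(a, struct example_node, rb)->key <
	       rb_entry(b, struct example_node, rb)->key;
}

/* example_cb: hypothetical struct rb_augment_callbacks for subtree_max. */
static void example_insert(struct example_node *n, struct rb_root_cached *tree)
{
	/* Returns @n only if it became the new leftmost (cached) node. */
	if (rb_add_augmented_cached(&n->rb, tree, example_less, &example_cb))
		pr_debug("new leftmost node\n");
}
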
diff --git a/include/linux/rbtree_latch.h b/include/linux/rbtree_latch.h
index 3d1a9e716b80..6a0999c26c7c 100644
--- a/include/linux/rbtree_latch.h
+++ b/include/linux/rbtree_latch.h
@@ -206,7 +206,7 @@ latch_tree_find(void *key, struct latch_tree_root *root,
do {
seq = raw_read_seqcount_latch(&root->seq);
node = __lt_find(key, root, seq & 1, ops->comp);
- } while (read_seqcount_latch_retry(&root->seq, seq));
+ } while (raw_read_seqcount_latch_retry(&root->seq, seq));
return node;
}
diff --git a/include/linux/rcu_notifier.h b/include/linux/rcu_notifier.h
new file mode 100644
index 000000000000..5640f024773b
--- /dev/null
+++ b/include/linux/rcu_notifier.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Read-Copy Update notifiers, initially RCU CPU stall notifier.
+ * Separate from rcupdate.h to avoid #include loops.
+ *
+ * Copyright (C) 2023 Paul E. McKenney.
+ */
+
+#ifndef __LINUX_RCU_NOTIFIER_H
+#define __LINUX_RCU_NOTIFIER_H
+
+// Actions for RCU CPU stall notifier calls.
+#define RCU_STALL_NOTIFY_NORM 1
+#define RCU_STALL_NOTIFY_EXP 2
+
+#if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER)
+
+#include <linux/notifier.h>
+#include <linux/types.h>
+
+int rcu_stall_chain_notifier_register(struct notifier_block *n);
+int rcu_stall_chain_notifier_unregister(struct notifier_block *n);
+
+#else // #if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER)
+
+// No RCU CPU stall warnings in Tiny RCU.
+static inline int rcu_stall_chain_notifier_register(struct notifier_block *n) { return -EEXIST; }
+static inline int rcu_stall_chain_notifier_unregister(struct notifier_block *n) { return -ENOENT; }
+
+#endif // #else // #if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER)
+
+#endif /* __LINUX_RCU_NOTIFIER_H */
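
Consumers register an ordinary notifier_block whose callback receives one of
the two actions above. A sketch:

#include <linux/rcu_notifier.h>

static int example_stall_cb(struct notifier_block *nb, unsigned long action,
			    void *data)
{
	pr_warn("RCU stall (%s grace period)\n",
		action == RCU_STALL_NOTIFY_EXP ? "expedited" : "normal");
	return NOTIFY_OK;
}

static struct notifier_block example_stall_nb = {
	.notifier_call = example_stall_cb,
};

static int __init example_stall_init(void)
{
	/* The stub variant returns -EEXIST when the notifier is configured out. */
	return rcu_stall_chain_notifier_register(&example_stall_nb);
}
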
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index d29740be4833..3dc1e58865f7 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -355,7 +355,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
})
/**
- * list_next_or_null_rcu - get the first element from a list
+ * list_next_or_null_rcu - get the next element from a list
* @head: the head for the list.
* @ptr: the list head to take the next element from.
* @type: the type of the struct this is embedded in.
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
index ba4c00dd8005..89186c499dd4 100644
--- a/include/linux/rculist_nulls.h
+++ b/include/linux/rculist_nulls.h
@@ -101,7 +101,7 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
{
struct hlist_nulls_node *first = h->first;
- n->next = first;
+ WRITE_ONCE(n->next, first);
WRITE_ONCE(n->pprev, &h->first);
rcu_assign_pointer(hlist_nulls_first_rcu(h), n);
if (!is_a_nulls(first))
@@ -137,7 +137,7 @@ static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
last = i;
if (last) {
- n->next = last->next;
+ WRITE_ONCE(n->next, last->next);
n->pprev = &last->next;
rcu_assign_pointer(hlist_nulls_next_rcu(last), n);
} else {
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index dcd2cf1e8326..0746b1b0b663 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -27,15 +27,13 @@
#include <linux/preempt.h>
#include <linux/bottom_half.h>
#include <linux/lockdep.h>
+#include <linux/cleanup.h>
#include <asm/processor.h>
#include <linux/cpumask.h>
#include <linux/context_tracking_irq.h>
#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))
-#define ulong2long(a) (*(long *)(&(a)))
-#define USHORT_CMP_GE(a, b) (USHRT_MAX / 2 >= (unsigned short)((a) - (b)))
-#define USHORT_CMP_LT(a, b) (USHRT_MAX / 2 < (unsigned short)((a) - (b)))
/* Exported common interfaces */
void call_rcu(struct rcu_head *head, rcu_callback_t func);
@@ -121,8 +119,6 @@ static inline void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func)
void rcu_init(void);
extern int rcu_scheduler_active;
void rcu_sched_clock_irq(int user);
-void rcu_report_dead(unsigned int cpu);
-void rcutree_migrate_callbacks(int cpu);
#ifdef CONFIG_TASKS_RCU_GENERIC
void rcu_init_tasks_generic(void);
@@ -156,31 +152,6 @@ static inline int rcu_nocb_cpu_deoffload(int cpu) { return 0; }
static inline void rcu_nocb_flush_deferred_wakeup(void) { }
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
-/**
- * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
- * @a: Code that RCU needs to pay attention to.
- *
- * RCU read-side critical sections are forbidden in the inner idle loop,
- * that is, between the ct_idle_enter() and the ct_idle_exit() -- RCU
- * will happily ignore any such read-side critical sections. However,
- * things like powertop need tracepoints in the inner idle loop.
- *
- * This macro provides the way out: RCU_NONIDLE(do_something_with_RCU())
- * will tell RCU that it needs to pay attention, invoke its argument
- * (in this example, calling the do_something_with_RCU() function),
- * and then tell RCU to go back to ignoring this CPU. It is permissible
- * to nest RCU_NONIDLE() wrappers, but not indefinitely (but the limit is
- * on the order of a million or so, even on 32-bit systems). It is
- * not legal to block within RCU_NONIDLE(), nor is it permissible to
- * transfer control either into or out of RCU_NONIDLE()'s statement.
- */
-#define RCU_NONIDLE(a) \
- do { \
- ct_irq_enter_irqson(); \
- do { a; } while (0); \
- ct_irq_exit_irqson(); \
- } while (0)
-
/*
* Note a quasi-voluntary context switch for RCU-tasks's benefit.
* This is a macro rather than an inline function to avoid #include hell.
@@ -327,6 +298,11 @@ static inline void rcu_lock_acquire(struct lockdep_map *map)
lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_);
}
+static inline void rcu_try_lock_acquire(struct lockdep_map *map)
+{
+ lock_acquire(map, 0, 1, 2, 0, NULL, _THIS_IP_);
+}
+
static inline void rcu_lock_release(struct lockdep_map *map)
{
lock_release(map, _THIS_IP_);
@@ -341,6 +317,7 @@ int rcu_read_lock_any_held(void);
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
# define rcu_lock_acquire(a) do { } while (0)
+# define rcu_try_lock_acquire(a) do { } while (0)
# define rcu_lock_release(a) do { } while (0)
static inline int rcu_read_lock_held(void)
@@ -957,9 +934,8 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
/**
* kfree_rcu() - kfree an object after a grace period.
- * @ptr: pointer to kfree for both single- and double-argument invocations.
- * @rhf: the name of the struct rcu_head within the type of @ptr,
- * but only for double-argument invocations.
+ * @ptr: pointer to kfree for double-argument invocations.
+ * @rhf: the name of the struct rcu_head within the type of @ptr.
*
* Many rcu callbacks functions just call kfree() on the base structure.
* These functions are trivial, but their size adds up, and furthermore
@@ -984,26 +960,18 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
* The BUILD_BUG_ON check must not involve any function calls, hence the
* checks are done in macros here.
*/
-#define kfree_rcu(ptr, rhf...) kvfree_rcu(ptr, ## rhf)
+#define kfree_rcu(ptr, rhf) kvfree_rcu_arg_2(ptr, rhf)
+#define kvfree_rcu(ptr, rhf) kvfree_rcu_arg_2(ptr, rhf)
/**
- * kvfree_rcu() - kvfree an object after a grace period.
- *
- * This macro consists of one or two arguments and it is
- * based on whether an object is head-less or not. If it
- * has a head then a semantic stays the same as it used
- * to be before:
- *
- * kvfree_rcu(ptr, rhf);
- *
- * where @ptr is a pointer to kvfree(), @rhf is the name
- * of the rcu_head structure within the type of @ptr.
+ * kfree_rcu_mightsleep() - kfree an object after a grace period.
+ * @ptr: pointer to kfree for single-argument invocations.
*
* When it comes to the head-less variant, only one argument
* is passed and that is just a pointer which has to be
* freed after a grace period. Therefore the semantic is
*
- * kvfree_rcu(ptr);
+ * kfree_rcu_mightsleep(ptr);
*
* where @ptr is the pointer to be freed by kvfree().
*
@@ -1012,13 +980,9 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
* annotation. Otherwise, please switch and embed the
* rcu_head structure within the type of @ptr.
*/
-#define kvfree_rcu(...) KVFREE_GET_MACRO(__VA_ARGS__, \
- kvfree_rcu_arg_2, kvfree_rcu_arg_1)(__VA_ARGS__)
-
+#define kfree_rcu_mightsleep(ptr) kvfree_rcu_arg_1(ptr)
#define kvfree_rcu_mightsleep(ptr) kvfree_rcu_arg_1(ptr)
-#define kfree_rcu_mightsleep(ptr) kvfree_rcu_mightsleep(ptr)
-#define KVFREE_GET_MACRO(_1, _2, NAME, ...) NAME
#define kvfree_rcu_arg_2(ptr, rhf) \
do { \
typeof (ptr) ___p = (ptr); \
@@ -1095,4 +1059,6 @@ rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f)
extern int rcu_expedited;
extern int rcu_normal;
+DEFINE_LOCK_GUARD_0(rcu, rcu_read_lock(), rcu_read_unlock())
+
#endif /* __LINUX_RCUPDATE_H */
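
The rcupdate.h changes above make kfree_rcu()/kvfree_rcu() strictly two-argument, reserve the head-less form for the *_mightsleep() variants, and add an RCU lock guard. A minimal sketch of the resulting call patterns (the struct and functions are illustrative, not part of the patch):

    struct foo {
        int value;
        struct rcu_head rcu;
    };

    static void foo_release(struct foo *f)
    {
        kfree_rcu(f, rcu);              /* two-argument form, atomic-safe */
    }

    static void foo_release_headless(struct foo *f)
    {
        kfree_rcu_mightsleep(f);        /* head-less form, may block */
    }

    static int foo_read(struct foo *f)
    {
        guard(rcu)();                   /* via DEFINE_LOCK_GUARD_0(rcu, ...) */
        return READ_ONCE(f->value);
    }
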
diff --git a/include/linux/rcupdate_trace.h b/include/linux/rcupdate_trace.h
index 9bc8cbb33340..eda493200663 100644
--- a/include/linux/rcupdate_trace.h
+++ b/include/linux/rcupdate_trace.h
@@ -87,6 +87,7 @@ static inline void rcu_read_unlock_trace(void)
void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
void synchronize_rcu_tasks_trace(void);
void rcu_barrier_tasks_trace(void);
+struct task_struct *get_rcu_tasks_trace_gp_kthread(void);
#else
/*
* The BPF JIT forms these addresses even when it doesn't call these
diff --git a/include/linux/rcupdate_wait.h b/include/linux/rcupdate_wait.h
index 699b938358bf..d07f0848802e 100644
--- a/include/linux/rcupdate_wait.h
+++ b/include/linux/rcupdate_wait.h
@@ -8,6 +8,7 @@
#include <linux/rcupdate.h>
#include <linux/completion.h>
+#include <linux/sched.h>
/*
* Structure allowing asynchronous waiting on RCU.
@@ -42,6 +43,11 @@ do { \
* call_srcu() function, with this wrapper supplying the pointer to the
* corresponding srcu_struct.
*
+ * Note that call_rcu_hurry() should be used instead of call_rcu()
+ * because in kernels built with CONFIG_RCU_LAZY=y the delay between the
+ * invocation of call_rcu() and that of the corresponding RCU callback
+ * can be multiple seconds.
+ *
* The first argument tells Tiny RCU's _wait_rcu_gp() not to
* bother waiting for RCU. The reason for this is because anywhere
* synchronize_rcu_mult() can be called is automatically already a full
@@ -50,4 +56,13 @@ do { \
#define synchronize_rcu_mult(...) \
_wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__)
+static inline void cond_resched_rcu(void)
+{
+#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
+ rcu_read_unlock();
+ cond_resched();
+ rcu_read_lock();
+#endif
+}
+
#endif /* _LINUX_SCHED_RCUPDATE_WAIT_H */
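
cond_resched_rcu() above lets a long-running scan briefly leave the RCU read-side critical section to reschedule. A hedged sketch of the intended pattern (the table and the per-entry work are made up); note that any pointer obtained before the call must not be used after it:

    rcu_read_lock();
    for (i = 0; i < nr_entries; i++) {
        struct foo *f = rcu_dereference(table[i]);

        if (f)
            foo_process(f);     /* hypothetical per-entry work */
        cond_resched_rcu();     /* drops and re-takes rcu_read_lock() */
    }
    rcu_read_unlock();
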
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 7f17acf29dda..d9ac7b136aea 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -138,6 +138,8 @@ static inline int rcu_needs_cpu(void)
return 0;
}
+static inline void rcu_request_urgent_qs_task(struct task_struct *t) { }
+
/*
* Take advantage of the fact that there is only one CPU, which
* allows us to ignore virtualization-based context switches.
@@ -169,6 +171,6 @@ static inline void rcu_all_qs(void) { barrier(); }
#define rcutree_offline_cpu NULL
#define rcutree_dead_cpu NULL
#define rcutree_dying_cpu NULL
-static inline void rcu_cpu_starting(unsigned int cpu) { }
+static inline void rcutree_report_cpu_starting(unsigned int cpu) { }
#endif /* __LINUX_RCUTINY_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 56bccb5a8fde..254244202ea9 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -21,6 +21,7 @@ void rcu_softirq_qs(void);
void rcu_note_context_switch(bool preempt);
int rcu_needs_cpu(void);
void rcu_cpu_stall_reset(void);
+void rcu_request_urgent_qs_task(struct task_struct *t);
/*
* Note a virtualization-based context switch. This is simply a
@@ -36,7 +37,6 @@ void synchronize_rcu_expedited(void);
void kvfree_call_rcu(struct rcu_head *head, void *ptr);
void rcu_barrier(void);
-bool rcu_eqs_special_set(int cpu);
void rcu_momentary_dyntick_idle(void);
void kfree_rcu_scheduler_running(void);
bool rcu_gp_might_be_stalled(void);
@@ -110,9 +110,21 @@ void rcu_all_qs(void);
/* RCUtree hotplug events */
int rcutree_prepare_cpu(unsigned int cpu);
int rcutree_online_cpu(unsigned int cpu);
-int rcutree_offline_cpu(unsigned int cpu);
+void rcutree_report_cpu_starting(unsigned int cpu);
+
+#ifdef CONFIG_HOTPLUG_CPU
int rcutree_dead_cpu(unsigned int cpu);
int rcutree_dying_cpu(unsigned int cpu);
-void rcu_cpu_starting(unsigned int cpu);
+int rcutree_offline_cpu(unsigned int cpu);
+#else
+#define rcutree_dead_cpu NULL
+#define rcutree_dying_cpu NULL
+#define rcutree_offline_cpu NULL
+#endif
+
+void rcutree_migrate_callbacks(int cpu);
+
+/* Called from hotplug and also arm64 early secondary boot failure */
+void rcutree_report_cpu_dead(void);
#endif /* __LINUX_RCUTREE_H */
diff --git a/include/linux/rcuwait.h b/include/linux/rcuwait.h
index 8052d34da782..27343424225c 100644
--- a/include/linux/rcuwait.h
+++ b/include/linux/rcuwait.h
@@ -49,9 +49,9 @@ static inline void prepare_to_rcuwait(struct rcuwait *w)
extern void finish_rcuwait(struct rcuwait *w);
-#define rcuwait_wait_event(w, condition, state) \
+#define ___rcuwait_wait_event(w, condition, state, ret, cmd) \
({ \
- int __ret = 0; \
+ long __ret = ret; \
prepare_to_rcuwait(w); \
for (;;) { \
/* \
@@ -67,10 +67,27 @@ extern void finish_rcuwait(struct rcuwait *w);
break; \
} \
\
- schedule(); \
+ cmd; \
} \
finish_rcuwait(w); \
__ret; \
})
+#define rcuwait_wait_event(w, condition, state) \
+ ___rcuwait_wait_event(w, condition, state, 0, schedule())
+
+#define __rcuwait_wait_event_timeout(w, condition, state, timeout) \
+ ___rcuwait_wait_event(w, ___wait_cond_timeout(condition), \
+ state, timeout, \
+ __ret = schedule_timeout(__ret))
+
+#define rcuwait_wait_event_timeout(w, condition, state, timeout) \
+({ \
+ long __ret = timeout; \
+ if (!___wait_cond_timeout(condition)) \
+ __ret = __rcuwait_wait_event_timeout(w, condition, \
+ state, timeout); \
+ __ret; \
+})
+
#endif /* _LINUX_RCUWAIT_H_ */
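
The new rcuwait_wait_event_timeout() follows the wait_event_timeout() convention: it returns 0 on timeout and the remaining jiffies otherwise. A sketch of a waiter (the condition variable is illustrative):

    static bool done;
    struct rcuwait w;
    long left;

    rcuwait_init(&w);
    /* elsewhere: WRITE_ONCE(done, true); rcuwait_wake_up(&w); */
    left = rcuwait_wait_event_timeout(&w, READ_ONCE(done),
                                      TASK_INTERRUPTIBLE, HZ);
    if (!left)
        pr_warn("timed out after one second\n");
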
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
index 2b6bb593be5b..abcdde4df697 100644
--- a/include/linux/reboot.h
+++ b/include/linux/reboot.h
@@ -129,11 +129,14 @@ enum sys_off_mode {
* @cb_data: User's callback data.
* @cmd: Command string. Currently used only by the sys-off restart mode,
* NULL otherwise.
+ * @dev: Device of the sys-off handler. Only if known (devm_register_*),
+ * NULL otherwise.
*/
struct sys_off_data {
int mode;
void *cb_data;
const char *cmd;
+ struct device *dev;
};
struct sys_off_handler *
@@ -174,7 +177,17 @@ void ctrl_alt_del(void);
extern void orderly_poweroff(bool force);
extern void orderly_reboot(void);
-void hw_protection_shutdown(const char *reason, int ms_until_forced);
+void __hw_protection_shutdown(const char *reason, int ms_until_forced, bool shutdown);
+
+static inline void hw_protection_reboot(const char *reason, int ms_until_forced)
+{
+ __hw_protection_shutdown(reason, ms_until_forced, false);
+}
+
+static inline void hw_protection_shutdown(const char *reason, int ms_until_forced)
+{
+ __hw_protection_shutdown(reason, ms_until_forced, true);
+}
/*
* Emergency restart, callable from an interrupt handler.
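
With the split above, callers choose between a reboot and a shutdown reaction to a hardware-protection event. A sketch of a caller (the trigger and thresholds are made up):

    /* e.g. in a thermal or under-voltage interrupt handler */
    if (temp_mC > critical_limit_mC) {
        /* try an orderly reboot; force one after 500 ms */
        hw_protection_reboot("core overtemperature", 500);
    }
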
diff --git a/include/linux/ref_tracker.h b/include/linux/ref_tracker.h
index 9ca353ab712b..8eac4f3d5254 100644
--- a/include/linux/ref_tracker.h
+++ b/include/linux/ref_tracker.h
@@ -17,12 +17,15 @@ struct ref_tracker_dir {
bool dead;
struct list_head list; /* List of active trackers */
struct list_head quarantine; /* List of dead trackers */
+ char name[32];
#endif
};
#ifdef CONFIG_REF_TRACKER
+
static inline void ref_tracker_dir_init(struct ref_tracker_dir *dir,
- unsigned int quarantine_count)
+ unsigned int quarantine_count,
+ const char *name)
{
INIT_LIST_HEAD(&dir->list);
INIT_LIST_HEAD(&dir->quarantine);
@@ -31,14 +34,20 @@ static inline void ref_tracker_dir_init(struct ref_tracker_dir *dir,
dir->dead = false;
refcount_set(&dir->untracked, 1);
refcount_set(&dir->no_tracker, 1);
+ strscpy(dir->name, name, sizeof(dir->name));
stack_depot_init();
}
void ref_tracker_dir_exit(struct ref_tracker_dir *dir);
+void ref_tracker_dir_print_locked(struct ref_tracker_dir *dir,
+ unsigned int display_limit);
+
void ref_tracker_dir_print(struct ref_tracker_dir *dir,
unsigned int display_limit);
+int ref_tracker_dir_snprint(struct ref_tracker_dir *dir, char *buf, size_t size);
+
int ref_tracker_alloc(struct ref_tracker_dir *dir,
struct ref_tracker **trackerp, gfp_t gfp);
@@ -48,7 +57,8 @@ int ref_tracker_free(struct ref_tracker_dir *dir,
#else /* CONFIG_REF_TRACKER */
static inline void ref_tracker_dir_init(struct ref_tracker_dir *dir,
- unsigned int quarantine_count)
+ unsigned int quarantine_count,
+ const char *name)
{
}
@@ -56,11 +66,22 @@ static inline void ref_tracker_dir_exit(struct ref_tracker_dir *dir)
{
}
+static inline void ref_tracker_dir_print_locked(struct ref_tracker_dir *dir,
+ unsigned int display_limit)
+{
+}
+
static inline void ref_tracker_dir_print(struct ref_tracker_dir *dir,
unsigned int display_limit)
{
}
+static inline int ref_tracker_dir_snprint(struct ref_tracker_dir *dir,
+ char *buf, size_t size)
+{
+ return 0;
+}
+
static inline int ref_tracker_alloc(struct ref_tracker_dir *dir,
struct ref_tracker **trackerp,
gfp_t gfp)
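
ref_tracker_dir_init() now takes a name, stored in the directory and shown in reports, and ref_tracker_dir_snprint() renders the report into a caller-supplied buffer. A sketch (the name and buffer size are illustrative):

    struct ref_tracker_dir dir;
    char report[512];

    ref_tracker_dir_init(&dir, 16, "netdev-refs"); /* 16-entry quarantine */
    /* ... ref_tracker_alloc()/ref_tracker_free() as before ... */
    if (ref_tracker_dir_snprint(&dir, report, sizeof(report)) > 0)
        pr_info("%s", report);
    ref_tracker_dir_exit(&dir);
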
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index a62fcca97486..85c6df0d1bef 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -96,22 +96,11 @@
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/limits.h>
+#include <linux/refcount_types.h>
#include <linux/spinlock_types.h>
struct mutex;
-/**
- * typedef refcount_t - variant of atomic_t specialized for reference counts
- * @refs: atomic_t counter field
- *
- * The counter saturates at REFCOUNT_SATURATED and will not move once
- * there. This avoids wrapping the counter and causing 'spurious'
- * use-after-free bugs.
- */
-typedef struct refcount_struct {
- atomic_t refs;
-} refcount_t;
-
#define REFCOUNT_INIT(n) { .refs = ATOMIC_INIT(n), }
#define REFCOUNT_MAX INT_MAX
#define REFCOUNT_SATURATED (INT_MIN / 2)
diff --git a/include/linux/refcount_types.h b/include/linux/refcount_types.h
new file mode 100644
index 000000000000..162004f06edf
--- /dev/null
+++ b/include/linux/refcount_types.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_REFCOUNT_TYPES_H
+#define _LINUX_REFCOUNT_TYPES_H
+
+#include <linux/types.h>
+
+/**
+ * typedef refcount_t - variant of atomic_t specialized for reference counts
+ * @refs: atomic_t counter field
+ *
+ * The counter saturates at REFCOUNT_SATURATED and will not move once
+ * there. This avoids wrapping the counter and causing 'spurious'
+ * use-after-free bugs.
+ */
+typedef struct refcount_struct {
+ atomic_t refs;
+} refcount_t;
+
+#endif /* _LINUX_REFCOUNT_TYPES_H */
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index c2b9cc5db824..c9182a47736e 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -1287,6 +1287,7 @@ int regcache_drop_region(struct regmap *map, unsigned int min,
void regcache_cache_only(struct regmap *map, bool enable);
void regcache_cache_bypass(struct regmap *map, bool enable);
void regcache_mark_dirty(struct regmap *map);
+bool regcache_reg_cached(struct regmap *map, unsigned int reg);
bool regmap_check_range_table(struct regmap *map, unsigned int reg,
const struct regmap_access_table *table);
@@ -1528,9 +1529,6 @@ struct regmap_irq_chip_data;
* status_base. Should contain num_regs arrays.
* Can be provided for chips with more complex mapping than
* 1st bit to 1st sub-reg, 2nd bit to 2nd sub-reg, ...
- * When used with not_fixed_stride, each one-element array
- * member contains offset calculated as address from each
- * peripheral to first peripheral.
* @num_main_regs: Number of 'main status' irq registers for chips which have
* main_status set.
*
@@ -1542,10 +1540,6 @@ struct regmap_irq_chip_data;
* @ack_base: Base ack address. If zero then the chip is clear on read.
* Using zero value is possible with @use_ack bit.
* @wake_base: Base address for wake enables. If zero unsupported.
- * @type_base: Base address for irq type. If zero unsupported. Deprecated,
- * use @config_base instead.
- * @virt_reg_base: Base addresses for extra config regs. Deprecated, use
- * @config_base instead.
* @config_base: Base address for IRQ type config regs. If null unsupported.
* @irq_reg_stride: Stride to use for chips where registers are not contiguous.
* @init_ack_masked: Ack all masked interrupts once during initialization.
@@ -1571,11 +1565,6 @@ struct regmap_irq_chip_data;
* registers before unmasking interrupts to clear any bits
* set when they were masked.
* @runtime_pm: Hold a runtime PM lock on the device when accessing it.
- * @not_fixed_stride: Used when chip peripherals are not laid out with fixed
- * stride. Must be used with sub_reg_offsets containing the
- * offsets to each peripheral. Deprecated; the same thing
- * can be accomplished with a @get_irq_reg callback, without
- * the need for a @sub_reg_offsets table.
* @no_status: No status register: all interrupts assumed generated by device.
*
* @num_regs: Number of registers in each control bank.
@@ -1583,12 +1572,6 @@ struct regmap_irq_chip_data;
* @irqs: Descriptors for individual IRQs. Interrupt numbers are
* assigned based on the index in the array of the interrupt.
* @num_irqs: Number of descriptors.
- *
- * @num_type_reg: Number of type registers. Deprecated, use config registers
- * instead.
- * @num_virt_regs: Number of non-standard irq configuration registers.
- * If zero unsupported. Deprecated, use config registers
- * instead.
* @num_config_bases: Number of config base registers.
* @num_config_regs: Number of config registers for each config base register.
*
@@ -1598,15 +1581,12 @@ struct regmap_irq_chip_data;
* after handling the interrupts in regmap_irq_handler().
* @handle_mask_sync: Callback used to handle IRQ mask syncs. The index will be
* in the range [0, num_regs)
- * @set_type_virt: Driver specific callback to extend regmap_irq_set_type()
- * and configure virt regs. Deprecated, use @set_type_config
- * callback and config registers instead.
* @set_type_config: Callback used for configuring irq types.
* @get_irq_reg: Callback for mapping (base register, index) pairs to register
* addresses. The base register will be one of @status_base,
* @mask_base, etc., @main_status, or any of @config_base.
* The index will be in the range [0, num_main_regs[ for the
- * main status base, [0, num_type_settings[ for any config
+ * main status base, [0, num_config_regs[ for any config
* register base, and [0, num_regs[ for any other base.
* If unspecified then regmap_irq_get_irq_reg_linear() is used.
* @irq_drv_data: Driver specific IRQ data which is passed as parameter when
@@ -1629,8 +1609,6 @@ struct regmap_irq_chip {
unsigned int unmask_base;
unsigned int ack_base;
unsigned int wake_base;
- unsigned int type_base;
- unsigned int *virt_reg_base;
const unsigned int *config_base;
unsigned int irq_reg_stride;
unsigned int init_ack_masked:1;
@@ -1643,7 +1621,6 @@ struct regmap_irq_chip {
unsigned int type_in_mask:1;
unsigned int clear_on_unmask:1;
unsigned int runtime_pm:1;
- unsigned int not_fixed_stride:1;
unsigned int no_status:1;
int num_regs;
@@ -1651,18 +1628,13 @@ struct regmap_irq_chip {
const struct regmap_irq *irqs;
int num_irqs;
- int num_type_reg;
- int num_virt_regs;
int num_config_bases;
int num_config_regs;
int (*handle_pre_irq)(void *irq_drv_data);
int (*handle_post_irq)(void *irq_drv_data);
- int (*handle_mask_sync)(struct regmap *map, int index,
- unsigned int mask_buf_def,
+ int (*handle_mask_sync)(int index, unsigned int mask_buf_def,
unsigned int mask_buf, void *irq_drv_data);
- int (*set_type_virt)(unsigned int **buf, unsigned int type,
- unsigned long hwirq, int reg);
int (*set_type_config)(unsigned int **buf, unsigned int type,
const struct regmap_irq *irq_data, int idx,
void *irq_drv_data);
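
For chips that previously used type_base/virt_reg_base or not_fixed_stride, the replacement is the config register interface plus, where needed, a get_irq_reg callback. A minimal sketch of a chip description on the new interface (all register addresses are invented, and my_irqs is assumed defined elsewhere):

    static const unsigned int my_config_base[] = { 0x10 };

    static const struct regmap_irq_chip my_irq_chip = {
        .name             = "my-chip",
        .status_base      = 0x00,
        .mask_base        = 0x04,
        .ack_base         = 0x08,
        .config_base      = my_config_base,
        .num_config_bases = ARRAY_SIZE(my_config_base),
        .num_config_regs  = 1,
        .set_type_config  = regmap_irq_set_type_config_simple,
        .num_regs         = 1,
        .irqs             = my_irqs,
        .num_irqs         = ARRAY_SIZE(my_irqs),
    };
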
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 39b666b40ea6..4660582a3302 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -33,6 +33,7 @@
#include <linux/err.h>
#include <linux/suspend.h>
+#include <regulator/regulator.h>
struct device;
struct notifier_block;
@@ -85,52 +86,6 @@ struct regulator_dev;
#define REGULATOR_MODE_STANDBY 0x8
/*
- * Regulator notifier events.
- *
- * UNDER_VOLTAGE Regulator output is under voltage.
- * OVER_CURRENT Regulator output current is too high.
- * REGULATION_OUT Regulator output is out of regulation.
- * FAIL Regulator output has failed.
- * OVER_TEMP Regulator over temp.
- * FORCE_DISABLE Regulator forcibly shut down by software.
- * VOLTAGE_CHANGE Regulator voltage changed.
- * Data passed is old voltage cast to (void *).
- * DISABLE Regulator was disabled.
- * PRE_VOLTAGE_CHANGE Regulator is about to have voltage changed.
- * Data passed is "struct pre_voltage_change_data"
- * ABORT_VOLTAGE_CHANGE Regulator voltage change failed for some reason.
- * Data passed is old voltage cast to (void *).
- * PRE_DISABLE Regulator is about to be disabled
- * ABORT_DISABLE Regulator disable failed for some reason
- *
- * NOTE: These events can be OR'ed together when passed into handler.
- */
-
-#define REGULATOR_EVENT_UNDER_VOLTAGE 0x01
-#define REGULATOR_EVENT_OVER_CURRENT 0x02
-#define REGULATOR_EVENT_REGULATION_OUT 0x04
-#define REGULATOR_EVENT_FAIL 0x08
-#define REGULATOR_EVENT_OVER_TEMP 0x10
-#define REGULATOR_EVENT_FORCE_DISABLE 0x20
-#define REGULATOR_EVENT_VOLTAGE_CHANGE 0x40
-#define REGULATOR_EVENT_DISABLE 0x80
-#define REGULATOR_EVENT_PRE_VOLTAGE_CHANGE 0x100
-#define REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE 0x200
-#define REGULATOR_EVENT_PRE_DISABLE 0x400
-#define REGULATOR_EVENT_ABORT_DISABLE 0x800
-#define REGULATOR_EVENT_ENABLE 0x1000
-/*
- * Following notifications should be emitted only if detected condition
- * is such that the HW is likely to still be working but consumers should
- * take a recovery action to prevent problems escalating into errors.
- */
-#define REGULATOR_EVENT_UNDER_VOLTAGE_WARN 0x2000
-#define REGULATOR_EVENT_OVER_CURRENT_WARN 0x4000
-#define REGULATOR_EVENT_OVER_VOLTAGE_WARN 0x8000
-#define REGULATOR_EVENT_OVER_TEMP_WARN 0x10000
-#define REGULATOR_EVENT_WARN_MASK 0x1E000
-
-/*
* Regulator errors that can be queried using regulator_get_error_flags
*
* UNDER_VOLTAGE Regulator output is under voltage.
diff --git a/include/linux/regulator/db8500-prcmu.h b/include/linux/regulator/db8500-prcmu.h
index f90df9ee703e..d58ff273157e 100644
--- a/include/linux/regulator/db8500-prcmu.h
+++ b/include/linux/regulator/db8500-prcmu.h
@@ -35,10 +35,4 @@ enum db8500_regulator_id {
DB8500_NUM_REGULATORS
};
-/*
- * Exported interface for CPUIdle only. This function is called with all
- * interrupts turned off.
- */
-int power_state_active_is_enabled(void);
-
#endif
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index d3b4a3d4514a..22a07c0900a4 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -51,12 +51,7 @@ enum regulator_detection_severity {
/* Initialize struct linear_range for regulators */
#define REGULATOR_LINEAR_RANGE(_min_uV, _min_sel, _max_sel, _step_uV) \
-{ \
- .min = _min_uV, \
- .min_sel = _min_sel, \
- .max_sel = _max_sel, \
- .step = _step_uV, \
-}
+ LINEAR_RANGE(_min_uV, _min_sel, _max_sel, _step_uV)
/**
* struct regulator_ops - regulator operations.
@@ -292,11 +287,12 @@ enum regulator_type {
* @ramp_delay: Time to settle down after voltage change (unit: uV/us)
* @min_dropout_uV: The minimum dropout voltage this regulator can handle
* @linear_ranges: A constant table of possible voltage ranges.
- * @linear_range_selectors: A constant table of voltage range selectors.
- * If pickable ranges are used each range must
- * have corresponding selector here.
+ * @linear_range_selectors_bitfield: A constant table of voltage range
+ * selectors as bitfield values. If
+ * pickable ranges are used each range
+ * must have corresponding selector here.
* @n_linear_ranges: Number of entries in the @linear_ranges (and in
- * linear_range_selectors if used) table(s).
+ * linear_range_selectors_bitfield if used) table(s).
* @volt_table: Voltage mapping table (if table based mapping)
* @curr_table: Current limit mapping table (if table based mapping)
*
@@ -384,7 +380,7 @@ struct regulator_desc {
int min_dropout_uV;
const struct linear_range *linear_ranges;
- const unsigned int *linear_range_selectors;
+ const unsigned int *linear_range_selectors_bitfield;
int n_linear_ranges;
@@ -758,6 +754,8 @@ int regulator_set_current_limit_regmap(struct regulator_dev *rdev,
int min_uA, int max_uA);
int regulator_get_current_limit_regmap(struct regulator_dev *rdev);
void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data);
+int regulator_find_closest_bigger(unsigned int target, const unsigned int *table,
+ unsigned int num_sel, unsigned int *sel);
int regulator_set_ramp_delay_regmap(struct regulator_dev *rdev, int ramp_delay);
int regulator_sync_voltage_rdev(struct regulator_dev *rdev);
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 621b7f4a3639..0cd76d264727 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -49,6 +49,13 @@ struct regulator;
#define DISABLE_IN_SUSPEND 1
#define ENABLE_IN_SUSPEND 2
+/*
+ * Default time window (in milliseconds) following a critical under-voltage
+ * event during which less critical actions can be safely carried out by the
+ * system.
+ */
+#define REGULATOR_DEF_UV_LESS_CRITICAL_WINDOW_MS 10
+
/* Regulator active discharge flags */
enum regulator_active_discharge {
REGULATOR_ACTIVE_DISCHARGE_DEFAULT,
@@ -127,6 +134,8 @@ struct notification_limit {
* @ramp_disable: Disable ramp delay when initialising or when setting voltage.
* @soft_start: Enable soft start so that voltage ramps slowly.
* @pull_down: Enable pull down when regulator is disabled.
+ * @system_critical: Set if the regulator is critical to system stability or
+ * functionality.
* @over_current_protection: Auto disable on over current event.
*
* @over_current_detection: Configure over current limits.
@@ -153,6 +162,13 @@ struct notification_limit {
* regulator_active_discharge values are used for
* initialisation.
* @enable_time: Turn-on time of the rails (unit: microseconds)
+ * @uv_less_critical_window_ms: Specifies the time window (in milliseconds)
+ * following a critical under-voltage (UV) event
+ * during which less critical actions can be
+ * safely carried out by the system (for example
+ * logging). After this time window more critical
+ * actions should be taken (for example, preventing
+ * HW damage).
*/
struct regulation_constraints {
@@ -204,6 +220,7 @@ struct regulation_constraints {
unsigned int settling_time_up;
unsigned int settling_time_down;
unsigned int enable_time;
+ unsigned int uv_less_critical_window_ms;
unsigned int active_discharge;
@@ -214,6 +231,7 @@ struct regulation_constraints {
unsigned ramp_disable:1; /* disable ramp delay */
unsigned soft_start:1; /* ramp voltage slowly */
unsigned pull_down:1; /* pull down resistor when regulator off */
+ unsigned system_critical:1; /* critical to system stability */
unsigned over_current_protection:1; /* auto disable on over current */
unsigned over_current_detection:1; /* notify on over current */
unsigned over_voltage_detection:1; /* notify on over voltage */
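
A board description marking a rail as critical might fill the new constraint fields like this (values illustrative):

    static struct regulation_constraints vdd_core_constraints = {
        .name            = "vdd-core",
        .min_uV          = 800000,
        .max_uV          = 1200000,
        .system_critical = 1,
        /* 10 ms of less-critical work allowed after a UV event */
        .uv_less_critical_window_ms = REGULATOR_DEF_UV_LESS_CRITICAL_WINDOW_MS,
    };
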
diff --git a/include/linux/regulator/mt6358-regulator.h b/include/linux/regulator/mt6358-regulator.h
index bdcf83cd719e..562386f9b80e 100644
--- a/include/linux/regulator/mt6358-regulator.h
+++ b/include/linux/regulator/mt6358-regulator.h
@@ -41,15 +41,12 @@ enum {
MT6358_ID_VIO28,
MT6358_ID_VA12,
MT6358_ID_VRF18,
- MT6358_ID_VCN33_BT,
- MT6358_ID_VCN33_WIFI,
+ MT6358_ID_VCN33,
MT6358_ID_VCAMA2,
MT6358_ID_VMC,
MT6358_ID_VLDO28,
MT6358_ID_VAUD28,
MT6358_ID_VSIM2,
- MT6358_ID_VCORE_SSHUB,
- MT6358_ID_VSRAM_OTHERS_SSHUB,
MT6358_ID_RG_MAX,
};
@@ -85,13 +82,13 @@ enum {
MT6366_ID_VIO28,
MT6366_ID_VA12,
MT6366_ID_VRF18,
- MT6366_ID_VCN33_BT,
- MT6366_ID_VCN33_WIFI,
+ MT6366_ID_VCN33,
MT6366_ID_VMC,
MT6366_ID_VAUD28,
MT6366_ID_VSIM2,
- MT6366_ID_VCORE_SSHUB,
- MT6366_ID_VSRAM_OTHERS_SSHUB,
+ MT6366_ID_VM18,
+ MT6366_ID_VMDDR,
+ MT6366_ID_VSRAM_CORE,
MT6366_ID_RG_MAX,
};
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index fe8978eb69f1..b4795698d8c2 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -690,6 +690,10 @@ int rproc_detach(struct rproc *rproc);
int rproc_set_firmware(struct rproc *rproc, const char *fw_name);
void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type);
void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem);
+
+/* from remoteproc_coredump.c */
+void rproc_coredump_cleanup(struct rproc *rproc);
+void rproc_coredump(struct rproc *rproc);
void rproc_coredump_using_sections(struct rproc *rproc);
int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size);
int rproc_coredump_add_custom_segment(struct rproc *rproc,
diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h
index 8334eeacfec5..66942d7fba7f 100644
--- a/include/linux/resctrl.h
+++ b/include/linux/resctrl.h
@@ -94,7 +94,7 @@ struct rdt_domain {
* zero CBM.
* @shareable_bits: Bitmask of shareable resource with other
* executing entities
- * @arch_has_sparse_bitmaps: True if a bitmap like f00f is valid.
+ * @arch_has_sparse_bitmasks: True if a bitmask like f00f is valid.
* @arch_has_per_cpu_cfg: True if QOS_CFG register for this cache
* level has CPU scope.
*/
@@ -102,7 +102,7 @@ struct resctrl_cache {
unsigned int cbm_len;
unsigned int min_cbm_bits;
unsigned int shareable_bits;
- bool arch_has_sparse_bitmaps;
+ bool arch_has_sparse_bitmasks;
bool arch_has_per_cpu_cfg;
};
diff --git a/include/linux/restart_block.h b/include/linux/restart_block.h
index 980a65594412..13f17676c5f4 100644
--- a/include/linux/restart_block.h
+++ b/include/linux/restart_block.h
@@ -7,8 +7,8 @@
#include <linux/compiler.h>
#include <linux/types.h>
-#include <linux/time64.h>
+struct __kernel_timespec;
struct timespec;
struct old_timespec32;
struct pollfd;
diff --git a/include/linux/resume_user_mode.h b/include/linux/resume_user_mode.h
index 285189454449..e0135e0adae0 100644
--- a/include/linux/resume_user_mode.h
+++ b/include/linux/resume_user_mode.h
@@ -6,6 +6,7 @@
#include <linux/sched.h>
#include <linux/task_work.h>
#include <linux/memcontrol.h>
+#include <linux/rseq.h>
#include <linux/blk-cgroup.h>
/**
@@ -55,7 +56,7 @@ static inline void resume_user_mode_work(struct pt_regs *regs)
}
#endif
- mem_cgroup_handle_over_high();
+ mem_cgroup_handle_over_high(GFP_KERNEL);
blkcg_maybe_throttle_current();
rseq_handle_notify_resume(NULL, regs);
diff --git a/include/linux/rethook.h b/include/linux/rethook.h
index c8ac1e5afcd1..ba60962805f6 100644
--- a/include/linux/rethook.h
+++ b/include/linux/rethook.h
@@ -6,15 +6,14 @@
#define _LINUX_RETHOOK_H
#include <linux/compiler.h>
-#include <linux/freelist.h>
+#include <linux/objpool.h>
#include <linux/kallsyms.h>
#include <linux/llist.h>
#include <linux/rcupdate.h>
-#include <linux/refcount.h>
struct rethook_node;
-typedef void (*rethook_handler_t) (struct rethook_node *, void *, struct pt_regs *);
+typedef void (*rethook_handler_t) (struct rethook_node *, void *, unsigned long, struct pt_regs *);
/**
* struct rethook - The rethook management data structure.
@@ -29,15 +28,18 @@ typedef void (*rethook_handler_t) (struct rethook_node *, void *, struct pt_regs
*/
struct rethook {
void *data;
- rethook_handler_t handler;
- struct freelist_head pool;
- refcount_t ref;
+ /*
+ * To avoid sparse warnings, this uses a raw function pointer with
+ * __rcu, instead of rethook_handler_t. But this must be the same as
+ * rethook_handler_t.
+ */
+ void (__rcu *handler) (struct rethook_node *, void *, unsigned long, struct pt_regs *);
+ struct objpool_head pool;
struct rcu_head rcu;
};
/**
* struct rethook_node - The rethook shadow-stack entry node.
- * @freelist: The freelist, linked to struct rethook::pool.
* @rcu: The rcu_head for deferred freeing.
* @llist: The llist, linked to a struct task_struct::rethooks.
* @rethook: The pointer to the struct rethook.
@@ -48,19 +50,16 @@ struct rethook {
* on each entry of the shadow stack.
*/
struct rethook_node {
- union {
- struct freelist_node freelist;
- struct rcu_head rcu;
- };
+ struct rcu_head rcu;
struct llist_node llist;
struct rethook *rethook;
unsigned long ret_addr;
unsigned long frame;
};
-struct rethook *rethook_alloc(void *data, rethook_handler_t handler);
+struct rethook *rethook_alloc(void *data, rethook_handler_t handler, int size, int num);
+void rethook_stop(struct rethook *rh);
void rethook_free(struct rethook *rh);
-void rethook_add_node(struct rethook *rh, struct rethook_node *node);
struct rethook_node *rethook_try_get(struct rethook *rh);
void rethook_recycle(struct rethook_node *node);
void rethook_hook(struct rethook_node *node, struct pt_regs *regs, bool mcount);
@@ -97,4 +96,3 @@ void rethook_flush_task(struct task_struct *tk);
#endif
#endif
-
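
rethook_alloc() now preallocates an objpool of nodes instead of callers adding nodes with rethook_add_node(). A sketch of the new allocation pattern (the handler body is illustrative, and the ERR_PTR error convention is assumed):

    static void my_ret_handler(struct rethook_node *rhn, void *data,
                               unsigned long ret_addr, struct pt_regs *regs)
    {
        /* called on function return; ret_addr is the return site */
    }

    /* 32 nodes, each at least sizeof(struct rethook_node) bytes */
    rh = rethook_alloc(my_data, my_ret_handler,
                       sizeof(struct rethook_node), 32);
    if (IS_ERR(rh))
        return PTR_ERR(rh);
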
diff --git a/include/linux/rhashtable-types.h b/include/linux/rhashtable-types.h
index 57467cbf4c5b..b6f3797277ff 100644
--- a/include/linux/rhashtable-types.h
+++ b/include/linux/rhashtable-types.h
@@ -12,7 +12,7 @@
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/mutex.h>
-#include <linux/workqueue.h>
+#include <linux/workqueue_types.h>
struct rhash_head {
struct rhash_head __rcu *next;
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 782e14f62201..fa802db216f9 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -141,6 +141,7 @@ int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter);
unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu);
+unsigned long ring_buffer_max_event_size(struct trace_buffer *buffer);
void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu);
void ring_buffer_reset_online_cpus(struct trace_buffer *buffer);
@@ -191,15 +192,24 @@ bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer);
size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu);
size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu);
-void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu);
-void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data);
-int ring_buffer_read_page(struct trace_buffer *buffer, void **data_page,
+struct buffer_data_read_page;
+struct buffer_data_read_page *
+ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu);
+void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu,
+ struct buffer_data_read_page *page);
+int ring_buffer_read_page(struct trace_buffer *buffer,
+ struct buffer_data_read_page *data_page,
size_t len, int cpu, int full);
+void *ring_buffer_read_page_data(struct buffer_data_read_page *page);
struct trace_seq;
int ring_buffer_print_entry_header(struct trace_seq *s);
-int ring_buffer_print_page_header(struct trace_seq *s);
+int ring_buffer_print_page_header(struct trace_buffer *buffer, struct trace_seq *s);
+
+int ring_buffer_subbuf_order_get(struct trace_buffer *buffer);
+int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order);
+int ring_buffer_subbuf_size_get(struct trace_buffer *buffer);
enum ring_buffer_flags {
RB_FL_OVERWRITE = 1 << 0,
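
The read-page path now hands out an opaque struct buffer_data_read_page, with the payload reached via ring_buffer_read_page_data(). A sketch of a reader (cpu selection and event parsing are left out):

    struct buffer_data_read_page *bpage;

    bpage = ring_buffer_alloc_read_page(buffer, cpu);
    if (IS_ERR(bpage))
        return PTR_ERR(bpage);

    if (ring_buffer_read_page(buffer, bpage, PAGE_SIZE, cpu, 0) >= 0) {
        void *data = ring_buffer_read_page_data(bpage);
        /* parse the events stored in data ... */
    }
    ring_buffer_free_read_page(buffer, cpu, bpage);
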
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index b87d01660412..b7944a833668 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -121,6 +121,11 @@ static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
down_write(&anon_vma->root->rwsem);
}
+static inline int anon_vma_trylock_write(struct anon_vma *anon_vma)
+{
+ return down_write_trylock(&anon_vma->root->rwsem);
+}
+
static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
{
up_write(&anon_vma->root->rwsem);
@@ -172,131 +177,323 @@ struct anon_vma *folio_get_anon_vma(struct folio *folio);
typedef int __bitwise rmap_t;
/*
- * No special request: if the page is a subpage of a compound page, it is
- * mapped via a PTE. The mapped (sub)page is possibly shared between processes.
+ * No special request: A mapped anonymous (sub)page is possibly shared between
+ * processes.
*/
#define RMAP_NONE ((__force rmap_t)0)
-/* The (sub)page is exclusive to a single process. */
+/* The anonymous (sub)page is exclusive to a single process. */
#define RMAP_EXCLUSIVE ((__force rmap_t)BIT(0))
/*
- * The compound page is not mapped via PTEs, but instead via a single PMD and
- * should be accounted accordingly.
+ * Internally, we're using an enum to specify the granularity. We make the
+ * compiler emit specialized code for each granularity.
*/
-#define RMAP_COMPOUND ((__force rmap_t)BIT(1))
+enum rmap_level {
+ RMAP_LEVEL_PTE = 0,
+ RMAP_LEVEL_PMD,
+};
+
+static inline void __folio_rmap_sanity_checks(struct folio *folio,
+ struct page *page, int nr_pages, enum rmap_level level)
+{
+ /* hugetlb folios are handled separately. */
+ VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
+
+ /*
+ * TODO: we get driver-allocated folios that have nothing to do with
+ * the rmap using vm_insert_page(); therefore, we cannot assume that
+ * folio_test_large_rmappable() holds for large folios. We should
+ * handle any desired mapcount+stats accounting for these folios in
+ * VM_MIXEDMAP VMAs separately, and then sanity-check here that
+ * we really only get rmappable folios.
+ */
+
+ VM_WARN_ON_ONCE(nr_pages <= 0);
+ VM_WARN_ON_FOLIO(page_folio(page) != folio, folio);
+ VM_WARN_ON_FOLIO(page_folio(page + nr_pages - 1) != folio, folio);
+
+ switch (level) {
+ case RMAP_LEVEL_PTE:
+ break;
+ case RMAP_LEVEL_PMD:
+ /*
+ * We don't support folios larger than a single PMD yet. So
+ * when RMAP_LEVEL_PMD is set, we assume that we are creating
+ * a single "entire" mapping of the folio.
+ */
+ VM_WARN_ON_FOLIO(folio_nr_pages(folio) != HPAGE_PMD_NR, folio);
+ VM_WARN_ON_FOLIO(nr_pages != HPAGE_PMD_NR, folio);
+ break;
+ default:
+ VM_WARN_ON_ONCE(true);
+ }
+}
/*
* rmap interfaces called when adding or removing pte of page
*/
-void page_move_anon_rmap(struct page *, struct vm_area_struct *);
-void page_add_anon_rmap(struct page *, struct vm_area_struct *,
- unsigned long address, rmap_t flags);
-void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
- unsigned long address);
+void folio_move_anon_rmap(struct folio *, struct vm_area_struct *);
+void folio_add_anon_rmap_ptes(struct folio *, struct page *, int nr_pages,
+ struct vm_area_struct *, unsigned long address, rmap_t flags);
+#define folio_add_anon_rmap_pte(folio, page, vma, address, flags) \
+ folio_add_anon_rmap_ptes(folio, page, 1, vma, address, flags)
+void folio_add_anon_rmap_pmd(struct folio *, struct page *,
+ struct vm_area_struct *, unsigned long address, rmap_t flags);
void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
unsigned long address);
-void page_add_file_rmap(struct page *, struct vm_area_struct *,
- bool compound);
-void page_remove_rmap(struct page *, struct vm_area_struct *,
- bool compound);
-
-void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
+void folio_add_file_rmap_ptes(struct folio *, struct page *, int nr_pages,
+ struct vm_area_struct *);
+#define folio_add_file_rmap_pte(folio, page, vma) \
+ folio_add_file_rmap_ptes(folio, page, 1, vma)
+void folio_add_file_rmap_pmd(struct folio *, struct page *,
+ struct vm_area_struct *);
+void folio_remove_rmap_ptes(struct folio *, struct page *, int nr_pages,
+ struct vm_area_struct *);
+#define folio_remove_rmap_pte(folio, page, vma) \
+ folio_remove_rmap_ptes(folio, page, 1, vma)
+void folio_remove_rmap_pmd(struct folio *, struct page *,
+ struct vm_area_struct *);
+
+void hugetlb_add_anon_rmap(struct folio *, struct vm_area_struct *,
unsigned long address, rmap_t flags);
-void hugepage_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
+void hugetlb_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
unsigned long address);
-static inline void __page_dup_rmap(struct page *page, bool compound)
+/* See folio_try_dup_anon_rmap_*() */
+static inline int hugetlb_try_dup_anon_rmap(struct folio *folio,
+ struct vm_area_struct *vma)
{
- if (compound) {
- struct folio *folio = (struct folio *)page;
+ VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
+ VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
- VM_BUG_ON_PAGE(compound && !PageHead(page), page);
- atomic_inc(&folio->_entire_mapcount);
- } else {
- atomic_inc(&page->_mapcount);
+ if (PageAnonExclusive(&folio->page)) {
+ if (unlikely(folio_needs_cow_for_dma(vma, folio)))
+ return -EBUSY;
+ ClearPageAnonExclusive(&folio->page);
}
+ atomic_inc(&folio->_entire_mapcount);
+ return 0;
+}
+
+/* See folio_try_share_anon_rmap_*() */
+static inline int hugetlb_try_share_anon_rmap(struct folio *folio)
+{
+ VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
+ VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
+ VM_WARN_ON_FOLIO(!PageAnonExclusive(&folio->page), folio);
+
+ /* Paired with the memory barrier in try_grab_folio(). */
+ if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
+ smp_mb();
+
+ if (unlikely(folio_maybe_dma_pinned(folio)))
+ return -EBUSY;
+ ClearPageAnonExclusive(&folio->page);
+
+ /*
+ * This is conceptually a smp_wmb() paired with the smp_rmb() in
+ * gup_must_unshare().
+ */
+ if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
+ smp_mb__after_atomic();
+ return 0;
+}
+
+static inline void hugetlb_add_file_rmap(struct folio *folio)
+{
+ VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
+ VM_WARN_ON_FOLIO(folio_test_anon(folio), folio);
+
+ atomic_inc(&folio->_entire_mapcount);
+}
+
+static inline void hugetlb_remove_rmap(struct folio *folio)
+{
+ VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
+
+ atomic_dec(&folio->_entire_mapcount);
}
-static inline void page_dup_file_rmap(struct page *page, bool compound)
+static __always_inline void __folio_dup_file_rmap(struct folio *folio,
+ struct page *page, int nr_pages, enum rmap_level level)
{
- __page_dup_rmap(page, compound);
+ __folio_rmap_sanity_checks(folio, page, nr_pages, level);
+
+ switch (level) {
+ case RMAP_LEVEL_PTE:
+ do {
+ atomic_inc(&page->_mapcount);
+ } while (page++, --nr_pages > 0);
+ break;
+ case RMAP_LEVEL_PMD:
+ atomic_inc(&folio->_entire_mapcount);
+ break;
+ }
}
/**
- * page_try_dup_anon_rmap - try duplicating a mapping of an already mapped
- * anonymous page
- * @page: the page to duplicate the mapping for
- * @compound: the page is mapped as compound or as a small page
- * @vma: the source vma
+ * folio_dup_file_rmap_ptes - duplicate PTE mappings of a page range of a folio
+ * @folio: The folio to duplicate the mappings of
+ * @page: The first page to duplicate the mappings of
+ * @nr_pages: The number of pages of which the mapping will be duplicated
*
- * The caller needs to hold the PT lock and the vma->vma_mm->write_protect_seq.
+ * The page range of the folio is defined by [page, page + nr_pages)
*
- * Duplicating the mapping can only fail if the page may be pinned; device
- * private pages cannot get pinned and consequently this function cannot fail.
+ * The caller needs to hold the page table lock.
+ */
+static inline void folio_dup_file_rmap_ptes(struct folio *folio,
+ struct page *page, int nr_pages)
+{
+ __folio_dup_file_rmap(folio, page, nr_pages, RMAP_LEVEL_PTE);
+}
+#define folio_dup_file_rmap_pte(folio, page) \
+ folio_dup_file_rmap_ptes(folio, page, 1)
+
+/**
+ * folio_dup_file_rmap_pmd - duplicate a PMD mapping of a page range of a folio
+ * @folio: The folio to duplicate the mapping of
+ * @page: The first page to duplicate the mapping of
*
- * If duplicating the mapping succeeds, the page has to be mapped R/O into
- * the parent and the child. It must *not* get mapped writable after this call.
+ * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
*
- * Returns 0 if duplicating the mapping succeeded. Returns -EBUSY otherwise.
+ * The caller needs to hold the page table lock.
*/
-static inline int page_try_dup_anon_rmap(struct page *page, bool compound,
- struct vm_area_struct *vma)
+static inline void folio_dup_file_rmap_pmd(struct folio *folio,
+ struct page *page)
{
- VM_BUG_ON_PAGE(!PageAnon(page), page);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ __folio_dup_file_rmap(folio, page, HPAGE_PMD_NR, RMAP_LEVEL_PMD);
+#else
+ WARN_ON_ONCE(true);
+#endif
+}
- /*
- * No need to check+clear for already shared pages, including KSM
- * pages.
- */
- if (!PageAnonExclusive(page))
- goto dup;
+static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
+ struct page *page, int nr_pages, struct vm_area_struct *src_vma,
+ enum rmap_level level)
+{
+ bool maybe_pinned;
+ int i;
+
+ VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
+ __folio_rmap_sanity_checks(folio, page, nr_pages, level);
/*
- * If this page may have been pinned by the parent process,
- * don't allow to duplicate the mapping but instead require to e.g.,
- * copy the page immediately for the child so that we'll always
- * guarantee the pinned page won't be randomly replaced in the
+ * If this folio may have been pinned by the parent process,
+ * don't allow to duplicate the mappings but instead require to e.g.,
+ * copy the subpage immediately for the child so that we'll always
+ * guarantee the pinned folio won't be randomly replaced in the
* future on write faults.
*/
- if (likely(!is_device_private_page(page) &&
- unlikely(page_needs_cow_for_dma(vma, page))))
- return -EBUSY;
+ maybe_pinned = likely(!folio_is_device_private(folio)) &&
+ unlikely(folio_needs_cow_for_dma(src_vma, folio));
- ClearPageAnonExclusive(page);
/*
- * It's okay to share the anon page between both processes, mapping
- * the page R/O into both processes.
+ * No need to check+clear for already shared PTEs/PMDs of the
+ * folio. But if any page is PageAnonExclusive, we must fallback to
+ * copying if the folio maybe pinned.
*/
-dup:
- __page_dup_rmap(page, compound);
+ switch (level) {
+ case RMAP_LEVEL_PTE:
+ if (unlikely(maybe_pinned)) {
+ for (i = 0; i < nr_pages; i++)
+ if (PageAnonExclusive(page + i))
+ return -EBUSY;
+ }
+ do {
+ if (PageAnonExclusive(page))
+ ClearPageAnonExclusive(page);
+ atomic_inc(&page->_mapcount);
+ } while (page++, --nr_pages > 0);
+ break;
+ case RMAP_LEVEL_PMD:
+ if (PageAnonExclusive(page)) {
+ if (unlikely(maybe_pinned))
+ return -EBUSY;
+ ClearPageAnonExclusive(page);
+ }
+ atomic_inc(&folio->_entire_mapcount);
+ break;
+ }
return 0;
}
/**
- * page_try_share_anon_rmap - try marking an exclusive anonymous page possibly
- * shared to prepare for KSM or temporary unmapping
- * @page: the exclusive anonymous page to try marking possibly shared
+ * folio_try_dup_anon_rmap_ptes - try duplicating PTE mappings of a page range
+ * of a folio
+ * @folio: The folio to duplicate the mappings of
+ * @page: The first page to duplicate the mappings of
+ * @nr_pages: The number of pages of which the mapping will be duplicated
+ * @src_vma: The vm area from which the mappings are duplicated
*
- * The caller needs to hold the PT lock and has to have the page table entry
- * cleared/invalidated.
+ * The page range of the folio is defined by [page, page + nr_pages)
*
- * This is similar to page_try_dup_anon_rmap(), however, not used during fork()
- * to duplicate a mapping, but instead to prepare for KSM or temporarily
- * unmapping a page (swap, migration) via page_remove_rmap().
+ * The caller needs to hold the page table lock and the
+ * vma->vma_mm->write_protect_seq.
*
- * Marking the page shared can only fail if the page may be pinned; device
- * private pages cannot get pinned and consequently this function cannot fail.
+ * Duplicating the mappings can only fail if the folio may be pinned; device
+ * private folios cannot get pinned and consequently this function cannot fail
+ * for them.
*
- * Returns 0 if marking the page possibly shared succeeded. Returns -EBUSY
- * otherwise.
+ * If duplicating the mappings succeeded, the duplicated PTEs have to be R/O in
+ * the parent and the child. They must *not* be writable after this call
+ * succeeded.
+ *
+ * Returns 0 if duplicating the mappings succeeded. Returns -EBUSY otherwise.
+ */
+static inline int folio_try_dup_anon_rmap_ptes(struct folio *folio,
+ struct page *page, int nr_pages, struct vm_area_struct *src_vma)
+{
+ return __folio_try_dup_anon_rmap(folio, page, nr_pages, src_vma,
+ RMAP_LEVEL_PTE);
+}
+#define folio_try_dup_anon_rmap_pte(folio, page, vma) \
+ folio_try_dup_anon_rmap_ptes(folio, page, 1, vma)
+
+/**
+ * folio_try_dup_anon_rmap_pmd - try duplicating a PMD mapping of a page range
+ * of a folio
+ * @folio: The folio to duplicate the mapping of
+ * @page: The first page to duplicate the mapping of
+ * @src_vma: The vm area from which the mapping is duplicated
+ *
+ * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
+ *
+ * The caller needs to hold the page table lock and the
+ * vma->vma_mm->write_protect_seq.
+ *
+ * Duplicating the mapping can only fail if the folio may be pinned; device
+ * private folios cannot get pinned and consequently this function cannot fail
+ * for them.
+ *
+ * If duplicating the mapping succeeds, the duplicated PMD has to be R/O in
+ * the parent and the child. They must *not* be writable after this call
+ * succeeded.
+ *
+ * Returns 0 if duplicating the mapping succeeded. Returns -EBUSY otherwise.
*/
-static inline int page_try_share_anon_rmap(struct page *page)
+static inline int folio_try_dup_anon_rmap_pmd(struct folio *folio,
+ struct page *page, struct vm_area_struct *src_vma)
{
- VM_BUG_ON_PAGE(!PageAnon(page) || !PageAnonExclusive(page), page);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ return __folio_try_dup_anon_rmap(folio, page, HPAGE_PMD_NR, src_vma,
+ RMAP_LEVEL_PMD);
+#else
+ WARN_ON_ONCE(true);
+ return -EBUSY;
+#endif
+}
+
+static __always_inline int __folio_try_share_anon_rmap(struct folio *folio,
+ struct page *page, int nr_pages, enum rmap_level level)
+{
+ VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
+ VM_WARN_ON_FOLIO(!PageAnonExclusive(page), folio);
+ __folio_rmap_sanity_checks(folio, page, nr_pages, level);
- /* device private pages cannot get pinned via GUP. */
- if (unlikely(is_device_private_page(page))) {
+ /* device private folios cannot get pinned via GUP. */
+ if (unlikely(folio_is_device_private(folio))) {
ClearPageAnonExclusive(page);
return 0;
}
@@ -347,7 +544,7 @@ static inline int page_try_share_anon_rmap(struct page *page)
if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
smp_mb();
- if (unlikely(page_maybe_dma_pinned(page)))
+ if (unlikely(folio_maybe_dma_pinned(folio)))
return -EBUSY;
ClearPageAnonExclusive(page);
@@ -360,6 +557,68 @@ static inline int page_try_share_anon_rmap(struct page *page)
return 0;
}
+/**
+ * folio_try_share_anon_rmap_pte - try marking an exclusive anonymous page
+ * mapped by a PTE possibly shared to prepare
+ * for KSM or temporary unmapping
+ * @folio: The folio to share a mapping of
+ * @page: The mapped exclusive page
+ *
+ * The caller needs to hold the page table lock and has to have the page table
+ * entries cleared/invalidated.
+ *
+ * This is similar to folio_try_dup_anon_rmap_pte(), however, not used during
+ * fork() to duplicate mappings, but instead to prepare for KSM or temporarily
+ * unmapping parts of a folio (swap, migration) via folio_remove_rmap_pte().
+ *
+ * Marking the mapped page shared can only fail if the folio may be pinned;
+ * device private folios cannot get pinned and consequently this function cannot
+ * fail.
+ *
+ * Returns 0 if marking the mapped page possibly shared succeeded. Returns
+ * -EBUSY otherwise.
+ */
+static inline int folio_try_share_anon_rmap_pte(struct folio *folio,
+ struct page *page)
+{
+ return __folio_try_share_anon_rmap(folio, page, 1, RMAP_LEVEL_PTE);
+}
+
+/**
+ * folio_try_share_anon_rmap_pmd - try marking an exclusive anonymous page
+ * range mapped by a PMD possibly shared to
+ * prepare for temporary unmapping
+ * @folio: The folio to share the mapping of
+ * @page: The first page to share the mapping of
+ *
+ * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
+ *
+ * The caller needs to hold the page table lock and has to have the page table
+ * entries cleared/invalidated.
+ *
+ * This is similar to folio_try_dup_anon_rmap_pmd(), however, not used during
+ * fork() to duplicate a mapping, but instead to prepare for temporarily
+ * unmapping parts of a folio (swap, migration) via folio_remove_rmap_pmd().
+ *
+ * Marking the mapped pages shared can only fail if the folio may be pinned;
+ * device private folios cannot get pinned and consequently this function cannot
+ * fail.
+ *
+ * Returns 0 if marking the mapped pages possibly shared succeeded. Returns
+ * -EBUSY otherwise.
+ */
+static inline int folio_try_share_anon_rmap_pmd(struct folio *folio,
+ struct page *page)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ return __folio_try_share_anon_rmap(folio, page, HPAGE_PMD_NR,
+ RMAP_LEVEL_PMD);
+#else
+ WARN_ON_ONCE(true);
+ return -EBUSY;
+#endif
+}
+
/*
* Called from mm/vmscan.c to handle paging out
*/
@@ -477,7 +736,6 @@ struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
#define anon_vma_init() do {} while (0)
#define anon_vma_prepare(vma) (0)
-#define anon_vma_link(vma) do {} while (0)
static inline int folio_referenced(struct folio *folio, int is_locked,
struct mem_cgroup *memcg,
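
At fork() time the new API duplicates a batch of PTE mappings of one anon folio in a single call and reports -EBUSY when the folio may be pinned, in which case the caller copies instead of sharing. A compressed sketch of that control flow (the surrounding copy loop and error code are hypothetical):

    /* duplicating nr PTEs of one anon folio into the child */
    if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page, nr, src_vma)))
        return -EBUSY;  /* caller falls back to copying the pages */
    /* success: the duplicated PTEs must stay read-only in both */
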
diff --git a/include/linux/root_dev.h b/include/linux/root_dev.h
index 4e78651371ba..847c9a06101b 100644
--- a/include/linux/root_dev.h
+++ b/include/linux/root_dev.h
@@ -9,15 +9,8 @@
enum {
Root_NFS = MKDEV(UNNAMED_MAJOR, 255),
Root_CIFS = MKDEV(UNNAMED_MAJOR, 254),
+ Root_Generic = MKDEV(UNNAMED_MAJOR, 253),
Root_RAM0 = MKDEV(RAMDISK_MAJOR, 0),
- Root_RAM1 = MKDEV(RAMDISK_MAJOR, 1),
- Root_FD0 = MKDEV(FLOPPY_MAJOR, 0),
- Root_HDA1 = MKDEV(IDE0_MAJOR, 1),
- Root_HDA2 = MKDEV(IDE0_MAJOR, 2),
- Root_SDA1 = MKDEV(SCSI_DISK0_MAJOR, 1),
- Root_SDA2 = MKDEV(SCSI_DISK0_MAJOR, 2),
- Root_HDC1 = MKDEV(IDE1_MAJOR, 1),
- Root_SR0 = MKDEV(SCSI_CDROM_MAJOR, 0),
};
extern dev_t ROOT_DEV;
diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h
index 523c98b96cb4..90d8e4475f80 100644
--- a/include/linux/rpmsg.h
+++ b/include/linux/rpmsg.h
@@ -64,12 +64,14 @@ struct rpmsg_device {
};
typedef int (*rpmsg_rx_cb_t)(struct rpmsg_device *, void *, int, void *, u32);
+typedef int (*rpmsg_flowcontrol_cb_t)(struct rpmsg_device *, void *, bool);
/**
* struct rpmsg_endpoint - binds a local rpmsg address to its user
* @rpdev: rpmsg channel device
* @refcount: when this drops to zero, the ept is deallocated
* @cb: rx callback handler
+ * @flow_cb: remote flow control callback handler
* @cb_lock: must be taken before accessing/changing @cb
* @addr: local rpmsg address
* @priv: private data for the driver's use
@@ -92,6 +94,7 @@ struct rpmsg_endpoint {
struct rpmsg_device *rpdev;
struct kref refcount;
rpmsg_rx_cb_t cb;
+ rpmsg_flowcontrol_cb_t flow_cb;
struct mutex cb_lock;
u32 addr;
void *priv;
@@ -106,6 +109,7 @@ struct rpmsg_endpoint {
* @probe: invoked when a matching rpmsg channel (i.e. device) is found
* @remove: invoked when the rpmsg channel is removed
* @callback: invoked when an inbound message is received on the channel
+ * @flowcontrol: invoked when a remote-side flow control request is received
*/
struct rpmsg_driver {
struct device_driver drv;
@@ -113,6 +117,7 @@ struct rpmsg_driver {
int (*probe)(struct rpmsg_device *dev);
void (*remove)(struct rpmsg_device *dev);
int (*callback)(struct rpmsg_device *, void *, int, void *, u32);
+ int (*flowcontrol)(struct rpmsg_device *, void *, bool);
};
static inline u16 rpmsg16_to_cpu(struct rpmsg_device *rpdev, __rpmsg16 val)
@@ -192,6 +197,8 @@ __poll_t rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp,
ssize_t rpmsg_get_mtu(struct rpmsg_endpoint *ept);
+int rpmsg_set_flow_control(struct rpmsg_endpoint *ept, bool pause, u32 dst);
+
#else
static inline int rpmsg_register_device_override(struct rpmsg_device *rpdev,
@@ -316,6 +323,14 @@ static inline ssize_t rpmsg_get_mtu(struct rpmsg_endpoint *ept)
return -ENXIO;
}
+static inline int rpmsg_set_flow_control(struct rpmsg_endpoint *ept, bool pause, u32 dst)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+
+ return -ENXIO;
+}
+
#endif /* IS_ENABLED(CONFIG_RPMSG) */
/* use a macro to avoid include chaining to get THIS_MODULE */
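
A driver opting into flow control implements the new callback and can signal its own state with rpmsg_set_flow_control(). A sketch (driver names are made up; my_probe and my_rx_cb are assumed defined elsewhere, and the bool semantics follow the typedef above):

    static int my_flowctl_cb(struct rpmsg_device *rpdev, void *priv, bool pause)
    {
        /* remote asked us to pause (true) or resume (false) transmission */
        return 0;
    }

    static struct rpmsg_driver my_rpmsg_driver = {
        .drv.name    = KBUILD_MODNAME,
        .probe       = my_probe,
        .callback    = my_rx_cb,
        .flowcontrol = my_flowctl_cb,
    };

    /* ask the remote side to stop sending to this endpoint */
    ret = rpmsg_set_flow_control(ept, true, rpdev->dst);
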
diff --git a/include/linux/rseq.h b/include/linux/rseq.h
new file mode 100644
index 000000000000..bc8af3eb5598
--- /dev/null
+++ b/include/linux/rseq.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+#ifndef _LINUX_RSEQ_H
+#define _LINUX_RSEQ_H
+
+#ifdef CONFIG_RSEQ
+
+#include <linux/preempt.h>
+#include <linux/sched.h>
+
+/*
+ * Map the event mask on the user-space ABI enum rseq_cs_flags
+ * for direct mask checks.
+ */
+enum rseq_event_mask_bits {
+ RSEQ_EVENT_PREEMPT_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
+ RSEQ_EVENT_SIGNAL_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
+ RSEQ_EVENT_MIGRATE_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
+};
+
+enum rseq_event_mask {
+ RSEQ_EVENT_PREEMPT = (1U << RSEQ_EVENT_PREEMPT_BIT),
+ RSEQ_EVENT_SIGNAL = (1U << RSEQ_EVENT_SIGNAL_BIT),
+ RSEQ_EVENT_MIGRATE = (1U << RSEQ_EVENT_MIGRATE_BIT),
+};
+
+static inline void rseq_set_notify_resume(struct task_struct *t)
+{
+ if (t->rseq)
+ set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
+}
+
+void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
+
+static inline void rseq_handle_notify_resume(struct ksignal *ksig,
+ struct pt_regs *regs)
+{
+ if (current->rseq)
+ __rseq_handle_notify_resume(ksig, regs);
+}
+
+static inline void rseq_signal_deliver(struct ksignal *ksig,
+ struct pt_regs *regs)
+{
+ preempt_disable();
+ __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
+ preempt_enable();
+ rseq_handle_notify_resume(ksig, regs);
+}
+
+/* rseq_preempt() requires preemption to be disabled. */
+static inline void rseq_preempt(struct task_struct *t)
+{
+ __set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
+ rseq_set_notify_resume(t);
+}
+
+/* rseq_migrate() requires preemption to be disabled. */
+static inline void rseq_migrate(struct task_struct *t)
+{
+ __set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
+ rseq_set_notify_resume(t);
+}
+
+/*
+ * If the parent process has a registered restartable sequences area,
+ * the child inherits it. Unregister rseq for a clone with CLONE_VM set.
+ */
+static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
+{
+ if (clone_flags & CLONE_VM) {
+ t->rseq = NULL;
+ t->rseq_len = 0;
+ t->rseq_sig = 0;
+ t->rseq_event_mask = 0;
+ } else {
+ t->rseq = current->rseq;
+ t->rseq_len = current->rseq_len;
+ t->rseq_sig = current->rseq_sig;
+ t->rseq_event_mask = current->rseq_event_mask;
+ }
+}
+
+static inline void rseq_execve(struct task_struct *t)
+{
+ t->rseq = NULL;
+ t->rseq_len = 0;
+ t->rseq_sig = 0;
+ t->rseq_event_mask = 0;
+}
+
+#else
+
+static inline void rseq_set_notify_resume(struct task_struct *t)
+{
+}
+static inline void rseq_handle_notify_resume(struct ksignal *ksig,
+ struct pt_regs *regs)
+{
+}
+static inline void rseq_signal_deliver(struct ksignal *ksig,
+ struct pt_regs *regs)
+{
+}
+static inline void rseq_preempt(struct task_struct *t)
+{
+}
+static inline void rseq_migrate(struct task_struct *t)
+{
+}
+static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
+{
+}
+static inline void rseq_execve(struct task_struct *t)
+{
+}
+
+#endif
+
+#ifdef CONFIG_DEBUG_RSEQ
+
+void rseq_syscall(struct pt_regs *regs);
+
+#else
+
+static inline void rseq_syscall(struct pt_regs *regs)
+{
+}
+
+#endif
+
+#endif /* _LINUX_RSEQ_H */
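[Editor's note: for orientation only, a sketch of where the hooks above are expected to sit; the real call sites live in kernel/sched/ and kernel/fork.c, and the my_* wrappers are hypothetical.]

	static void my_migrate_hook(struct task_struct *next)
	{
		/* caller has preemption disabled, as rseq_migrate() requires */
		rseq_migrate(next);
	}

	static void my_fork_hook(struct task_struct *child,
				 unsigned long clone_flags)
	{
		rseq_fork(child, clone_flags);
	}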
diff --git a/include/linux/rslib.h b/include/linux/rslib.h
index 238bb85243d3..a04dacbdc8ae 100644
--- a/include/linux/rslib.h
+++ b/include/linux/rslib.h
@@ -10,7 +10,6 @@
#ifndef _RSLIB_H_
#define _RSLIB_H_
-#include <linux/list.h>
#include <linux/types.h> /* for gfp_t */
#include <linux/gfp.h> /* for GFP_KERNEL */
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 1fd9c6a21ebe..5f8e438a0312 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -146,6 +146,7 @@ struct rtc_device {
time64_t range_min;
timeu64_t range_max;
+ timeu64_t alarm_offset_max;
time64_t start_secs;
time64_t offset_secs;
bool set_start_time;
@@ -224,6 +225,23 @@ static inline bool is_leap_year(unsigned int year)
return (!(year % 4) && (year % 100)) || !(year % 400);
}
+/**
+ * rtc_bound_alarmtime() - Return alarm time bound by rtc limit
+ * @rtc: Pointer to rtc device structure
+ * @requested: Requested alarm timeout
+ *
+ * Return: Alarm timeout bound by maximum alarm time supported by rtc.
+ */
+static inline ktime_t rtc_bound_alarmtime(struct rtc_device *rtc,
+ ktime_t requested)
+{
+ if (rtc->alarm_offset_max &&
+ rtc->alarm_offset_max * MSEC_PER_SEC < ktime_to_ms(requested))
+ return ms_to_ktime(rtc->alarm_offset_max * MSEC_PER_SEC);
+
+ return requested;
+}
+
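[Editor's note: a small usage sketch, assuming an RTC whose alarm can only be programmed up to 24 hours ahead; the bound and names are illustrative.]

	static ktime_t my_clamp_alarm(struct rtc_device *rtc, ktime_t requested)
	{
		rtc->alarm_offset_max = 24 * 60 * 60;	/* seconds; set once at probe */
		return rtc_bound_alarmtime(rtc, requested);
	}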
#define devm_rtc_register_device(device) \
__devm_rtc_register_device(THIS_MODULE, device)
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 3d6cf306cd55..410529fca18b 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -10,6 +10,13 @@
#include <uapi/linux/rtnetlink.h>
extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo);
+
+static inline int rtnetlink_maybe_send(struct sk_buff *skb, struct net *net,
+ u32 pid, u32 group, int echo)
+{
+ return !skb ? 0 : rtnetlink_send(skb, net, pid, group, echo);
+}
+
extern int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid);
extern void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid,
u32 group, const struct nlmsghdr *nlh, gfp_t flags);
@@ -72,6 +79,18 @@ static inline bool lockdep_rtnl_is_held(void)
#define rtnl_dereference(p) \
rcu_dereference_protected(p, lockdep_rtnl_is_held())
+/**
+ * rcu_replace_pointer_rtnl - replace an RCU pointer under rtnl_lock, returning
+ * its old value
+ * @rp: RCU pointer, whose value is returned
+ * @p: regular pointer
+ *
+ * Perform a replacement under rtnl_lock, where @rp is an RCU-annotated
+ * pointer. The old value of @rp is returned, and @rp is set to @p.
+ */
+#define rcu_replace_pointer_rtnl(rp, p) \
+ rcu_replace_pointer(rp, p, lockdep_rtnl_is_held())
+
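[Editor's note: a sketch of the intended use, assuming a hypothetical my_cfg object freed via RCU.]

	struct my_cfg {
		struct rcu_head rcu;
		/* ... */
	};

	static struct my_cfg __rcu *active_cfg;

	static void my_cfg_swap(struct my_cfg *new_cfg)
	{
		struct my_cfg *old;

		rtnl_lock();
		old = rcu_replace_pointer_rtnl(active_cfg, new_cfg);
		rtnl_unlock();

		if (old)
			kfree_rcu(old, rcu);
	}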
static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev)
{
return rtnl_dereference(dev->ingress_queue);
@@ -130,4 +149,26 @@ extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
extern void rtnl_offload_xstats_notify(struct net_device *dev);
+static inline int rtnl_has_listeners(const struct net *net, u32 group)
+{
+ struct sock *rtnl = net->rtnl;
+
+ return netlink_has_listeners(rtnl, group);
+}
+
+/**
+ * rtnl_notify_needed - check if notification is needed
+ * @net: Pointer to the net namespace
+ * @nlflags: netlink ingress message flags
+ * @group: rtnl group
+ *
+ * Based on the ingress message flags and rtnl group, returns true
+ * if a notification is needed, false otherwise.
+ */
+static inline bool
+rtnl_notify_needed(const struct net *net, u16 nlflags, u32 group)
+{
+ return (nlflags & NLM_F_ECHO) || rtnl_has_listeners(net, group);
+}
+
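[Editor's note: a sketch of the intended fast path, skipping skb construction entirely when nobody would see the message; RTNLGRP_LINK is just an example group.]

	static void my_maybe_notify(struct net_device *dev, u16 nlflags)
	{
		if (!rtnl_notify_needed(dev_net(dev), nlflags, RTNLGRP_LINK))
			return;	/* no NLM_F_ECHO and no multicast listeners */

		/* ... allocate and send the notification ... */
	}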
#endif /* __LINUX_RTNETLINK_H */
diff --git a/include/linux/rtsx_pci.h b/include/linux/rtsx_pci.h
index 534038d962e4..4612ef09a0c7 100644
--- a/include/linux/rtsx_pci.h
+++ b/include/linux/rtsx_pci.h
@@ -60,6 +60,7 @@
#define SD_EXIST (1 << 16)
#define DELINK_INT GPIO0_INT
#define MS_OC_INT (1 << 23)
+#define SD_OVP_INT (1 << 23)
#define SD_OC_INT (1 << 22)
#define CARD_INT (XD_INT | MS_INT | SD_INT)
@@ -80,6 +81,7 @@
#define OC_INT_EN (1 << 23)
#define DELINK_INT_EN GPIO0_INT_EN
#define MS_OC_INT_EN (1 << 23)
+#define SD_OVP_INT_EN (1 << 23)
#define SD_OC_INT_EN (1 << 22)
#define RTSX_DUM_REG 0x1C
@@ -583,6 +585,7 @@
#define OBFF_DISABLE 0x00
#define CDRESUMECTL 0xFE52
+#define CDGW 0xFE53
#define WAKE_SEL_CTL 0xFE54
#define PCLK_CTL 0xFE55
#define PCLK_MODE_SEL 0x20
@@ -764,6 +767,9 @@
#define SD_VIO_LDO_1V8 0x40
#define SD_VIO_LDO_3V3 0x70
+#define RTS5264_AUTOLOAD_CFG2 0xFF7D
+#define RTS5264_CHIP_RST_N_SEL (1 << 6)
+
#define RTS5260_AUTOLOAD_CFG4 0xFF7F
#define RTS5260_MIMO_DISABLE 0x8A
/*RTS5261*/
@@ -1261,6 +1267,7 @@ struct rtsx_pcr {
u8 dma_error_count;
u8 ocp_stat;
u8 ocp_stat2;
+ u8 ovp_stat;
u8 rtd3_en;
};
@@ -1271,6 +1278,7 @@ struct rtsx_pcr {
#define PID_5260 0x5260
#define PID_5261 0x5261
#define PID_5228 0x5228
+#define PID_5264 0x5264
#define CHK_PCI_PID(pcr, pid) ((pcr)->pci->device == (pid))
#define PCI_VID(pcr) ((pcr)->pci->vendor)
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index efa5c324369a..9c29689ff505 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -15,6 +15,7 @@
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/err.h>
+#include <linux/cleanup.h>
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname) \
@@ -201,6 +202,13 @@ extern void up_read(struct rw_semaphore *sem);
*/
extern void up_write(struct rw_semaphore *sem);
+DEFINE_GUARD(rwsem_read, struct rw_semaphore *, down_read(_T), up_read(_T))
+DEFINE_GUARD_COND(rwsem_read, _try, down_read_trylock(_T))
+DEFINE_GUARD_COND(rwsem_read, _intr, down_read_interruptible(_T) == 0)
+
+DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T))
+DEFINE_GUARD_COND(rwsem_write, _try, down_write_trylock(_T))
+
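[Editor's note: with these definitions a lock scope can be written via <linux/cleanup.h>; a minimal sketch.]

	static int my_read_side(struct rw_semaphore *sem)
	{
		guard(rwsem_read)(sem);	/* down_read() now, up_read() at scope exit */
		/* ... read-side critical section ... */
		return 0;
	}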
/*
* downgrade write lock to read lock
*/
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 375a5e90d86a..77df3d7b18a6 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -16,7 +16,7 @@ struct scatterlist {
#ifdef CONFIG_NEED_SG_DMA_LENGTH
unsigned int dma_length;
#endif
-#ifdef CONFIG_PCI_P2PDMA
+#ifdef CONFIG_NEED_SG_DMA_FLAGS
unsigned int dma_flags;
#endif
};
@@ -141,6 +141,30 @@ static inline void sg_set_page(struct scatterlist *sg, struct page *page,
sg->length = len;
}
+/**
+ * sg_set_folio - Set sg entry to point at given folio
+ * @sg: SG entry
+ * @folio: The folio
+ * @len: Length of data
+ * @offset: Offset into folio
+ *
+ * Description:
+ * Use this function to set an sg entry pointing at a folio, never assign
+ * the folio directly. We encode sg table information in the lower bits
+ * of the folio pointer. See sg_page() for looking up the page belonging
+ * to an sg entry.
+ *
+ **/
+static inline void sg_set_folio(struct scatterlist *sg, struct folio *folio,
+ size_t len, size_t offset)
+{
+ WARN_ON_ONCE(len > UINT_MAX);
+ WARN_ON_ONCE(offset > UINT_MAX);
+ sg_assign_page(sg, &folio->page);
+ sg->offset = offset;
+ sg->length = len;
+}
+
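[Editor's note: a one-entry usage sketch; folio_size() covers the common whole-folio case.]

	static void my_fill_sg(struct scatterlist *sg, struct folio *folio)
	{
		sg_init_table(sg, 1);
		sg_set_folio(sg, folio, folio_size(folio), 0);
	}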
static inline struct page *sg_page(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
@@ -249,17 +273,18 @@ static inline void sg_unmark_end(struct scatterlist *sg)
}
/*
- * CONFGI_PCI_P2PDMA depends on CONFIG_64BIT which means there is 4 bytes
- * in struct scatterlist (assuming also CONFIG_NEED_SG_DMA_LENGTH is set).
- * Use this padding for DMA flags bits to indicate when a specific
- * dma address is a bus address.
+ * On 64-bit architectures there is a 4-byte padding in struct scatterlist
+ * (assuming also CONFIG_NEED_SG_DMA_LENGTH is set). Use this padding for DMA
+ * flags bits to indicate when a specific dma address is a bus address or the
+ * buffer may have been bounced via SWIOTLB.
*/
-#ifdef CONFIG_PCI_P2PDMA
+#ifdef CONFIG_NEED_SG_DMA_FLAGS
-#define SG_DMA_BUS_ADDRESS (1 << 0)
+#define SG_DMA_BUS_ADDRESS (1 << 0)
+#define SG_DMA_SWIOTLB (1 << 1)
/**
- * sg_dma_is_bus address - Return whether a given segment was marked
+ * sg_dma_is_bus_address - Return whether a given segment was marked
* as a bus address
* @sg: SG entry
*
@@ -267,13 +292,13 @@ static inline void sg_unmark_end(struct scatterlist *sg)
* Returns true if sg_dma_mark_bus_address() has been called on
* this segment.
**/
-static inline bool sg_is_dma_bus_address(struct scatterlist *sg)
+static inline bool sg_dma_is_bus_address(struct scatterlist *sg)
{
return sg->dma_flags & SG_DMA_BUS_ADDRESS;
}
/**
- * sg_dma_mark_bus address - Mark the scatterlist entry as a bus address
+ * sg_dma_mark_bus_address - Mark the scatterlist entry as a bus address
* @sg: SG entry
*
* Description:
@@ -299,9 +324,37 @@ static inline void sg_dma_unmark_bus_address(struct scatterlist *sg)
sg->dma_flags &= ~SG_DMA_BUS_ADDRESS;
}
+/**
+ * sg_dma_is_swiotlb - Return whether the scatterlist was marked for SWIOTLB
+ * bouncing
+ * @sg: SG entry
+ *
+ * Description:
+ * Returns true if the scatterlist was marked for SWIOTLB bouncing. Not all
+ * elements may have been bounced, so the caller would have to check
+ * individual SG entries with is_swiotlb_buffer().
+ */
+static inline bool sg_dma_is_swiotlb(struct scatterlist *sg)
+{
+ return sg->dma_flags & SG_DMA_SWIOTLB;
+}
+
+/**
+ * sg_dma_mark_swiotlb - Mark the scatterlist for SWIOTLB bouncing
+ * @sg: SG entry
+ *
+ * Description:
+ * Marks a scatterlist for SWIOTLB bouncing. Not all SG entries may be
+ * bounced.
+ */
+static inline void sg_dma_mark_swiotlb(struct scatterlist *sg)
+{
+ sg->dma_flags |= SG_DMA_SWIOTLB;
+}
+
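[Editor's note: a sketch of producer and consumer sides, assuming the DMA mapping code knows whether bouncing may have occurred.]

	static void my_mark_after_map(struct scatterlist *sgl, bool maybe_bounced)
	{
		if (maybe_bounced)
			sg_dma_mark_swiotlb(sgl);
	}

	static bool my_needs_per_entry_check(struct scatterlist *sgl)
	{
		/* entries must still be checked individually, see above */
		return sg_dma_is_swiotlb(sgl);
	}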
#else
-static inline bool sg_is_dma_bus_address(struct scatterlist *sg)
+static inline bool sg_dma_is_bus_address(struct scatterlist *sg)
{
return false;
}
@@ -311,8 +364,15 @@ static inline void sg_dma_mark_bus_address(struct scatterlist *sg)
static inline void sg_dma_unmark_bus_address(struct scatterlist *sg)
{
}
+static inline bool sg_dma_is_swiotlb(struct scatterlist *sg)
+{
+ return false;
+}
+static inline void sg_dma_mark_swiotlb(struct scatterlist *sg)
+{
+}
-#endif
+#endif /* CONFIG_NEED_SG_DMA_FLAGS */
/**
* sg_phys - Return physical address of an sg entry
diff --git a/include/linux/sched.h b/include/linux/sched.h
index eed5d65b8d1f..ffe8f618ab86 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -10,38 +10,45 @@
#include <uapi/linux/sched.h>
#include <asm/current.h>
-
-#include <linux/pid.h>
-#include <linux/sem.h>
+#include <asm/processor.h>
+#include <linux/thread_info.h>
+#include <linux/preempt.h>
+#include <linux/cpumask.h>
+
+#include <linux/cache.h>
+#include <linux/irqflags_types.h>
+#include <linux/smp_types.h>
+#include <linux/pid_types.h>
+#include <linux/sem_types.h>
#include <linux/shm.h>
#include <linux/kmsan_types.h>
-#include <linux/mutex.h>
-#include <linux/plist.h>
-#include <linux/hrtimer.h>
-#include <linux/irqflags.h>
-#include <linux/seccomp.h>
-#include <linux/nodemask.h>
-#include <linux/rcupdate.h>
-#include <linux/refcount.h>
+#include <linux/mutex_types.h>
+#include <linux/plist_types.h>
+#include <linux/hrtimer_types.h>
+#include <linux/timer_types.h>
+#include <linux/seccomp_types.h>
+#include <linux/nodemask_types.h>
+#include <linux/refcount_types.h>
#include <linux/resource.h>
#include <linux/latencytop.h>
#include <linux/sched/prio.h>
#include <linux/sched/types.h>
#include <linux/signal_types.h>
-#include <linux/syscall_user_dispatch.h>
+#include <linux/syscall_user_dispatch_types.h>
#include <linux/mm_types_task.h>
#include <linux/task_io_accounting.h>
-#include <linux/posix-timers.h>
-#include <linux/rseq.h>
-#include <linux/seqlock.h>
+#include <linux/posix-timers_types.h>
+#include <linux/restart_block.h>
+#include <uapi/linux/rseq.h>
+#include <linux/seqlock_types.h>
#include <linux/kcsan.h>
#include <linux/rv.h>
#include <linux/livepatch_sched.h>
+#include <linux/uidgid_types.h>
#include <asm/kmap_size.h>
/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
-struct backing_dev_info;
struct bio_list;
struct blk_plug;
struct bpf_local_storage;
@@ -64,26 +71,27 @@ struct robust_list_head;
struct root_domain;
struct rq;
struct sched_attr;
-struct sched_param;
+struct sched_dl_entity;
struct seq_file;
struct sighand_struct;
struct signal_struct;
struct task_delay_info;
struct task_group;
+struct task_struct;
struct user_event_mm;
/*
* Task state bitmask. NOTE! These bits are also
* encoded in fs/proc/array.c: get_task_state().
*
- * We have two separate sets of flags: task->state
+ * We have two separate sets of flags: task->__state
* is about runnability, while task->exit_state are
* about the task exiting. Confusing, but this way
* modifying one set can't modify the other one by
* mistake.
*/
-/* Used in tsk->state: */
+/* Used in tsk->__state: */
#define TASK_RUNNING 0x00000000
#define TASK_INTERRUPTIBLE 0x00000001
#define TASK_UNINTERRUPTIBLE 0x00000002
@@ -93,7 +101,7 @@ struct user_event_mm;
#define EXIT_DEAD 0x00000010
#define EXIT_ZOMBIE 0x00000020
#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
-/* Used in tsk->state again: */
+/* Used in tsk->__state again: */
#define TASK_PARKED 0x00000040
#define TASK_DEAD 0x00000080
#define TASK_WAKEKILL 0x00000100
@@ -174,7 +182,7 @@ struct user_event_mm;
#endif
/*
- * set_current_state() includes a barrier so that the write of current->state
+ * set_current_state() includes a barrier so that the write of current->__state
* is correctly serialised wrt the caller's subsequent test of whether to
* actually sleep:
*
@@ -197,9 +205,9 @@ struct user_event_mm;
* wake_up_state(p, TASK_UNINTERRUPTIBLE);
*
* where wake_up_state()/try_to_wake_up() executes a full memory barrier before
- * accessing p->state.
+ * accessing p->__state.
*
- * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
+ * Wakeup will do: if (@state & p->__state) p->__state = TASK_RUNNING, that is,
* once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
* TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
*
@@ -371,6 +379,10 @@ extern struct root_domain def_root_domain;
extern struct mutex sched_domains_mutex;
#endif
+struct sched_param {
+ int sched_priority;
+};
+
struct sched_info {
#ifdef CONFIG_SCHED_INFO
/* Cumulative counters: */
@@ -411,42 +423,6 @@ struct load_weight {
u32 inv_weight;
};
-/**
- * struct util_est - Estimation utilization of FAIR tasks
- * @enqueued: instantaneous estimated utilization of a task/cpu
- * @ewma: the Exponential Weighted Moving Average (EWMA)
- * utilization of a task
- *
- * Support data structure to track an Exponential Weighted Moving Average
- * (EWMA) of a FAIR task's utilization. New samples are added to the moving
- * average each time a task completes an activation. Sample's weight is chosen
- * so that the EWMA will be relatively insensitive to transient changes to the
- * task's workload.
- *
- * The enqueued attribute has a slightly different meaning for tasks and cpus:
- * - task: the task's util_avg at last task dequeue time
- * - cfs_rq: the sum of util_est.enqueued for each RUNNABLE task on that CPU
- * Thus, the util_est.enqueued of a task represents the contribution on the
- * estimated utilization of the CPU where that task is currently enqueued.
- *
- * Only for tasks we track a moving average of the past instantaneous
- * estimated utilization. This allows to absorb sporadic drops in utilization
- * of an otherwise almost periodic task.
- *
- * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg
- * updates. When a task is dequeued, its util_est should not be updated if its
- * util_avg has not been updated in the meantime.
- * This information is mapped into the MSB bit of util_est.enqueued at dequeue
- * time. Since max value of util_est.enqueued for a task is 1024 (PELT util_avg
- * for a task) it is safe to use MSB.
- */
-struct util_est {
- unsigned int enqueued;
- unsigned int ewma;
-#define UTIL_EST_WEIGHT_SHIFT 2
-#define UTIL_AVG_UNCHANGED 0x80000000
-} __attribute__((__aligned__(sizeof(u64))));
-
/*
* The load/runnable/util_avg accumulates an infinite geometric series
* (see __update_load_avg_cfs_rq() in kernel/sched/pelt.c).
@@ -501,9 +477,20 @@ struct sched_avg {
unsigned long load_avg;
unsigned long runnable_avg;
unsigned long util_avg;
- struct util_est util_est;
+ unsigned int util_est;
} ____cacheline_aligned;
+/*
+ * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg
+ * updates. When a task is dequeued, its util_est should not be updated if its
+ * util_avg has not been updated in the meantime.
+ * This information is mapped into the MSB bit of util_est at dequeue time.
+ * Since max value of util_est for a task is 1024 (PELT util_avg for a task)
+ * it is safe to use MSB.
+ */
+#define UTIL_EST_WEIGHT_SHIFT 2
+#define UTIL_AVG_UNCHANGED 0x80000000
+
struct sched_statistics {
#ifdef CONFIG_SCHEDSTATS
u64 wait_start;
@@ -521,7 +508,7 @@ struct sched_statistics {
u64 block_max;
s64 sum_block_runtime;
- u64 exec_max;
+ s64 exec_max;
u64 slice_max;
u64 nr_migrations_cold;
@@ -550,13 +537,18 @@ struct sched_entity {
/* For load-balancing: */
struct load_weight load;
struct rb_node run_node;
+ u64 deadline;
+ u64 min_vruntime;
+
struct list_head group_node;
unsigned int on_rq;
u64 exec_start;
u64 sum_exec_runtime;
- u64 vruntime;
u64 prev_sum_exec_runtime;
+ u64 vruntime;
+ s64 vlag;
+ u64 slice;
u64 nr_migrations;
@@ -600,6 +592,9 @@ struct sched_rt_entity {
#endif
} __randomize_layout;
+typedef bool (*dl_server_has_tasks_f)(struct sched_dl_entity *);
+typedef struct task_struct *(*dl_server_pick_f)(struct sched_dl_entity *);
+
struct sched_dl_entity {
struct rb_node rb_node;
@@ -647,6 +642,7 @@ struct sched_dl_entity {
unsigned int dl_yielded : 1;
unsigned int dl_non_contending : 1;
unsigned int dl_overrun : 1;
+ unsigned int dl_server : 1;
/*
* Bandwidth enforcement timer. Each -deadline task has its
@@ -661,7 +657,20 @@ struct sched_dl_entity {
* timer is needed to decrease the active utilization at the correct
* time.
*/
- struct hrtimer inactive_timer;
+ struct hrtimer inactive_timer;
+
+ /*
+ * Bits for DL-server functionality. Also see the comment near
+ * dl_server_update().
+ *
+ * @rq the runqueue this server is for
+ *
+	 * @server_has_tasks() returns true if @server_pick returns a
+ * runnable task.
+ */
+ struct rq *rq;
+ dl_server_has_tasks_f server_has_tasks;
+ dl_server_pick_f server_pick;
#ifdef CONFIG_RT_MUTEXES
/*
@@ -746,10 +755,8 @@ struct task_struct {
#endif
unsigned int __state;
-#ifdef CONFIG_PREEMPT_RT
/* saved state for "spinlock sleepers" */
unsigned int saved_state;
-#endif
/*
* This begins the randomizable portion of task_struct. Only
@@ -790,6 +797,7 @@ struct task_struct {
struct sched_entity se;
struct sched_rt_entity rt;
struct sched_dl_entity dl;
+ struct sched_dl_entity *dl_server;
const struct sched_class *sched_class;
#ifdef CONFIG_SCHED_CORE
@@ -871,6 +879,7 @@ struct task_struct {
struct mm_struct *mm;
struct mm_struct *active_mm;
+ struct address_space *faults_disabled_mapping;
int exit_state;
int exit_code;
@@ -907,8 +916,11 @@ struct task_struct {
* ->sched_remote_wakeup gets used, so it can be in this word.
*/
unsigned sched_remote_wakeup:1;
+#ifdef CONFIG_RT_MUTEXES
+ unsigned sched_rt_mutex:1;
+#endif
- /* Bit to tell LSMs we're in execve(): */
+ /* Bit to tell TOMOYO we're in execve(): */
unsigned in_execve:1;
unsigned in_iowait:1;
#ifndef TIF_RESTORE_SIGMASK
@@ -945,7 +957,7 @@ struct task_struct {
/* Recursion prevention for eventfd_signal() */
unsigned in_eventfd:1;
#endif
-#ifdef CONFIG_IOMMU_SVA
+#ifdef CONFIG_ARCH_HAS_CPU_PASID
unsigned pasid_activated:1;
#endif
#ifdef CONFIG_CPU_SUP_INTEL
@@ -998,7 +1010,6 @@ struct task_struct {
/* PID/PID hash table linkage. */
struct pid *thread_pid;
struct hlist_node pid_links[PIDTYPE_MAX];
- struct list_head thread_group;
struct list_head thread_node;
struct completion *vfork_done;
@@ -1186,8 +1197,6 @@ struct task_struct {
/* VM state: */
struct reclaim_state *reclaim_state;
- struct backing_dev_info *backing_dev_info;
-
struct io_context *io_context;
#ifdef CONFIG_COMPACTION
@@ -1441,6 +1450,10 @@ struct task_struct {
struct mem_cgroup *active_memcg;
#endif
+#ifdef CONFIG_MEMCG_KMEM
+ struct obj_cgroup *objcg;
+#endif
+
#ifdef CONFIG_BLK_CGROUP
struct gendisk *throttle_disk;
#endif
@@ -1551,114 +1564,6 @@ struct task_struct {
*/
};
-static inline struct pid *task_pid(struct task_struct *task)
-{
- return task->thread_pid;
-}
-
-/*
- * the helpers to get the task's different pids as they are seen
- * from various namespaces
- *
- * task_xid_nr() : global id, i.e. the id seen from the init namespace;
- * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of
- * current.
- * task_xid_nr_ns() : id seen from the ns specified;
- *
- * see also pid_nr() etc in include/linux/pid.h
- */
-pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);
-
-static inline pid_t task_pid_nr(struct task_struct *tsk)
-{
- return tsk->pid;
-}
-
-static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
-{
- return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
-}
-
-static inline pid_t task_pid_vnr(struct task_struct *tsk)
-{
- return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
-}
-
-
-static inline pid_t task_tgid_nr(struct task_struct *tsk)
-{
- return tsk->tgid;
-}
-
-/**
- * pid_alive - check that a task structure is not stale
- * @p: Task structure to be checked.
- *
- * Test if a process is not yet dead (at most zombie state)
- * If pid_alive fails, then pointers within the task structure
- * can be stale and must not be dereferenced.
- *
- * Return: 1 if the process is alive. 0 otherwise.
- */
-static inline int pid_alive(const struct task_struct *p)
-{
- return p->thread_pid != NULL;
-}
-
-static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
-{
- return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
-}
-
-static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
-{
- return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
-}
-
-
-static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
-{
- return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
-}
-
-static inline pid_t task_session_vnr(struct task_struct *tsk)
-{
- return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
-}
-
-static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
-{
- return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns);
-}
-
-static inline pid_t task_tgid_vnr(struct task_struct *tsk)
-{
- return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL);
-}
-
-static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
-{
- pid_t pid = 0;
-
- rcu_read_lock();
- if (pid_alive(tsk))
- pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
- rcu_read_unlock();
-
- return pid;
-}
-
-static inline pid_t task_ppid_nr(const struct task_struct *tsk)
-{
- return task_ppid_nr_ns(tsk, &init_pid_ns);
-}
-
-/* Obsolete, do not use: */
-static inline pid_t task_pgrp_nr(struct task_struct *tsk)
-{
- return task_pgrp_nr_ns(tsk, &init_pid_ns);
-}
-
#define TASK_REPORT_IDLE (TASK_REPORT + 1)
#define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1)
@@ -1669,7 +1574,7 @@ static inline unsigned int __task_state_index(unsigned int tsk_state,
BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);
- if (tsk_state == TASK_IDLE)
+ if ((tsk_state & TASK_IDLE) == TASK_IDLE)
state = TASK_REPORT_IDLE;
/*
@@ -1677,7 +1582,7 @@ static inline unsigned int __task_state_index(unsigned int tsk_state,
* to userspace, we can make this appear as if the task has gone through
* a regular rt_mutex_lock() call.
*/
- if (tsk_state == TASK_RTLOCK_WAIT)
+ if (tsk_state & TASK_RTLOCK_WAIT)
state = TASK_UNINTERRUPTIBLE;
return fls(state);
@@ -1702,20 +1607,6 @@ static inline char task_state_to_char(struct task_struct *tsk)
return task_index_to_char(task_state_index(tsk));
}
-/**
- * is_global_init - check if a task structure is init. Since init
- * is free to have sub-threads we need to check tgid.
- * @tsk: Task structure to be checked.
- *
- * Check if a task structure is the first user space task the kernel created.
- *
- * Return: 1 if the task structure is init. 0 otherwise.
- */
-static inline int is_global_init(struct task_struct *tsk)
-{
- return task_tgid_nr(tsk) == 1;
-}
-
extern struct pid *cad_pid;
/*
@@ -1852,9 +1743,21 @@ current_restore_flags(unsigned long orig_flags, unsigned long flags)
}
extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
-extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_effective_cpus);
+extern int task_can_attach(struct task_struct *p);
+extern int dl_bw_alloc(int cpu, u64 dl_bw);
+extern void dl_bw_free(int cpu, u64 dl_bw);
#ifdef CONFIG_SMP
+
+/* do_set_cpus_allowed() - consider using set_cpus_allowed_ptr() instead */
extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
+
+/**
+ * set_cpus_allowed_ptr - set CPU affinity mask of a task
+ * @p: the task
+ * @new_mask: CPU affinity mask
+ *
+ * Return: zero if successful, or a negative error code
+ */
extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
extern int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node);
extern void release_user_cpus_ptr(struct task_struct *p);
@@ -1933,9 +1836,7 @@ extern void ia64_set_curr_task(int cpu, struct task_struct *p);
void yield(void);
union thread_union {
-#ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK
struct task_struct task;
-#endif
#ifndef CONFIG_THREAD_INFO_IN_TASK
struct thread_info thread_info;
#endif
@@ -2006,15 +1907,12 @@ static __always_inline void scheduler_ipi(void)
*/
preempt_fold_need_resched();
}
-extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state);
#else
static inline void scheduler_ipi(void) { }
-static inline unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
-{
- return 1;
-}
#endif
+extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state);
+
/*
* Set thread flags in other task's structures.
* See asm/thread_info.h for TIF_xxxx flags available:
@@ -2158,15 +2056,6 @@ extern int __cond_resched_rwlock_write(rwlock_t *lock);
__cond_resched_rwlock_write(lock); \
})
-static inline void cond_resched_rcu(void)
-{
-#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
- rcu_read_unlock();
- cond_resched();
- rcu_read_lock();
-#endif
-}
-
#ifdef CONFIG_PREEMPT_DYNAMIC
extern bool preempt_model_none(void);
@@ -2208,37 +2097,6 @@ static inline bool preempt_model_preemptible(void)
return preempt_model_full() || preempt_model_rt();
}
-/*
- * Does a critical section need to be broken due to another
- * task waiting?: (technically does not depend on CONFIG_PREEMPTION,
- * but a general need for low latency)
- */
-static inline int spin_needbreak(spinlock_t *lock)
-{
-#ifdef CONFIG_PREEMPTION
- return spin_is_contended(lock);
-#else
- return 0;
-#endif
-}
-
-/*
- * Check if a rwlock is contended.
- * Returns non-zero if there is another task waiting on the rwlock.
- * Returns zero if the lock is not contended or the system / underlying
- * rwlock implementation does not support contention detection.
- * Technically does not depend on CONFIG_PREEMPTION, but a general need
- * for low latency.
- */
-static inline int rwlock_needbreak(rwlock_t *lock)
-{
-#ifdef CONFIG_PREEMPTION
- return rwlock_is_contended(lock);
-#else
- return 0;
-#endif
-}
-
static __always_inline bool need_resched(void)
{
return unlikely(tif_need_resched());
@@ -2273,6 +2131,8 @@ extern bool sched_task_on_rq(struct task_struct *p);
extern unsigned long get_wchan(struct task_struct *p);
extern struct task_struct *cpu_curr_snapshot(int cpu);
+#include <linux/spinlock.h>
+
/*
* In order to reduce various lock holder preemption latencies provide an
* interface to see if a vCPU is currently running or not.
@@ -2309,137 +2169,16 @@ static inline bool owner_on_cpu(struct task_struct *owner)
unsigned long sched_cpu_util(int cpu);
#endif /* CONFIG_SMP */
-#ifdef CONFIG_RSEQ
-
-/*
- * Map the event mask on the user-space ABI enum rseq_cs_flags
- * for direct mask checks.
- */
-enum rseq_event_mask_bits {
- RSEQ_EVENT_PREEMPT_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
- RSEQ_EVENT_SIGNAL_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
- RSEQ_EVENT_MIGRATE_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
-};
-
-enum rseq_event_mask {
- RSEQ_EVENT_PREEMPT = (1U << RSEQ_EVENT_PREEMPT_BIT),
- RSEQ_EVENT_SIGNAL = (1U << RSEQ_EVENT_SIGNAL_BIT),
- RSEQ_EVENT_MIGRATE = (1U << RSEQ_EVENT_MIGRATE_BIT),
-};
-
-static inline void rseq_set_notify_resume(struct task_struct *t)
-{
- if (t->rseq)
- set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
-}
-
-void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
-
-static inline void rseq_handle_notify_resume(struct ksignal *ksig,
- struct pt_regs *regs)
-{
- if (current->rseq)
- __rseq_handle_notify_resume(ksig, regs);
-}
-
-static inline void rseq_signal_deliver(struct ksignal *ksig,
- struct pt_regs *regs)
-{
- preempt_disable();
- __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
- preempt_enable();
- rseq_handle_notify_resume(ksig, regs);
-}
-
-/* rseq_preempt() requires preemption to be disabled. */
-static inline void rseq_preempt(struct task_struct *t)
-{
- __set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
- rseq_set_notify_resume(t);
-}
-
-/* rseq_migrate() requires preemption to be disabled. */
-static inline void rseq_migrate(struct task_struct *t)
-{
- __set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
- rseq_set_notify_resume(t);
-}
-
-/*
- * If parent process has a registered restartable sequences area, the
- * child inherits. Unregister rseq for a clone with CLONE_VM set.
- */
-static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
-{
- if (clone_flags & CLONE_VM) {
- t->rseq = NULL;
- t->rseq_len = 0;
- t->rseq_sig = 0;
- t->rseq_event_mask = 0;
- } else {
- t->rseq = current->rseq;
- t->rseq_len = current->rseq_len;
- t->rseq_sig = current->rseq_sig;
- t->rseq_event_mask = current->rseq_event_mask;
- }
-}
-
-static inline void rseq_execve(struct task_struct *t)
-{
- t->rseq = NULL;
- t->rseq_len = 0;
- t->rseq_sig = 0;
- t->rseq_event_mask = 0;
-}
-
-#else
-
-static inline void rseq_set_notify_resume(struct task_struct *t)
-{
-}
-static inline void rseq_handle_notify_resume(struct ksignal *ksig,
- struct pt_regs *regs)
-{
-}
-static inline void rseq_signal_deliver(struct ksignal *ksig,
- struct pt_regs *regs)
-{
-}
-static inline void rseq_preempt(struct task_struct *t)
-{
-}
-static inline void rseq_migrate(struct task_struct *t)
-{
-}
-static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
-{
-}
-static inline void rseq_execve(struct task_struct *t)
-{
-}
-
-#endif
-
-#ifdef CONFIG_DEBUG_RSEQ
-
-void rseq_syscall(struct pt_regs *regs);
-
-#else
-
-static inline void rseq_syscall(struct pt_regs *regs)
-{
-}
-
-#endif
-
#ifdef CONFIG_SCHED_CORE
extern void sched_core_free(struct task_struct *tsk);
extern void sched_core_fork(struct task_struct *p);
extern int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,
unsigned long uaddr);
+extern int sched_core_idle_cpu(int cpu);
#else
static inline void sched_core_free(struct task_struct *tsk) { }
static inline void sched_core_fork(struct task_struct *p) { }
+static inline int sched_core_idle_cpu(int cpu) { return idle_cpu(cpu); }
#endif
extern void sched_set_stop_task(int cpu, struct task_struct *stop);
diff --git a/include/linux/sched/clock.h b/include/linux/sched/clock.h
index ca008f7d3615..196f0ca351a2 100644
--- a/include/linux/sched/clock.h
+++ b/include/linux/sched/clock.h
@@ -12,7 +12,16 @@
*
* Please use one of the three interfaces below.
*/
-extern unsigned long long notrace sched_clock(void);
+extern u64 sched_clock(void);
+
+#if defined(CONFIG_ARCH_WANTS_NO_INSTR) || defined(CONFIG_GENERIC_SCHED_CLOCK)
+extern u64 sched_clock_noinstr(void);
+#else
+static __always_inline u64 sched_clock_noinstr(void)
+{
+ return sched_clock();
+}
+#endif
/*
* See the comment in kernel/sched/clock.c
@@ -45,6 +54,11 @@ static inline u64 cpu_clock(int cpu)
return sched_clock();
}
+static __always_inline u64 local_clock_noinstr(void)
+{
+ return sched_clock_noinstr();
+}
+
static __always_inline u64 local_clock(void)
{
return sched_clock();
@@ -79,6 +93,7 @@ static inline u64 cpu_clock(int cpu)
return sched_clock_cpu(cpu);
}
+extern u64 local_clock_noinstr(void);
extern u64 local_clock(void);
#endif
diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
index 0ee96ea7a0e9..02f5090ffea2 100644
--- a/include/linux/sched/coredump.h
+++ b/include/linux/sched/coredump.h
@@ -71,6 +71,7 @@ static inline int get_dumpable(struct mm_struct *mm)
#define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */
#define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */
#define MMF_DISABLE_THP 24 /* disable THP for all VMAs */
+#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP)
#define MMF_OOM_REAP_QUEUED 25 /* mm was queued for oom_reaper */
#define MMF_MULTIPROCESS 26 /* mm is shared between processes */
/*
@@ -85,10 +86,22 @@ static inline int get_dumpable(struct mm_struct *mm)
#define MMF_HAS_MDWE 28
#define MMF_HAS_MDWE_MASK (1 << MMF_HAS_MDWE)
-#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP)
+
+#define MMF_HAS_MDWE_NO_INHERIT 29
+
+#define MMF_VM_MERGE_ANY 30
+#define MMF_VM_MERGE_ANY_MASK (1 << MMF_VM_MERGE_ANY)
#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
- MMF_DISABLE_THP_MASK | MMF_HAS_MDWE_MASK)
+ MMF_DISABLE_THP_MASK | MMF_HAS_MDWE_MASK |\
+ MMF_VM_MERGE_ANY_MASK)
+
+static inline unsigned long mmf_init_flags(unsigned long flags)
+{
+ if (flags & (1UL << MMF_HAS_MDWE_NO_INHERIT))
+ flags &= ~((1UL << MMF_HAS_MDWE) |
+ (1UL << MMF_HAS_MDWE_NO_INHERIT));
+ return flags & MMF_INIT_MASK;
+}
-#define MMF_VM_MERGE_ANY 29
#endif /* _LINUX_SCHED_COREDUMP_H */
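[Editor's note: a sketch of the fork-time use this helper is meant for; the real call site lives in kernel/fork.c, and the wrapper name is hypothetical.]

	static void my_inherit_mm_flags(struct mm_struct *mm)
	{
		if (current->mm)	/* kernel threads have no mm to inherit */
			mm->flags = mmf_init_flags(current->mm->flags);
	}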
diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
index 7c83d4d5a971..df3aca89d4f5 100644
--- a/include/linux/sched/deadline.h
+++ b/include/linux/sched/deadline.h
@@ -1,4 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_SCHED_DEADLINE_H
+#define _LINUX_SCHED_DEADLINE_H
/*
* SCHED_DEADLINE tasks has negative priorities, reflecting
@@ -34,3 +36,5 @@ extern void dl_add_task_root_domain(struct task_struct *p);
extern void dl_clear_root_domain(struct root_domain *rd);
#endif /* CONFIG_SMP */
+
+#endif /* _LINUX_SCHED_DEADLINE_H */
diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h
index fe1a46f30d24..2b461129d1fa 100644
--- a/include/linux/sched/isolation.h
+++ b/include/linux/sched/isolation.h
@@ -2,6 +2,7 @@
#define _LINUX_SCHED_ISOLATION_H
#include <linux/cpumask.h>
+#include <linux/cpuset.h>
#include <linux/init.h>
#include <linux/tick.h>
@@ -67,7 +68,8 @@ static inline bool housekeeping_cpu(int cpu, enum hk_type type)
static inline bool cpu_is_isolated(int cpu)
{
return !housekeeping_test_cpu(cpu, HK_TYPE_DOMAIN) ||
- !housekeeping_test_cpu(cpu, HK_TYPE_TICK);
+ !housekeeping_test_cpu(cpu, HK_TYPE_TICK) ||
+ cpuset_cpu_is_isolated(cpu);
}
#endif /* _LINUX_SCHED_ISOLATION_H */
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 8d89c8c4fac1..9a19f1b42f64 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -403,6 +403,10 @@ DECLARE_PER_CPU(struct mem_cgroup *, int_active_memcg);
* __GFP_ACCOUNT allocations till the end of the scope will be charged to the
* given memcg.
*
+ * Please make sure that the caller holds a reference to the passed memcg
+ * structure, so that its lifetime is guaranteed to exceed the scope between
+ * the two set_active_memcg() calls.
+ *
* NOTE: This function can nest. Users must save the return value and
* reset the previous value after their own charging scope is over.
*/
diff --git a/include/linux/sched/numa_balancing.h b/include/linux/sched/numa_balancing.h
index 3988762efe15..52b22c5c396d 100644
--- a/include/linux/sched/numa_balancing.h
+++ b/include/linux/sched/numa_balancing.h
@@ -15,13 +15,23 @@
#define TNF_FAULT_LOCAL 0x08
#define TNF_MIGRATE_FAIL 0x10
+enum numa_vmaskip_reason {
+ NUMAB_SKIP_UNSUITABLE,
+ NUMAB_SKIP_SHARED_RO,
+ NUMAB_SKIP_INACCESSIBLE,
+ NUMAB_SKIP_SCAN_DELAY,
+ NUMAB_SKIP_PID_INACTIVE,
+ NUMAB_SKIP_IGNORE_PID,
+ NUMAB_SKIP_SEQ_COMPLETED,
+};
+
#ifdef CONFIG_NUMA_BALANCING
extern void task_numa_fault(int last_node, int node, int pages, int flags);
extern pid_t task_numa_group_id(struct task_struct *p);
extern void set_numabalancing_state(bool enabled);
extern void task_numa_free(struct task_struct *p, bool final);
-extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
- int src_nid, int dst_cpu);
+bool should_numa_migrate_memory(struct task_struct *p, struct folio *folio,
+ int src_nid, int dst_cpu);
#else
static inline void task_numa_fault(int last_node, int node, int pages,
int flags)
@@ -38,7 +48,7 @@ static inline void task_numa_free(struct task_struct *p, bool final)
{
}
static inline bool should_numa_migrate_memory(struct task_struct *p,
- struct page *page, int src_nid, int dst_cpu)
+ struct folio *folio, int src_nid, int dst_cpu)
{
return true;
}
diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
index 994c25640e15..b2b9e6eb9683 100644
--- a/include/linux/sched/rt.h
+++ b/include/linux/sched/rt.h
@@ -30,6 +30,10 @@ static inline bool task_is_realtime(struct task_struct *tsk)
}
#ifdef CONFIG_RT_MUTEXES
+extern void rt_mutex_pre_schedule(void);
+extern void rt_mutex_schedule(void);
+extern void rt_mutex_post_schedule(void);
+
/*
* Must hold either p->pi_lock or task_rq(p)->lock.
*/
diff --git a/include/linux/sched/sd_flags.h b/include/linux/sched/sd_flags.h
index 57bde66d95f7..a8b28647aafc 100644
--- a/include/linux/sched/sd_flags.h
+++ b/include/linux/sched/sd_flags.h
@@ -110,6 +110,13 @@ SD_FLAG(SD_ASYM_CPUCAPACITY_FULL, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS)
SD_FLAG(SD_SHARE_CPUCAPACITY, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
/*
+ * Domain members share CPU cluster (LLC tags or L2 cache)
+ *
+ * NEEDS_GROUPS: Clusters are shared between groups.
+ */
+SD_FLAG(SD_CLUSTER, SDF_NEEDS_GROUPS)
+
+/*
* Domain members share CPU package resources (i.e. caches)
*
* SHARED_CHILD: Set from the base domain up until spanned CPUs no longer share
@@ -132,12 +139,9 @@ SD_FLAG(SD_SERIALIZE, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS)
/*
* Place busy tasks earlier in the domain
*
- * SHARED_CHILD: Usually set on the SMT level. Technically could be set further
- * up, but currently assumed to be set from the base domain
- * upwards (see update_top_cache_domain()).
* NEEDS_GROUPS: Load balancing flag.
*/
-SD_FLAG(SD_ASYM_PACKING, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
+SD_FLAG(SD_ASYM_PACKING, SDF_NEEDS_GROUPS)
/*
* Prefer to place tasks in a sibling domain
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 20099268fa25..4b7664c56208 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -9,6 +9,7 @@
#include <linux/sched/task.h>
#include <linux/cred.h>
#include <linux/refcount.h>
+#include <linux/pid.h>
#include <linux/posix-timers.h>
#include <linux/mm_types.h>
#include <asm/ptrace.h>
@@ -135,7 +136,7 @@ struct signal_struct {
#ifdef CONFIG_POSIX_TIMERS
/* POSIX.1b Interval Timers */
- int posix_timer_id;
+ unsigned int next_posix_timer_id;
struct list_head posix_timers;
/* ITIMER_REAL timer for the process */
@@ -303,20 +304,11 @@ static inline void kernel_signal_stop(void)
schedule();
}
-#ifdef __ia64__
-# define ___ARCH_SI_IA64(_a1, _a2, _a3) , _a1, _a2, _a3
-#else
-# define ___ARCH_SI_IA64(_a1, _a2, _a3)
-#endif
-int force_sig_fault_to_task(int sig, int code, void __user *addr
- ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
- , struct task_struct *t);
-int force_sig_fault(int sig, int code, void __user *addr
- ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr));
-int send_sig_fault(int sig, int code, void __user *addr
- ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
- , struct task_struct *t);
+int force_sig_fault_to_task(int sig, int code, void __user *addr,
+ struct task_struct *t);
+int force_sig_fault(int sig, int code, void __user *addr);
+int send_sig_fault(int sig, int code, void __user *addr, struct task_struct *t);
int force_sig_mceerr(int code, void __user *, short);
int send_sig_mceerr(int code, void __user *, short, struct task_struct *);
@@ -441,7 +433,6 @@ static inline bool fault_signal_pending(vm_fault_t fault_flags,
* This is required every time the blocked sigset_t changes.
* callers must hold sighand->siglock.
*/
-extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);
extern void calculate_sigpending(void);
@@ -649,17 +640,18 @@ extern void flush_itimer_signals(void);
extern bool current_is_single_threaded(void);
/*
- * Careful: do_each_thread/while_each_thread is a double loop so
- * 'break' will not work as expected - use goto instead.
+ * Without tasklist/siglock it is only rcu-safe if g can't exit/exec,
+ * otherwise next_thread(t) will never reach g after list_del_rcu(g).
*/
-#define do_each_thread(g, t) \
- for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
-
#define while_each_thread(g, t) \
while ((t = next_thread(t)) != g)
+#define for_other_threads(p, t) \
+ for (t = p; (t = next_thread(t)) != p; )
+
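[Editor's note: a usage sketch, subject to the locking rule in the comment above (caller holds tasklist_lock/siglock or satisfies the RCU condition); the flag set here is illustrative.]

	static void my_poke_other_threads(struct task_struct *p)
	{
		struct task_struct *t;

		for_other_threads(p, t)
			set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
	}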
#define __for_each_thread(signal, t) \
- list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)
+ list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node, \
+ lockdep_is_held(&tasklist_lock))
#define for_each_thread(p, t) \
__for_each_thread((p)->signal, t)
@@ -718,15 +710,26 @@ bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
return p1->signal == p2->signal;
}
-static inline struct task_struct *next_thread(const struct task_struct *p)
+/*
+ * returns NULL if p is the last thread in the thread group
+ */
+static inline struct task_struct *__next_thread(struct task_struct *p)
+{
+ return list_next_or_null_rcu(&p->signal->thread_head,
+ &p->thread_node,
+ struct task_struct,
+ thread_node);
+}
+
+static inline struct task_struct *next_thread(struct task_struct *p)
{
- return list_entry_rcu(p->thread_group.next,
- struct task_struct, thread_group);
+ return __next_thread(p) ?: p->group_leader;
}
static inline int thread_group_empty(struct task_struct *p)
{
- return list_empty(&p->thread_group);
+ return thread_group_leader(p) &&
+ list_is_last(&p->thread_node, &p->signal->thread_head);
}
#define delay_group_leader(p) \
diff --git a/include/linux/sched/smt.h b/include/linux/sched/smt.h
index 59d3736c454c..fb1e295e7e63 100644
--- a/include/linux/sched/smt.h
+++ b/include/linux/sched/smt.h
@@ -17,4 +17,4 @@ static inline bool sched_smt_active(void) { return false; }
void arch_smt_update(void);
-#endif
+#endif /* _LINUX_SCHED_SMT_H */
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index e0f5ac90a228..d362aacf9f89 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -7,6 +7,8 @@
* functionality:
*/
+#include <linux/rcupdate.h>
+#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
@@ -118,13 +120,51 @@ static inline struct task_struct *get_task_struct(struct task_struct *t)
}
extern void __put_task_struct(struct task_struct *t);
+extern void __put_task_struct_rcu_cb(struct rcu_head *rhp);
static inline void put_task_struct(struct task_struct *t)
{
- if (refcount_dec_and_test(&t->usage))
+ if (!refcount_dec_and_test(&t->usage))
+ return;
+
+ /*
+ * In !RT, it is always safe to call __put_task_struct().
+ * Under RT, we can only call it in preemptible context.
+ */
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
+ static DEFINE_WAIT_OVERRIDE_MAP(put_task_map, LD_WAIT_SLEEP);
+
+ lock_map_acquire_try(&put_task_map);
__put_task_struct(t);
+ lock_map_release(&put_task_map);
+ return;
+ }
+
+ /*
+ * under PREEMPT_RT, we can't call put_task_struct
+ * in atomic context because it will indirectly
+ * acquire sleeping locks.
+ *
+ * call_rcu() will schedule delayed_put_task_struct_rcu()
+ * to be called in process context.
+ *
+ * __put_task_struct() is called when
+ * refcount_dec_and_test(&t->usage) succeeds.
+ *
+ * This means that it can't "conflict" with
+ * put_task_struct_rcu_user() which abuses ->rcu the same
+ * way; rcu_users has a reference so task->usage can't be
+ * zero after rcu_users 1 -> 0 transition.
+ *
+ * delayed_free_task() also uses ->rcu, but it is only called
+ * when it fails to fork a process. Therefore, there is no
+ * way it can conflict with put_task_struct().
+ */
+ call_rcu(&t->rcu, __put_task_struct_rcu_cb);
}
+DEFINE_FREE(put_task, struct task_struct *, if (_T) put_task_struct(_T))
+
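[Editor's note: the new cleanup class makes reference-dropping automatic; a sketch using find_get_task_by_vpid(), which takes a reference or returns NULL.]

	static int my_with_task(pid_t nr)
	{
		struct task_struct *p __free(put_task) = find_get_task_by_vpid(nr);

		if (!p)
			return -ESRCH;
		/* use p; put_task_struct() runs on every return path */
		return 0;
	}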
static inline void put_task_struct_many(struct task_struct *t, int nr)
{
if (refcount_sub_and_test(nr, &t->usage))
@@ -188,4 +228,6 @@ static inline void task_unlock(struct task_struct *p)
spin_unlock(&p->alloc_lock);
}
+DEFINE_GUARD(task_lock, struct task_struct *, task_lock(_T), task_unlock(_T))
+
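[Editor's note: and the matching guard for ->alloc_lock; a sketch.]

	static void my_inspect(struct task_struct *p)
	{
		guard(task_lock)(p);	/* task_lock() now, task_unlock() at scope exit */
		/* fields protected by p->alloc_lock are stable here */
	}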
#endif /* _LINUX_SCHED_TASK_H */
diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h
index f158b025c175..ccd72b978e1f 100644
--- a/include/linux/sched/task_stack.h
+++ b/include/linux/sched/task_stack.h
@@ -8,6 +8,7 @@
#include <linux/sched.h>
#include <linux/magic.h>
+#include <linux/refcount.h>
#ifdef CONFIG_THREAD_INFO_IN_TASK
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 816df6cc444e..a6e04b4a21d7 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -45,7 +45,7 @@ static inline int cpu_smt_flags(void)
#ifdef CONFIG_SCHED_CLUSTER
static inline int cpu_cluster_flags(void)
{
- return SD_SHARE_PKG_RESOURCES;
+ return SD_CLUSTER | SD_SHARE_PKG_RESOURCES;
}
#endif
@@ -109,8 +109,6 @@ struct sched_domain {
u64 max_newidle_lb_cost;
unsigned long last_decay_max_lb_cost;
- u64 avg_scan_cost; /* select_idle_sibling */
-
#ifdef CONFIG_SCHEDSTATS
/* load_balance() stats */
unsigned int lb_count[CPU_MAX_IDLE_TYPES];
@@ -179,6 +177,7 @@ cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
bool cpus_share_cache(int this_cpu, int that_cpu);
+bool cpus_share_resources(int this_cpu, int that_cpu);
typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
typedef int (*sched_domain_flags_f)(void);
@@ -203,7 +202,7 @@ struct sched_domain_topology_level {
#endif
};
-extern void set_sched_topology(struct sched_domain_topology_level *tl);
+extern void __init set_sched_topology(struct sched_domain_topology_level *tl);
#ifdef CONFIG_SCHED_DEBUG
# define SD_INIT_NAME(type) .name = #type
@@ -232,6 +231,11 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu)
return true;
}
+static inline bool cpus_share_resources(int this_cpu, int that_cpu)
+{
+ return true;
+}
+
#endif /* !CONFIG_SMP */
#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
@@ -275,6 +279,14 @@ void arch_update_thermal_pressure(const struct cpumask *cpus,
{ }
#endif
+#ifndef arch_scale_freq_ref
+static __always_inline
+unsigned int arch_scale_freq_ref(int cpu)
+{
+ return 0;
+}
+#endif
+
static inline int task_node(const struct task_struct *p)
{
return cpu_to_node(task_cpu(p));
diff --git a/include/linux/sched/types.h b/include/linux/sched/types.h
index 3c3e049224ae..969aaf5ef9d6 100644
--- a/include/linux/sched/types.h
+++ b/include/linux/sched/types.h
@@ -20,4 +20,4 @@ struct task_cputime {
unsigned long long sum_exec_runtime;
};
-#endif
+#endif /* _LINUX_SCHED_TYPES_H */
diff --git a/include/linux/sched/vhost_task.h b/include/linux/sched/vhost_task.h
index 837a23624a66..bc60243d43b3 100644
--- a/include/linux/sched/vhost_task.h
+++ b/include/linux/sched/vhost_task.h
@@ -1,7 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_VHOST_TASK_H
-#define _LINUX_VHOST_TASK_H
-
+#ifndef _LINUX_SCHED_VHOST_TASK_H
+#define _LINUX_SCHED_VHOST_TASK_H
struct vhost_task;
@@ -11,4 +10,4 @@ void vhost_task_start(struct vhost_task *vtsk);
void vhost_task_stop(struct vhost_task *vtsk);
void vhost_task_wake(struct vhost_task *vtsk);
-#endif
+#endif /* _LINUX_SCHED_VHOST_TASK_H */
diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h
index 0ce5746a4470..f2f05fb42d28 100644
--- a/include/linux/scmi_protocol.h
+++ b/include/linux/scmi_protocol.h
@@ -58,6 +58,8 @@ struct scmi_clock_info {
u64 step_size;
} range;
};
+ int num_parents;
+ u32 *parents;
};
enum scmi_power_scale {
@@ -80,6 +82,11 @@ struct scmi_protocol_handle;
* @rate_set: set the clock rate of a clock
* @enable: enables the specified clock
* @disable: disables the specified clock
+ * @state_get: get the status of the specified clock
+ * @config_oem_get: get the value of an OEM specific clock config
+ * @config_oem_set: set the value of an OEM specific clock config
+ * @parent_get: get the parent id of a clock
+ * @parent_set: set the parent of a clock
*/
struct scmi_clk_proto_ops {
int (*count_get)(const struct scmi_protocol_handle *ph);
@@ -90,22 +97,36 @@ struct scmi_clk_proto_ops {
u64 *rate);
int (*rate_set)(const struct scmi_protocol_handle *ph, u32 clk_id,
u64 rate);
- int (*enable)(const struct scmi_protocol_handle *ph, u32 clk_id);
- int (*disable)(const struct scmi_protocol_handle *ph, u32 clk_id);
- int (*enable_atomic)(const struct scmi_protocol_handle *ph, u32 clk_id);
- int (*disable_atomic)(const struct scmi_protocol_handle *ph,
- u32 clk_id);
+ int (*enable)(const struct scmi_protocol_handle *ph, u32 clk_id,
+ bool atomic);
+ int (*disable)(const struct scmi_protocol_handle *ph, u32 clk_id,
+ bool atomic);
+ int (*state_get)(const struct scmi_protocol_handle *ph, u32 clk_id,
+ bool *enabled, bool atomic);
+ int (*config_oem_get)(const struct scmi_protocol_handle *ph, u32 clk_id,
+ u8 oem_type, u32 *oem_val, u32 *attributes,
+ bool atomic);
+ int (*config_oem_set)(const struct scmi_protocol_handle *ph, u32 clk_id,
+ u8 oem_type, u32 oem_val, bool atomic);
+ int (*parent_get)(const struct scmi_protocol_handle *ph, u32 clk_id, u32 *parent_id);
+ int (*parent_set)(const struct scmi_protocol_handle *ph, u32 clk_id, u32 parent_id);
+};
+
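[Editor's note: a call-site sketch for the reworked enable/disable signatures; the semantics of the atomic flag (whether the transport may sleep) are assumed from the old enable_atomic variants.]

	static int my_scmi_clk_on(const struct scmi_clk_proto_ops *ops,
				  const struct scmi_protocol_handle *ph, u32 clk_id)
	{
		return ops->enable(ph, clk_id, false);	/* false: sleeping allowed */
	}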
+struct scmi_perf_domain_info {
+ char name[SCMI_MAX_STR_SIZE];
+ bool set_perf;
};
/**
* struct scmi_perf_proto_ops - represents the various operations provided
* by SCMI Performance Protocol
*
+ * @num_domains_get: gets the number of supported performance domains
+ * @info_get: get the information of a performance domain
* @limits_set: sets limits on the performance level of a domain
* @limits_get: gets limits on the performance level of a domain
* @level_set: sets the performance level of a domain
* @level_get: gets the performance level of a domain
- * @device_domain_id: gets the scmi domain id for a given device
* @transition_latency_get: gets the DVFS transition latency for a given device
* @device_opps_add: adds all the OPPs for a given device
* @freq_set: sets the frequency for a given device using sustained frequency
@@ -120,6 +141,9 @@ struct scmi_clk_proto_ops {
* or in some other (abstract) scale
*/
struct scmi_perf_proto_ops {
+ int (*num_domains_get)(const struct scmi_protocol_handle *ph);
+ const struct scmi_perf_domain_info __must_check *(*info_get)
+ (const struct scmi_protocol_handle *ph, u32 domain);
int (*limits_set)(const struct scmi_protocol_handle *ph, u32 domain,
u32 max_perf, u32 min_perf);
int (*limits_get)(const struct scmi_protocol_handle *ph, u32 domain,
@@ -128,11 +152,10 @@ struct scmi_perf_proto_ops {
u32 level, bool poll);
int (*level_get)(const struct scmi_protocol_handle *ph, u32 domain,
u32 *level, bool poll);
- int (*device_domain_id)(struct device *dev);
int (*transition_latency_get)(const struct scmi_protocol_handle *ph,
- struct device *dev);
+ u32 domain);
int (*device_opps_add)(const struct scmi_protocol_handle *ph,
- struct device *dev);
+ struct device *dev, u32 domain);
int (*freq_set)(const struct scmi_protocol_handle *ph, u32 domain,
unsigned long rate, bool poll);
int (*freq_get)(const struct scmi_protocol_handle *ph, u32 domain,
@@ -140,7 +163,7 @@ struct scmi_perf_proto_ops {
int (*est_power_get)(const struct scmi_protocol_handle *ph, u32 domain,
unsigned long *rate, unsigned long *power);
bool (*fast_switch_possible)(const struct scmi_protocol_handle *ph,
- struct device *dev);
+ u32 domain);
enum scmi_power_scale (*power_scale_get)(const struct scmi_protocol_handle *ph);
};
@@ -629,11 +652,25 @@ struct scmi_powercap_info {
* @num_domains_get: get the count of powercap domains provided by SCMI.
* @info_get: get the information for the specified domain.
* @cap_get: get the current CAP value for the specified domain.
+ * On SCMI platforms supporting powercap zone disabling, this could
+ * report a zero value for a zone where powercapping is disabled.
* @cap_set: set the CAP value for the specified domain to the provided value;
* if the domain supports setting the CAP with an asynchronous command
* this request will finally trigger an asynchronous transfer, but, if
* @ignore_dresp here is set to true, this call will anyway return
* immediately without waiting for the related delayed response.
+ * Note that the requested powercap value must NOT be zero, even if
+ * the platform supports disabling a powercap by setting its cap to
+ * zero (since SCMI v3.2): the dedicated @cap_enable_set/@cap_enable_get
+ * operations should be used for that.
+ * @cap_enable_set: enable or disable the powercapping on the specified domain,
+ * if supported by the SCMI platform implementation.
+ * Note that, per the SCMI specification, the platform can
+ * silently ignore our disable request and instead enforce some
+ * other powercap value requested by another agent on the system:
+ * for this reason @cap_get and @cap_enable_get will always report
+ * the platform's final view of the powercaps.
+ * @cap_enable_get: get the current CAP enable status for the specified domain.
* @pai_get: get the current PAI value for the specified domain.
* @pai_set: set the PAI value for the specified domain to the provided value.
* @measurements_get: retrieve the current average power measurements for the
@@ -662,6 +699,10 @@ struct scmi_powercap_proto_ops {
u32 *power_cap);
int (*cap_set)(const struct scmi_protocol_handle *ph, u32 domain_id,
u32 power_cap, bool ignore_dresp);
+ int (*cap_enable_set)(const struct scmi_protocol_handle *ph,
+ u32 domain_id, bool enable);
+ int (*cap_enable_get)(const struct scmi_protocol_handle *ph,
+ u32 domain_id, bool *enable);
int (*pai_get)(const struct scmi_protocol_handle *ph, u32 domain_id,
u32 *pai);
int (*pai_set)(const struct scmi_protocol_handle *ph, u32 domain_id,
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index 175079552f68..709ad84809e1 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -3,6 +3,7 @@
#define _LINUX_SECCOMP_H
#include <uapi/linux/seccomp.h>
+#include <linux/seccomp_types.h>
#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC | \
SECCOMP_FILTER_FLAG_LOG | \
@@ -21,25 +22,6 @@
#include <linux/atomic.h>
#include <asm/seccomp.h>
-struct seccomp_filter;
-/**
- * struct seccomp - the state of a seccomp'ed process
- *
- * @mode: indicates one of the valid values above for controlled
- * system calls available to a process.
- * @filter_count: number of seccomp filters
- * @filter: must always point to a valid seccomp-filter or NULL as it is
- * accessed without locking during system call entry.
- *
- * @filter must only be accessed from the context of current as there
- * is no read locking.
- */
-struct seccomp {
- int mode;
- atomic_t filter_count;
- struct seccomp_filter *filter;
-};
-
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
extern int __secure_computing(const struct seccomp_data *sd);
static inline int secure_computing(void)
@@ -64,8 +46,6 @@ static inline int seccomp_mode(struct seccomp *s)
#include <linux/errno.h>
-struct seccomp { };
-struct seccomp_filter { };
struct seccomp_data;
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
@@ -126,6 +106,8 @@ static inline long seccomp_get_metadata(struct task_struct *task,
#ifdef CONFIG_SECCOMP_CACHE_DEBUG
struct seq_file;
+struct pid_namespace;
+struct pid;
int proc_pid_seccomp_cache(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task);
diff --git a/include/linux/seccomp_types.h b/include/linux/seccomp_types.h
new file mode 100644
index 000000000000..cf0a0355024f
--- /dev/null
+++ b/include/linux/seccomp_types.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_SECCOMP_TYPES_H
+#define _LINUX_SECCOMP_TYPES_H
+
+#include <linux/types.h>
+
+#ifdef CONFIG_SECCOMP
+
+struct seccomp_filter;
+/**
+ * struct seccomp - the state of a seccomp'ed process
+ *
+ * @mode: indicates which of the SECCOMP_MODE_* values governs the
+ * system calls available to a process.
+ * @filter_count: number of seccomp filters
+ * @filter: must always point to a valid seccomp-filter or NULL as it is
+ * accessed without locking during system call entry.
+ *
+ * @filter must only be accessed from the context of current as there
+ * is no read locking.
+ */
+struct seccomp {
+ int mode;
+ atomic_t filter_count;
+ struct seccomp_filter *filter;
+};
+
+#else
+
+struct seccomp { };
+struct seccomp_filter { };
+
+#endif
+
+#endif /* _LINUX_SECCOMP_TYPES_H */
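The point of the split is that headers which only need the layout of struct
seccomp (most prominently the task_struct definition) can now pull in this
lightweight types header instead of all of <linux/seccomp.h>. A minimal
sketch of such a consumer, with a hypothetical embedding struct:

    #include <linux/seccomp_types.h>

    struct sandboxed_ctx {                  /* illustrative, not a kernel struct */
            struct seccomp seccomp;         /* empty when !CONFIG_SECCOMP */
            unsigned long flags;
    };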
diff --git a/include/linux/secretmem.h b/include/linux/secretmem.h
index 988528b5da43..35f3a4a8ceb1 100644
--- a/include/linux/secretmem.h
+++ b/include/linux/secretmem.h
@@ -6,24 +6,23 @@
extern const struct address_space_operations secretmem_aops;
-static inline bool page_is_secretmem(struct page *page)
+static inline bool folio_is_secretmem(struct folio *folio)
{
struct address_space *mapping;
/*
- * Using page_mapping() is quite slow because of the actual call
- * instruction and repeated compound_head(page) inside the
- * page_mapping() function.
+ * Using folio_mapping() is quite slow because of the actual call
+ * instruction.
* We know that secretmem pages are not compound and LRU so we can
* save a couple of cycles here.
*/
- if (PageCompound(page) || !PageLRU(page))
+ if (folio_test_large(folio) || !folio_test_lru(folio))
return false;
mapping = (struct address_space *)
- ((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
+ ((unsigned long)folio->mapping & ~PAGE_MAPPING_FLAGS);
- if (!mapping || mapping != page->mapping)
+ if (!mapping || mapping != folio->mapping)
return false;
return mapping->a_ops == &secretmem_aops;
@@ -39,7 +38,7 @@ static inline bool vma_is_secretmem(struct vm_area_struct *vma)
return false;
}
-static inline bool page_is_secretmem(struct page *page)
+static inline bool folio_is_secretmem(struct folio *folio)
{
return false;
}
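The fast-path reasoning in the comment matters because folio_is_secretmem()
is called from lockless walkers such as GUP. An illustrative sketch of a
caller (the name is hypothetical; the real fast-GUP code carries more
bookkeeping):

    static bool can_pin_fast(struct folio *folio)
    {
            /*
             * Secretmem folios are removed from the direct map, so a
             * lockless walker must bail out to the slow path here.
             */
            return !folio_is_secretmem(folio);
    }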
diff --git a/include/linux/security.h b/include/linux/security.h
index e2734e9e44d5..d0eb20f90b26 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -32,6 +32,7 @@
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/sockptr.h>
+#include <uapi/linux/lsm.h>
struct linux_binprm;
struct cred;
@@ -60,6 +61,7 @@ struct fs_parameter;
enum fs_value_type;
struct watch;
struct watch_notification;
+struct lsm_ctx;
/* Default (no) options for the capable function */
#define CAP_OPT_NONE 0x0
@@ -138,6 +140,8 @@ enum lockdown_reason {
};
extern const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1];
+extern u32 lsm_active_cnt;
+extern const struct lsm_id *lsm_idlist[];
/* These functions are in security/commoncap.c */
extern int cap_capable(const struct cred *cred, struct user_namespace *ns,
@@ -145,12 +149,13 @@ extern int cap_capable(const struct cred *cred, struct user_namespace *ns,
extern int cap_settime(const struct timespec64 *ts, const struct timezone *tz);
extern int cap_ptrace_access_check(struct task_struct *child, unsigned int mode);
extern int cap_ptrace_traceme(struct task_struct *parent);
-extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted);
+extern int cap_capget(const struct task_struct *target, kernel_cap_t *effective,
+ kernel_cap_t *inheritable, kernel_cap_t *permitted);
extern int cap_capset(struct cred *new, const struct cred *old,
const kernel_cap_t *effective,
const kernel_cap_t *inheritable,
const kernel_cap_t *permitted);
-extern int cap_bprm_creds_from_file(struct linux_binprm *bprm, struct file *file);
+extern int cap_bprm_creds_from_file(struct linux_binprm *bprm, const struct file *file);
int cap_inode_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags);
int cap_inode_removexattr(struct mnt_idmap *idmap,
@@ -260,6 +265,7 @@ int unregister_blocking_lsm_notifier(struct notifier_block *nb);
/* prototypes */
extern int security_init(void);
extern int early_security_init(void);
+extern u64 lsm_name_to_attr(const char *name);
/* Security operations */
int security_binder_set_context_mgr(const struct cred *mgr);
@@ -268,10 +274,10 @@ int security_binder_transaction(const struct cred *from,
int security_binder_transfer_binder(const struct cred *from,
const struct cred *to);
int security_binder_transfer_file(const struct cred *from,
- const struct cred *to, struct file *file);
+ const struct cred *to, const struct file *file);
int security_ptrace_access_check(struct task_struct *child, unsigned int mode);
int security_ptrace_traceme(struct task_struct *parent);
-int security_capget(struct task_struct *target,
+int security_capget(const struct task_struct *target,
kernel_cap_t *effective,
kernel_cap_t *inheritable,
kernel_cap_t *permitted);
@@ -283,16 +289,17 @@ int security_capable(const struct cred *cred,
struct user_namespace *ns,
int cap,
unsigned int opts);
-int security_quotactl(int cmds, int type, int id, struct super_block *sb);
+int security_quotactl(int cmds, int type, int id, const struct super_block *sb);
int security_quota_on(struct dentry *dentry);
int security_syslog(int type);
int security_settime64(const struct timespec64 *ts, const struct timezone *tz);
int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
int security_bprm_creds_for_exec(struct linux_binprm *bprm);
-int security_bprm_creds_from_file(struct linux_binprm *bprm, struct file *file);
+int security_bprm_creds_from_file(struct linux_binprm *bprm, const struct file *file);
int security_bprm_check(struct linux_binprm *bprm);
-void security_bprm_committing_creds(struct linux_binprm *bprm);
-void security_bprm_committed_creds(struct linux_binprm *bprm);
+void security_bprm_committing_creds(const struct linux_binprm *bprm);
+void security_bprm_committed_creds(const struct linux_binprm *bprm);
+int security_fs_context_submount(struct fs_context *fc, struct super_block *reference);
int security_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc);
int security_fs_context_parse_param(struct fs_context *fc, struct fs_parameter *param);
int security_sb_alloc(struct super_block *sb);
@@ -302,7 +309,7 @@ void security_free_mnt_opts(void **mnt_opts);
int security_sb_eat_lsm_opts(char *options, void **mnt_opts);
int security_sb_mnt_opts_compat(struct super_block *sb, void *mnt_opts);
int security_sb_remount(struct super_block *sb, void *mnt_opts);
-int security_sb_kern_mount(struct super_block *sb);
+int security_sb_kern_mount(const struct super_block *sb);
int security_sb_show_options(struct seq_file *m, struct super_block *sb);
int security_sb_statfs(struct dentry *dentry);
int security_sb_mount(const char *dev_name, const struct path *path,
@@ -387,6 +394,8 @@ int security_file_permission(struct file *file, int mask);
int security_file_alloc(struct file *file);
void security_file_free(struct file *file);
int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+int security_file_ioctl_compat(struct file *file, unsigned int cmd,
+ unsigned long arg);
int security_mmap_file(struct file *file, unsigned long prot,
unsigned long flags);
int security_mmap_addr(unsigned long addr);
@@ -468,10 +477,13 @@ int security_sem_semctl(struct kern_ipc_perm *sma, int cmd);
int security_sem_semop(struct kern_ipc_perm *sma, struct sembuf *sops,
unsigned nsops, int alter);
void security_d_instantiate(struct dentry *dentry, struct inode *inode);
-int security_getprocattr(struct task_struct *p, const char *lsm, const char *name,
+int security_getselfattr(unsigned int attr, struct lsm_ctx __user *ctx,
+ size_t __user *size, u32 flags);
+int security_setselfattr(unsigned int attr, struct lsm_ctx __user *ctx,
+ size_t size, u32 flags);
+int security_getprocattr(struct task_struct *p, int lsmid, const char *name,
char **value);
-int security_setprocattr(const char *lsm, const char *name, void *value,
- size_t size);
+int security_setprocattr(int lsmid, const char *name, void *value, size_t size);
int security_netlink_send(struct sock *sk, struct sk_buff *skb);
int security_ismaclabel(const char *name);
int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen);
@@ -482,6 +494,8 @@ int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen);
int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen);
int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen);
int security_locked_down(enum lockdown_reason what);
+int lsm_fill_user_ctx(struct lsm_ctx __user *uctx, size_t *uctx_len,
+ void *val, size_t val_len, u64 id, u64 flags);
#else /* CONFIG_SECURITY */
static inline int call_blocking_lsm_notifier(enum lsm_event event, void *data)
@@ -499,6 +513,11 @@ static inline int unregister_blocking_lsm_notifier(struct notifier_block *nb)
return 0;
}
+static inline u64 lsm_name_to_attr(const char *name)
+{
+ return LSM_ATTR_UNDEF;
+}
+
static inline void security_free_mnt_opts(void **mnt_opts)
{
}
@@ -537,7 +556,7 @@ static inline int security_binder_transfer_binder(const struct cred *from,
static inline int security_binder_transfer_file(const struct cred *from,
const struct cred *to,
- struct file *file)
+ const struct file *file)
{
return 0;
}
@@ -553,7 +572,7 @@ static inline int security_ptrace_traceme(struct task_struct *parent)
return cap_ptrace_traceme(parent);
}
-static inline int security_capget(struct task_struct *target,
+static inline int security_capget(const struct task_struct *target,
kernel_cap_t *effective,
kernel_cap_t *inheritable,
kernel_cap_t *permitted)
@@ -579,7 +598,7 @@ static inline int security_capable(const struct cred *cred,
}
static inline int security_quotactl(int cmds, int type, int id,
- struct super_block *sb)
+ const struct super_block *sb)
{
return 0;
}
@@ -611,7 +630,7 @@ static inline int security_bprm_creds_for_exec(struct linux_binprm *bprm)
}
static inline int security_bprm_creds_from_file(struct linux_binprm *bprm,
- struct file *file)
+ const struct file *file)
{
return cap_bprm_creds_from_file(bprm, file);
}
@@ -621,14 +640,19 @@ static inline int security_bprm_check(struct linux_binprm *bprm)
return 0;
}
-static inline void security_bprm_committing_creds(struct linux_binprm *bprm)
+static inline void security_bprm_committing_creds(const struct linux_binprm *bprm)
{
}
-static inline void security_bprm_committed_creds(struct linux_binprm *bprm)
+static inline void security_bprm_committed_creds(const struct linux_binprm *bprm)
{
}
+static inline int security_fs_context_submount(struct fs_context *fc,
+ struct super_block *reference)
+{
+ return 0;
+}
static inline int security_fs_context_dup(struct fs_context *fc,
struct fs_context *src_fc)
{
@@ -980,6 +1004,13 @@ static inline int security_file_ioctl(struct file *file, unsigned int cmd,
return 0;
}
+static inline int security_file_ioctl_compat(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ return 0;
+}
+
static inline int security_mmap_file(struct file *file, unsigned long prot,
unsigned long flags)
{
@@ -1330,14 +1361,28 @@ static inline void security_d_instantiate(struct dentry *dentry,
struct inode *inode)
{ }
-static inline int security_getprocattr(struct task_struct *p, const char *lsm,
+static inline int security_getselfattr(unsigned int attr,
+ struct lsm_ctx __user *ctx,
+ size_t __user *size, u32 flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int security_setselfattr(unsigned int attr,
+ struct lsm_ctx __user *ctx,
+ size_t size, u32 flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int security_getprocattr(struct task_struct *p, int lsmid,
const char *name, char **value)
{
return -EINVAL;
}
-static inline int security_setprocattr(const char *lsm, char *name,
- void *value, size_t size)
+static inline int security_setprocattr(int lsmid, char *name, void *value,
+ size_t size)
{
return -EINVAL;
}
@@ -1388,6 +1433,12 @@ static inline int security_locked_down(enum lockdown_reason what)
{
return 0;
}
+static inline int lsm_fill_user_ctx(struct lsm_ctx __user *uctx,
+ size_t *uctx_len, void *val, size_t val_len,
+ u64 id, u64 flags)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_SECURITY */
#if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE)
@@ -1439,7 +1490,8 @@ int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u
int security_sk_alloc(struct sock *sk, int family, gfp_t priority);
void security_sk_free(struct sock *sk);
void security_sk_clone(const struct sock *sk, struct sock *newsk);
-void security_sk_classify_flow(struct sock *sk, struct flowi_common *flic);
+void security_sk_classify_flow(const struct sock *sk,
+ struct flowi_common *flic);
void security_req_classify_flow(const struct request_sock *req,
struct flowi_common *flic);
void security_sock_graft(struct sock*sk, struct socket *parent);
@@ -1465,6 +1517,7 @@ void security_sctp_sk_clone(struct sctp_association *asoc, struct sock *sk,
struct sock *newsk);
int security_sctp_assoc_established(struct sctp_association *asoc,
struct sk_buff *skb);
+int security_mptcp_add_subflow(struct sock *sk, struct sock *ssk);
#else /* CONFIG_SECURITY_NETWORK */
static inline int security_unix_stream_connect(struct sock *sock,
@@ -1596,7 +1649,7 @@ static inline void security_sk_clone(const struct sock *sk, struct sock *newsk)
{
}
-static inline void security_sk_classify_flow(struct sock *sk,
+static inline void security_sk_classify_flow(const struct sock *sk,
struct flowi_common *flic)
{
}
@@ -1692,6 +1745,11 @@ static inline int security_sctp_assoc_established(struct sctp_association *asoc,
{
return 0;
}
+
+static inline int security_mptcp_add_subflow(struct sock *sk, struct sock *ssk)
+{
+ return 0;
+}
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_INFINIBAND
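The new self-attr plumbing is easiest to see from the LSM side: a module's
getselfattr hook packs its label into the caller's buffer via
lsm_fill_user_ctx(). An illustrative sketch, with hypothetical "example"
names and LSM_ID_UNDEF standing in for the module's real LSM_ID_* value:

    static int example_getselfattr(unsigned int attr, struct lsm_ctx __user *ctx,
                                   size_t *size, u32 flags)
    {
            const char *label = "example_label";    /* placeholder value */
            int err;

            if (attr != LSM_ATTR_CURRENT)
                    return -EOPNOTSUPP;

            err = lsm_fill_user_ctx(ctx, size, (void *)label,
                                    strlen(label) + 1, LSM_ID_UNDEF, 0);
            return err ? err : 1;   /* assumption: one attribute reported */
    }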
diff --git a/include/linux/sed-opal-key.h b/include/linux/sed-opal-key.h
new file mode 100644
index 000000000000..0ca03054e8f6
--- /dev/null
+++ b/include/linux/sed-opal-key.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * SED key operations.
+ *
+ * Copyright (C) 2023 IBM Corporation
+ *
+ * These are the accessor functions (read/write) for SED Opal
+ * keys. Specific keystores can provide overrides.
+ *
+ */
+
+#include <linux/kernel.h>
+
+#ifdef CONFIG_PSERIES_PLPKS_SED
+int sed_read_key(char *keyname, char *key, u_int *keylen);
+int sed_write_key(char *keyname, char *key, u_int keylen);
+#else
+static inline
+int sed_read_key(char *keyname, char *key, u_int *keylen) {
+ return -EOPNOTSUPP;
+}
+static inline
+int sed_write_key(char *keyname, char *key, u_int keylen) {
+ return -EOPNOTSUPP;
+}
+#endif
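An illustrative sketch of a keystore consumer; the helper name is
hypothetical, and OPAL_AUTH_KEY comes from the sed-opal.h change just below:

    static int load_boot_pin(char *pin_buf, u_int buf_len)
    {
            u_int len = buf_len;
            int ret;

            ret = sed_read_key(OPAL_AUTH_KEY, pin_buf, &len);
            if (ret)        /* -EOPNOTSUPP when no keystore backend exists */
                    return ret;

            return len;
    }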
diff --git a/include/linux/sed-opal.h b/include/linux/sed-opal.h
index bbae1e52ab4f..2ac50822554e 100644
--- a/include/linux/sed-opal.h
+++ b/include/linux/sed-opal.h
@@ -25,6 +25,9 @@ bool opal_unlock_from_suspend(struct opal_dev *dev);
struct opal_dev *init_opal_dev(void *data, sec_send_recv *send_recv);
int sed_ioctl(struct opal_dev *dev, unsigned int cmd, void __user *ioctl_ptr);
+#define OPAL_AUTH_KEY "opal-boot-pin"
+#define OPAL_AUTH_KEY_PREV "opal-boot-pin-prev"
+
static inline bool is_sed_ioctl(unsigned int cmd)
{
switch (cmd) {
@@ -47,6 +50,8 @@ static inline bool is_sed_ioctl(unsigned int cmd)
case IOC_OPAL_GET_STATUS:
case IOC_OPAL_GET_LR_STATUS:
case IOC_OPAL_GET_GEOMETRY:
+ case IOC_OPAL_DISCOVERY:
+ case IOC_OPAL_REVERT_LSP:
return true;
}
return false;
diff --git a/include/linux/sem.h b/include/linux/sem.h
index 5608a500c43e..c4deefe42aeb 100644
--- a/include/linux/sem.h
+++ b/include/linux/sem.h
@@ -3,25 +3,17 @@
#define _LINUX_SEM_H
#include <uapi/linux/sem.h>
+#include <linux/sem_types.h>
struct task_struct;
-struct sem_undo_list;
#ifdef CONFIG_SYSVIPC
-struct sysv_sem {
- struct sem_undo_list *undo_list;
-};
-
extern int copy_semundo(unsigned long clone_flags, struct task_struct *tsk);
extern void exit_sem(struct task_struct *tsk);
#else
-struct sysv_sem {
- /* empty */
-};
-
static inline int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
{
return 0;
diff --git a/include/linux/sem_types.h b/include/linux/sem_types.h
new file mode 100644
index 000000000000..73df1971a7ae
--- /dev/null
+++ b/include/linux/sem_types.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_SEM_TYPES_H
+#define _LINUX_SEM_TYPES_H
+
+struct sem_undo_list;
+
+struct sysv_sem {
+#ifdef CONFIG_SYSVIPC
+ struct sem_undo_list *undo_list;
+#endif
+};
+
+#endif /* _LINUX_SEM_TYPES_H */
diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h
index 515d7fcb9634..fe41da005970 100644
--- a/include/linux/seq_buf.h
+++ b/include/linux/seq_buf.h
@@ -2,7 +2,10 @@
#ifndef _LINUX_SEQ_BUF_H
#define _LINUX_SEQ_BUF_H
-#include <linux/fs.h>
+#include <linux/bug.h>
+#include <linux/minmax.h>
+#include <linux/seq_file.h>
+#include <linux/types.h>
/*
* Trace sequences are used to allow a function to call several other functions
@@ -10,23 +13,28 @@
*/
/**
- * seq_buf - seq buffer structure
+ * struct seq_buf - seq buffer structure
* @buffer: pointer to the buffer
* @size: size of the buffer
* @len: the amount of data inside the buffer
- * @readpos: The next position to read in the buffer.
*/
struct seq_buf {
char *buffer;
size_t size;
size_t len;
- loff_t readpos;
};
+#define DECLARE_SEQ_BUF(NAME, SIZE) \
+ struct seq_buf NAME = { \
+ .buffer = (char[SIZE]) { 0 }, \
+ .size = SIZE, \
+ }
+
static inline void seq_buf_clear(struct seq_buf *s)
{
s->len = 0;
- s->readpos = 0;
+ if (s->size)
+ s->buffer[0] = '\0';
}
static inline void
@@ -39,7 +47,7 @@ seq_buf_init(struct seq_buf *s, char *buf, unsigned int size)
/*
* seq_buf has a buffer that might overflow. When this happens
- * the len and size are set to be equal.
+ * len is set to be greater than size.
*/
static inline bool
seq_buf_has_overflowed(struct seq_buf *s)
@@ -72,10 +80,10 @@ static inline unsigned int seq_buf_used(struct seq_buf *s)
}
/**
- * seq_buf_terminate - Make sure buffer is nul terminated
- * @s: the seq_buf descriptor to terminate.
+ * seq_buf_str - get NUL-terminated C string from seq_buf
+ * @s: the seq_buf handle
*
- * This makes sure that the buffer in @s is nul terminated and
+ * This makes sure that the buffer in @s is NUL-terminated and
* safe to read as a string.
*
* Note, if this is called when the buffer has overflowed, then
@@ -84,16 +92,20 @@ static inline unsigned int seq_buf_used(struct seq_buf *s)
*
* After this function is called, s->buffer is safe to use
* in string operations.
+ *
+ * Returns: @s->buffer after making sure it is terminated.
*/
-static inline void seq_buf_terminate(struct seq_buf *s)
+static inline const char *seq_buf_str(struct seq_buf *s)
{
if (WARN_ON(s->size == 0))
- return;
+ return "";
if (seq_buf_buffer_left(s))
s->buffer[s->len] = 0;
else
s->buffer[s->size - 1] = 0;
+
+ return s->buffer;
}
/**
@@ -101,7 +113,7 @@ static inline void seq_buf_terminate(struct seq_buf *s)
* @s: the seq_buf handle
* @bufp: the beginning of the buffer is stored here
*
- * Return the number of bytes available in the buffer, or zero if
+ * Returns: the number of bytes available in the buffer, or zero if
* there's no space.
*/
static inline size_t seq_buf_get_buf(struct seq_buf *s, char **bufp)
@@ -123,7 +135,7 @@ static inline size_t seq_buf_get_buf(struct seq_buf *s, char **bufp)
* @num: the number of bytes to commit
*
* Commit @num bytes of data written to a buffer previously acquired
- * by seq_buf_get. To signal an error condition, or that the data
+ * by seq_buf_get_buf(). To signal an error condition, or that the data
* didn't fit in the available space, pass a negative @num value.
*/
static inline void seq_buf_commit(struct seq_buf *s, int num)
@@ -143,7 +155,7 @@ extern __printf(2, 0)
int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args);
extern int seq_buf_print_seq(struct seq_file *m, struct seq_buf *s);
extern int seq_buf_to_user(struct seq_buf *s, char __user *ubuf,
- int cnt);
+ size_t start, int cnt);
extern int seq_buf_puts(struct seq_buf *s, const char *str);
extern int seq_buf_putc(struct seq_buf *s, unsigned char c);
extern int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len);
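Taken together, DECLARE_SEQ_BUF() and seq_buf_str() remove most of the old
init/terminate boilerplate. A minimal sketch of the intended pattern (the
function name is illustrative):

    static void demo_report(void)
    {
            DECLARE_SEQ_BUF(sb, 64);        /* buffer lives in the declaration */

            seq_buf_printf(&sb, "cpu=%d", raw_smp_processor_id());
            pr_info("%s\n", seq_buf_str(&sb));      /* always NUL-terminated */
    }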
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index bd023dd38ae6..234bcdb1fba4 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -207,6 +207,21 @@ static const struct file_operations __name ## _fops = { \
.release = single_release, \
}
+#define DEFINE_SHOW_STORE_ATTRIBUTE(__name) \
+static int __name ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __name ## _show, inode->i_private); \
+} \
+ \
+static const struct file_operations __name ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __name ## _open, \
+ .read = seq_read, \
+ .write = __name ## _write, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
+
#define DEFINE_PROC_SHOW_ATTRIBUTE(__name) \
static int __name ## _open(struct inode *inode, struct file *file) \
{ \
@@ -249,18 +264,19 @@ static inline void seq_show_option(struct seq_file *m, const char *name,
/**
* seq_show_option_n - display mount options with appropriate escapes
- * where @value must be a specific length.
+ * where @value must be a specific length (i.e.
+ * not NUL-terminated).
* @m: the seq_file handle
* @name: the mount option name
* @value: the mount option name's value, cannot be NULL
- * @length: the length of @value to display
+ * @length: the exact length of @value to display, must be a constant expression
*
* This is a macro since this uses "length" to define the size of the
* stack buffer.
*/
#define seq_show_option_n(m, name, value, length) { \
char val_buf[length + 1]; \
- strncpy(val_buf, value, length); \
+ memcpy(val_buf, value, length); \
val_buf[length] = '\0'; \
seq_show_option(m, name, val_buf); \
}
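DEFINE_SHOW_STORE_ATTRIBUTE() mirrors DEFINE_SHOW_ATTRIBUTE() but also wires
up a caller-supplied write handler. A minimal sketch of what a user now
provides ("demo" names are hypothetical):

    static int demo_value;          /* illustrative backing variable */

    static int demo_show(struct seq_file *m, void *v)
    {
            seq_printf(m, "%d\n", demo_value);
            return 0;
    }

    static ssize_t demo_write(struct file *file, const char __user *buf,
                              size_t count, loff_t *ppos)
    {
            int err = kstrtoint_from_user(buf, count, 0, &demo_value);

            return err ?: count;
    }

    DEFINE_SHOW_STORE_ATTRIBUTE(demo);      /* emits demo_open and demo_fops */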
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 3926e9027947..d90d8ee29d81 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -18,6 +18,7 @@
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/preempt.h>
+#include <linux/seqlock_types.h>
#include <linux/spinlock.h>
#include <asm/processor.h>
@@ -37,37 +38,6 @@
*/
#define KCSAN_SEQLOCK_REGION_MAX 1000
-/*
- * Sequence counters (seqcount_t)
- *
- * This is the raw counting mechanism, without any writer protection.
- *
- * Write side critical sections must be serialized and non-preemptible.
- *
- * If readers can be invoked from hardirq or softirq contexts,
- * interrupts or bottom halves must also be respectively disabled before
- * entering the write section.
- *
- * This mechanism can't be used if the protected data contains pointers,
- * as the writer can invalidate a pointer that a reader is following.
- *
- * If the write serialization mechanism is one of the common kernel
- * locking primitives, use a sequence counter with associated lock
- * (seqcount_LOCKNAME_t) instead.
- *
- * If it's desired to automatically handle the sequence counter writer
- * serialization and non-preemptibility requirements, use a sequential
- * lock (seqlock_t) instead.
- *
- * See Documentation/locking/seqlock.rst
- */
-typedef struct seqcount {
- unsigned sequence;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-} seqcount_t;
-
static inline void __seqcount_init(seqcount_t *s, const char *name,
struct lock_class_key *key)
{
@@ -132,28 +102,6 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
*/
/*
- * For PREEMPT_RT, seqcount_LOCKNAME_t write side critical sections cannot
- * disable preemption. It can lead to higher latencies, and the write side
- * sections will not be able to acquire locks which become sleeping locks
- * (e.g. spinlock_t).
- *
- * To remain preemptible while avoiding a possible livelock caused by the
- * reader preempting the writer, use a different technique: let the reader
- * detect if a seqcount_LOCKNAME_t writer is in progress. If that is the
- * case, acquire then release the associated LOCKNAME writer serialization
- * lock. This will allow any possibly-preempted writer to make progress
- * until the end of its writer serialization lock critical section.
- *
- * This lock-unlock technique must be implemented for all of PREEMPT_RT
- * sleeping locks. See Documentation/locking/locktypes.rst
- */
-#if defined(CONFIG_LOCKDEP) || defined(CONFIG_PREEMPT_RT)
-#define __SEQ_LOCK(expr) expr
-#else
-#define __SEQ_LOCK(expr)
-#endif
-
-/*
* typedef seqcount_LOCKNAME_t - sequence counter with LOCKNAME associated
* @seqcount: The real sequence counter
* @lock: Pointer to the associated lock
@@ -191,22 +139,21 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
* @lockname: "LOCKNAME" part of seqcount_LOCKNAME_t
* @locktype: LOCKNAME canonical C data type
* @preemptible: preemptibility of above locktype
- * @lockmember: argument for lockdep_assert_held()
- * @lockbase: associated lock release function (prefix only)
- * @lock_acquire: associated lock acquisition function (full call)
+ * @lockbase: prefix for associated lock/unlock
*/
-#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockmember, lockbase, lock_acquire) \
-typedef struct seqcount_##lockname { \
- seqcount_t seqcount; \
- __SEQ_LOCK(locktype *lock); \
-} seqcount_##lockname##_t; \
- \
+#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockbase) \
static __always_inline seqcount_t * \
__seqprop_##lockname##_ptr(seqcount_##lockname##_t *s) \
{ \
return &s->seqcount; \
} \
\
+static __always_inline const seqcount_t * \
+__seqprop_##lockname##_const_ptr(const seqcount_##lockname##_t *s) \
+{ \
+ return &s->seqcount; \
+} \
+ \
static __always_inline unsigned \
__seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s) \
{ \
@@ -216,7 +163,7 @@ __seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s) \
return seq; \
\
if (preemptible && unlikely(seq & 1)) { \
- __SEQ_LOCK(lock_acquire); \
+ __SEQ_LOCK(lockbase##_lock(s->lock)); \
__SEQ_LOCK(lockbase##_unlock(s->lock)); \
\
/* \
@@ -242,7 +189,7 @@ __seqprop_##lockname##_preemptible(const seqcount_##lockname##_t *s) \
static __always_inline void \
__seqprop_##lockname##_assert(const seqcount_##lockname##_t *s) \
{ \
- __SEQ_LOCK(lockdep_assert_held(lockmember)); \
+ __SEQ_LOCK(lockdep_assert_held(s->lock)); \
}
/*
@@ -254,6 +201,11 @@ static inline seqcount_t *__seqprop_ptr(seqcount_t *s)
return s;
}
+static inline const seqcount_t *__seqprop_const_ptr(const seqcount_t *s)
+{
+ return s;
+}
+
static inline unsigned __seqprop_sequence(const seqcount_t *s)
{
return READ_ONCE(s->sequence);
@@ -271,10 +223,11 @@ static inline void __seqprop_assert(const seqcount_t *s)
#define __SEQ_RT IS_ENABLED(CONFIG_PREEMPT_RT)
-SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, s->lock, raw_spin, raw_spin_lock(s->lock))
-SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, s->lock, spin, spin_lock(s->lock))
-SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, s->lock, read, read_lock(s->lock))
-SEQCOUNT_LOCKNAME(mutex, struct mutex, true, s->lock, mutex, mutex_lock(s->lock))
+SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, raw_spin)
+SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, spin)
+SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, read)
+SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
+#undef SEQCOUNT_LOCKNAME
/*
* SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
@@ -294,19 +247,20 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, s->lock, mutex
#define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define __seqprop_case(s, lockname, prop) \
- seqcount_##lockname##_t: __seqprop_##lockname##_##prop((void *)(s))
+ seqcount_##lockname##_t: __seqprop_##lockname##_##prop
#define __seqprop(s, prop) _Generic(*(s), \
- seqcount_t: __seqprop_##prop((void *)(s)), \
+ seqcount_t: __seqprop_##prop, \
__seqprop_case((s), raw_spinlock, prop), \
__seqprop_case((s), spinlock, prop), \
__seqprop_case((s), rwlock, prop), \
__seqprop_case((s), mutex, prop))
-#define seqprop_ptr(s) __seqprop(s, ptr)
-#define seqprop_sequence(s) __seqprop(s, sequence)
-#define seqprop_preemptible(s) __seqprop(s, preemptible)
-#define seqprop_assert(s) __seqprop(s, assert)
+#define seqprop_ptr(s) __seqprop(s, ptr)(s)
+#define seqprop_const_ptr(s) __seqprop(s, const_ptr)(s)
+#define seqprop_sequence(s) __seqprop(s, sequence)(s)
+#define seqprop_preemptible(s) __seqprop(s, preemptible)(s)
+#define seqprop_assert(s) __seqprop(s, assert)(s)
/**
* __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
@@ -355,7 +309,7 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, s->lock, mutex
*/
#define read_seqcount_begin(s) \
({ \
- seqcount_lockdep_reader_access(seqprop_ptr(s)); \
+ seqcount_lockdep_reader_access(seqprop_const_ptr(s)); \
raw_read_seqcount_begin(s); \
})
@@ -421,7 +375,7 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, s->lock, mutex
* Return: true if a read section retry is required, else false
*/
#define __read_seqcount_retry(s, start) \
- do___read_seqcount_retry(seqprop_ptr(s), start)
+ do___read_seqcount_retry(seqprop_const_ptr(s), start)
static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start)
{
@@ -441,7 +395,7 @@ static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start)
* Return: true if a read section retry is required, else false
*/
#define read_seqcount_retry(s, start) \
- do_read_seqcount_retry(seqprop_ptr(s), start)
+ do_read_seqcount_retry(seqprop_const_ptr(s), start)
static inline int do_read_seqcount_retry(const seqcount_t *s, unsigned start)
{
@@ -512,8 +466,8 @@ do { \
static inline void do_write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
- do_raw_write_seqcount_begin(s);
seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
+ do_raw_write_seqcount_begin(s);
}
/**
@@ -574,7 +528,7 @@ static inline void do_write_seqcount_end(seqcount_t *s)
* via WRITE_ONCE): a) to ensure the writes become visible to other threads
* atomically, avoiding compiler optimizations; b) to document which writes are
* meant to propagate to the reader critical section. This is necessary because
- * neither writes before and after the barrier are enclosed in a seq-writer
+ * neither writes before nor after the barrier are enclosed in a seq-writer
* critical section that would ensure readers are aware of ongoing writes::
*
* seqcount_t seq;
@@ -671,9 +625,9 @@ typedef struct {
*
* Return: sequence counter raw value. Use the lowest bit as an index for
* picking which data copy to read. The full counter must then be checked
- * with read_seqcount_latch_retry().
+ * with raw_read_seqcount_latch_retry().
*/
-static inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
+static __always_inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
{
/*
* Pairs with the first smp_wmb() in raw_write_seqcount_latch().
@@ -683,16 +637,17 @@ static inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
}
/**
- * read_seqcount_latch_retry() - end a seqcount_latch_t read section
+ * raw_read_seqcount_latch_retry() - end a seqcount_latch_t read section
* @s: Pointer to seqcount_latch_t
* @start: count, from raw_read_seqcount_latch()
*
* Return: true if a read section retry is required, else false
*/
-static inline int
-read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
+static __always_inline int
+raw_read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
{
- return read_seqcount_retry(&s->seqcount, start);
+ smp_rmb();
+ return unlikely(READ_ONCE(s->seqcount.sequence) != start);
}
/**
@@ -752,7 +707,7 @@ read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
* entry = data_query(latch->data[idx], ...);
*
* // This includes needed smp_rmb()
- * } while (read_seqcount_latch_retry(&latch->seq, seq));
+ * } while (raw_read_seqcount_latch_retry(&latch->seq, seq));
*
* return entry;
* }
@@ -783,25 +738,6 @@ static inline void raw_write_seqcount_latch(seqcount_latch_t *s)
smp_wmb(); /* increment "sequence" before following stores */
}
-/*
- * Sequential locks (seqlock_t)
- *
- * Sequence counters with an embedded spinlock for writer serialization
- * and non-preemptibility.
- *
- * For more info, see:
- * - Comments on top of seqcount_t
- * - Documentation/locking/seqlock.rst
- */
-typedef struct {
- /*
- * Make sure that readers don't starve writers on PREEMPT_RT: use
- * seqcount_spinlock_t instead of seqcount_t. Check __SEQ_LOCK().
- */
- seqcount_spinlock_t seqcount;
- spinlock_t lock;
-} seqlock_t;
-
#define __SEQLOCK_UNLOCKED(lockname) \
{ \
.seqcount = SEQCNT_SPINLOCK_ZERO(lockname, &(lockname).lock), \
@@ -863,7 +799,7 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
}
/*
- * For all seqlock_t write side functions, use the the internal
+ * For all seqlock_t write side functions, use the internal
* do_write_seqcount_begin() instead of generic write_seqcount_begin().
* This way, no redundant lockdep_assert_held() checks are added.
*/
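With the const-qualified seqprop plumbing in place, the canonical read-side
retry loop can now take a const counter. A minimal sketch (names
illustrative):

    static u64 demo_read_stat(const seqcount_spinlock_t *seq, const u64 *stat)
    {
            unsigned int start;
            u64 val;

            do {
                    start = read_seqcount_begin(seq);
                    val = *stat;
            } while (read_seqcount_retry(seq, start));

            return val;
    }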
diff --git a/include/linux/seqlock_types.h b/include/linux/seqlock_types.h
new file mode 100644
index 000000000000..dfdf43e3fa3d
--- /dev/null
+++ b/include/linux/seqlock_types.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_SEQLOCK_TYPES_H
+#define __LINUX_SEQLOCK_TYPES_H
+
+#include <linux/lockdep_types.h>
+#include <linux/mutex_types.h>
+#include <linux/spinlock_types.h>
+
+/*
+ * Sequence counters (seqcount_t)
+ *
+ * This is the raw counting mechanism, without any writer protection.
+ *
+ * Write side critical sections must be serialized and non-preemptible.
+ *
+ * If readers can be invoked from hardirq or softirq contexts,
+ * interrupts or bottom halves must also be respectively disabled before
+ * entering the write section.
+ *
+ * This mechanism can't be used if the protected data contains pointers,
+ * as the writer can invalidate a pointer that a reader is following.
+ *
+ * If the write serialization mechanism is one of the common kernel
+ * locking primitives, use a sequence counter with associated lock
+ * (seqcount_LOCKNAME_t) instead.
+ *
+ * If it's desired to automatically handle the sequence counter writer
+ * serialization and non-preemptibility requirements, use a sequential
+ * lock (seqlock_t) instead.
+ *
+ * See Documentation/locking/seqlock.rst
+ */
+typedef struct seqcount {
+ unsigned sequence;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+} seqcount_t;
+
+/*
+ * For PREEMPT_RT, seqcount_LOCKNAME_t write side critical sections cannot
+ * disable preemption. It can lead to higher latencies, and the write side
+ * sections will not be able to acquire locks which become sleeping locks
+ * (e.g. spinlock_t).
+ *
+ * To remain preemptible while avoiding a possible livelock caused by the
+ * reader preempting the writer, use a different technique: let the reader
+ * detect if a seqcount_LOCKNAME_t writer is in progress. If that is the
+ * case, acquire then release the associated LOCKNAME writer serialization
+ * lock. This will allow any possibly-preempted writer to make progress
+ * until the end of its writer serialization lock critical section.
+ *
+ * This lock-unlock technique must be implemented for all of PREEMPT_RT
+ * sleeping locks. See Documentation/locking/locktypes.rst
+ */
+#if defined(CONFIG_LOCKDEP) || defined(CONFIG_PREEMPT_RT)
+#define __SEQ_LOCK(expr) expr
+#else
+#define __SEQ_LOCK(expr)
+#endif
+
+#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockbase) \
+typedef struct seqcount_##lockname { \
+ seqcount_t seqcount; \
+ __SEQ_LOCK(locktype *lock); \
+} seqcount_##lockname##_t;
+
+SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, raw_spin)
+SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, spin)
+SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, read)
+SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
+#undef SEQCOUNT_LOCKNAME
+
+/*
+ * Sequential locks (seqlock_t)
+ *
+ * Sequence counters with an embedded spinlock for writer serialization
+ * and non-preemptibility.
+ *
+ * For more info, see:
+ * - Comments on top of seqcount_t
+ * - Documentation/locking/seqlock.rst
+ */
+typedef struct {
+ /*
+ * Make sure that readers don't starve writers on PREEMPT_RT: use
+ * seqcount_spinlock_t instead of seqcount_t. Check __SEQ_LOCK().
+ */
+ seqcount_spinlock_t seqcount;
+ spinlock_t lock;
+} seqlock_t;
+
+#endif /* __LINUX_SEQLOCK_TYPES_H */
diff --git a/include/linux/serdev.h b/include/linux/serdev.h
index f5f97fa25e8a..3fab88ba265e 100644
--- a/include/linux/serdev.h
+++ b/include/linux/serdev.h
@@ -27,7 +27,7 @@ struct serdev_device;
* not sleep.
*/
struct serdev_device_ops {
- int (*receive_buf)(struct serdev_device *, const unsigned char *, size_t);
+ ssize_t (*receive_buf)(struct serdev_device *, const u8 *, size_t);
void (*write_wakeup)(struct serdev_device *);
};
@@ -82,7 +82,7 @@ enum serdev_parity {
* serdev controller structures
*/
struct serdev_controller_ops {
- int (*write_buf)(struct serdev_controller *, const unsigned char *, size_t);
+ ssize_t (*write_buf)(struct serdev_controller *, const u8 *, size_t);
void (*write_flush)(struct serdev_controller *);
int (*write_room)(struct serdev_controller *);
int (*open)(struct serdev_controller *);
@@ -99,12 +99,14 @@ struct serdev_controller_ops {
/**
* struct serdev_controller - interface to the serdev controller
* @dev: Driver model representation of the device.
+ * @host: Serial port hardware controller device
* @nr: number identifier for this controller/bus.
* @serdev: Pointer to slave device for this controller.
* @ops: Controller operations.
*/
struct serdev_controller {
struct device dev;
+ struct device *host;
unsigned int nr;
struct serdev_device *serdev;
const struct serdev_controller_ops *ops;
@@ -167,7 +169,9 @@ struct serdev_device *serdev_device_alloc(struct serdev_controller *);
int serdev_device_add(struct serdev_device *);
void serdev_device_remove(struct serdev_device *);
-struct serdev_controller *serdev_controller_alloc(struct device *, size_t);
+struct serdev_controller *serdev_controller_alloc(struct device *host,
+ struct device *parent,
+ size_t size);
int serdev_controller_add(struct serdev_controller *);
void serdev_controller_remove(struct serdev_controller *);
@@ -181,9 +185,9 @@ static inline void serdev_controller_write_wakeup(struct serdev_controller *ctrl
serdev->ops->write_wakeup(serdev);
}
-static inline int serdev_controller_receive_buf(struct serdev_controller *ctrl,
- const unsigned char *data,
- size_t count)
+static inline ssize_t serdev_controller_receive_buf(struct serdev_controller *ctrl,
+ const u8 *data,
+ size_t count)
{
struct serdev_device *serdev = ctrl->serdev;
@@ -200,13 +204,13 @@ void serdev_device_close(struct serdev_device *);
int devm_serdev_device_open(struct device *, struct serdev_device *);
unsigned int serdev_device_set_baudrate(struct serdev_device *, unsigned int);
void serdev_device_set_flow_control(struct serdev_device *, bool);
-int serdev_device_write_buf(struct serdev_device *, const unsigned char *, size_t);
+int serdev_device_write_buf(struct serdev_device *, const u8 *, size_t);
void serdev_device_wait_until_sent(struct serdev_device *, long);
int serdev_device_get_tiocm(struct serdev_device *);
int serdev_device_set_tiocm(struct serdev_device *, int, int);
int serdev_device_break_ctl(struct serdev_device *serdev, int break_state);
void serdev_device_write_wakeup(struct serdev_device *);
-int serdev_device_write(struct serdev_device *, const unsigned char *, size_t, long);
+ssize_t serdev_device_write(struct serdev_device *, const u8 *, size_t, long);
void serdev_device_write_flush(struct serdev_device *);
int serdev_device_write_room(struct serdev_device *);
@@ -244,7 +248,7 @@ static inline unsigned int serdev_device_set_baudrate(struct serdev_device *sdev
}
static inline void serdev_device_set_flow_control(struct serdev_device *sdev, bool enable) {}
static inline int serdev_device_write_buf(struct serdev_device *serdev,
- const unsigned char *buf,
+ const u8 *buf,
size_t count)
{
return -ENODEV;
@@ -262,8 +266,9 @@ static inline int serdev_device_break_ctl(struct serdev_device *serdev, int brea
{
return -EOPNOTSUPP;
}
-static inline int serdev_device_write(struct serdev_device *sdev, const unsigned char *buf,
- size_t count, unsigned long timeout)
+static inline ssize_t serdev_device_write(struct serdev_device *sdev,
+ const u8 *buf, size_t count,
+ unsigned long timeout)
{
return -ENODEV;
}
@@ -311,11 +316,13 @@ struct tty_driver;
#ifdef CONFIG_SERIAL_DEV_CTRL_TTYPORT
struct device *serdev_tty_port_register(struct tty_port *port,
+ struct device *host,
struct device *parent,
struct tty_driver *drv, int idx);
int serdev_tty_port_unregister(struct tty_port *port);
#else
static inline struct device *serdev_tty_port_register(struct tty_port *port,
+ struct device *host,
struct device *parent,
struct tty_driver *drv, int idx)
{
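The int-to-ssize_t change in receive_buf ripples into every serdev client
driver. An illustrative sketch of a conforming ops table ("demo" names and
the FIFO helper are hypothetical):

    static ssize_t demo_receive_buf(struct serdev_device *serdev,
                                    const u8 *data, size_t count)
    {
            /* May consume fewer than 'count' bytes; return what was taken. */
            return demo_push_fifo(serdev, data, count);     /* illustrative */
    }

    static const struct serdev_device_ops demo_serdev_ops = {
            .receive_buf    = demo_receive_buf,
            .write_wakeup   = serdev_device_write_wakeup,
    };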
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 6f78f302d272..be65de65fe61 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -7,17 +7,34 @@
#ifndef _LINUX_SERIAL_8250_H
#define _LINUX_SERIAL_8250_H
+#include <linux/errno.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/platform_device.h>
+struct uart_8250_port;
+
/*
* This is the platform device platform_data structure
+ *
+ * @mapsize: Port size for ioremap()
+ * @bugs: Port bugs
+ *
+ * @dl_read: ``u32 ()(struct uart_8250_port *up)``
+ *
+ * UART divisor latch read.
+ *
+ * @dl_write: ``void ()(struct uart_8250_port *up, u32 value)``
+ *
+ * Write @value into UART divisor latch.
+ *
+ * Locking: Caller holds port's lock.
*/
struct plat_serial8250_port {
unsigned long iobase; /* io base address */
void __iomem *membase; /* ioremap cookie or NULL */
resource_size_t mapbase; /* resource base */
+ resource_size_t mapsize;
unsigned int uartclk; /* UART clock rate */
unsigned int irq; /* interrupt number */
unsigned long irqflags; /* request_irq flags */
@@ -28,8 +45,11 @@ struct plat_serial8250_port {
unsigned char has_sysrq; /* supports magic SysRq */
unsigned int type; /* If UPF_FIXED_TYPE */
upf_t flags; /* UPF_* flags */
+ u16 bugs; /* port bugs */
unsigned int (*serial_in)(struct uart_port *, int);
void (*serial_out)(struct uart_port *, int, int);
+ u32 (*dl_read)(struct uart_8250_port *up);
+ void (*dl_write)(struct uart_8250_port *up, u32 value);
void (*set_termios)(struct uart_port *,
struct ktermios *new,
const struct ktermios *old);
@@ -90,15 +110,23 @@ struct uart_8250_em485 {
* their own 8250 ports without registering their own
* platform device. Using these will make your driver
* dependent on the 8250 driver.
+ *
+ * @dl_read: ``u32 ()(struct uart_8250_port *port)``
+ *
+ * UART divisor latch read.
+ *
+ * @dl_write: ``void ()(struct uart_8250_port *port, u32 value)``
+ *
+ * Write @value into UART divisor latch.
+ *
+ * Locking: Caller holds port's lock.
*/
-
struct uart_8250_port {
struct uart_port port;
struct timer_list timer; /* "no irq" timer */
struct list_head list; /* ports on this IRQ */
u32 capabilities; /* port capabilities */
- unsigned short bugs; /* port bugs */
- bool fifo_bug; /* min RX trigger if enabled */
+ u16 bugs; /* port bugs */
unsigned int tx_loadsz; /* transmit fifo load size */
unsigned char acr;
unsigned char fcr;
@@ -129,8 +157,8 @@ struct uart_8250_port {
const struct uart_8250_ops *ops;
/* 8250 specific callbacks */
- int (*dl_read)(struct uart_8250_port *);
- void (*dl_write)(struct uart_8250_port *, int);
+ u32 (*dl_read)(struct uart_8250_port *up);
+ void (*dl_write)(struct uart_8250_port *up, u32 value);
struct uart_8250_em485 *em485;
void (*rs485_start_tx)(struct uart_8250_port *);
@@ -183,8 +211,11 @@ void serial8250_set_isa_configurator(void (*v)(int port, struct uart_port *up,
u32 *capabilities));
#ifdef CONFIG_SERIAL_8250_RT288X
-unsigned int au_serial_in(struct uart_port *p, int offset);
-void au_serial_out(struct uart_port *p, int offset, int value);
+int rt288x_setup(struct uart_port *p);
+int au_platform_setup(struct plat_serial8250_port *p);
+#else
+static inline int rt288x_setup(struct uart_port *p) { return -ENODEV; }
+static inline int au_platform_setup(struct plat_serial8250_port *p) { return -ENODEV; }
#endif
#endif
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 66ecec15a1bf..55b1f3ba48ac 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -28,6 +28,7 @@
struct uart_port;
struct serial_struct;
+struct serial_port_device;
struct device;
struct gpio_desc;
@@ -458,6 +459,8 @@ struct uart_port {
struct serial_rs485 *rs485);
int (*iso7816_config)(struct uart_port *,
struct serial_iso7816 *iso7816);
+ unsigned int ctrl_id; /* optional serial core controller id */
+ unsigned int port_id; /* optional serial core port id */
unsigned int irq; /* irq number */
unsigned long irqflags; /* irq flags */
unsigned int uartclk; /* base uart clock */
@@ -563,10 +566,11 @@ struct uart_port {
unsigned int minor;
resource_size_t mapbase; /* for ioremap */
resource_size_t mapsize;
- struct device *dev; /* parent device */
+ struct device *dev; /* serial port physical parent device */
+ struct serial_port_device *port_dev; /* serial core port device */
unsigned long sysrq; /* sysrq timeout */
- unsigned int sysrq_ch; /* char for sysrq */
+ u8 sysrq_ch; /* char for sysrq */
unsigned char has_sysrq;
unsigned char sysrq_seq; /* index in sysrq_toggle_seq */
@@ -584,6 +588,85 @@ struct uart_port {
void *private_data; /* generic platform data pointer */
};
+/**
+ * uart_port_lock - Lock the UART port
+ * @up: Pointer to UART port structure
+ */
+static inline void uart_port_lock(struct uart_port *up)
+{
+ spin_lock(&up->lock);
+}
+
+/**
+ * uart_port_lock_irq - Lock the UART port and disable interrupts
+ * @up: Pointer to UART port structure
+ */
+static inline void uart_port_lock_irq(struct uart_port *up)
+{
+ spin_lock_irq(&up->lock);
+}
+
+/**
+ * uart_port_lock_irqsave - Lock the UART port, save and disable interrupts
+ * @up: Pointer to UART port structure
+ * @flags: Pointer to interrupt flags storage
+ */
+static inline void uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags)
+{
+ spin_lock_irqsave(&up->lock, *flags);
+}
+
+/**
+ * uart_port_trylock - Try to lock the UART port
+ * @up: Pointer to UART port structure
+ *
+ * Returns: True if lock was acquired, false otherwise
+ */
+static inline bool uart_port_trylock(struct uart_port *up)
+{
+ return spin_trylock(&up->lock);
+}
+
+/**
+ * uart_port_trylock_irqsave - Try to lock the UART port, save and disable interrupts
+ * @up: Pointer to UART port structure
+ * @flags: Pointer to interrupt flags storage
+ *
+ * Returns: True if lock was acquired, false otherwise
+ */
+static inline bool uart_port_trylock_irqsave(struct uart_port *up, unsigned long *flags)
+{
+ return spin_trylock_irqsave(&up->lock, *flags);
+}
+
+/**
+ * uart_port_unlock - Unlock the UART port
+ * @up: Pointer to UART port structure
+ */
+static inline void uart_port_unlock(struct uart_port *up)
+{
+ spin_unlock(&up->lock);
+}
+
+/**
+ * uart_port_unlock_irq - Unlock the UART port and re-enable interrupts
+ * @up: Pointer to UART port structure
+ */
+static inline void uart_port_unlock_irq(struct uart_port *up)
+{
+ spin_unlock_irq(&up->lock);
+}
+
+/**
+ * uart_port_unlock_irqrestore - Unlock the UART port, restore interrupts
+ * @up: Pointer to UART port structure
+ * @flags: The saved interrupt flags for restore
+ */
+static inline void uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags)
+{
+ spin_unlock_irqrestore(&up->lock, flags);
+}
+
static inline int serial_port_in(struct uart_port *up, int offset)
{
return up->serial_in(up, offset);
@@ -665,8 +748,17 @@ struct uart_driver {
void uart_write_wakeup(struct uart_port *port);
-#define __uart_port_tx(uport, ch, tx_ready, put_char, tx_done, for_test, \
- for_post) \
+/**
+ * enum UART_TX_FLAGS -- flags for uart_port_tx_flags()
+ *
+ * @UART_TX_NOSTOP: don't call port->ops->stop_tx() on empty buffer
+ */
+enum UART_TX_FLAGS {
+ UART_TX_NOSTOP = BIT(0),
+};
+
+#define __uart_port_tx(uport, ch, flags, tx_ready, put_char, tx_done, \
+ for_test, for_post) \
({ \
struct uart_port *__port = (uport); \
struct circ_buf *xmit = &__port->state->xmit; \
@@ -694,7 +786,7 @@ void uart_write_wakeup(struct uart_port *port);
if (pending < WAKEUP_CHARS) { \
uart_write_wakeup(__port); \
\
- if (pending == 0) \
+ if (!((flags) & UART_TX_NOSTOP) && pending == 0) \
__port->ops->stop_tx(__port); \
} \
\
@@ -729,7 +821,7 @@ void uart_write_wakeup(struct uart_port *port);
*/
#define uart_port_tx_limited(port, ch, count, tx_ready, put_char, tx_done) ({ \
unsigned int __count = (count); \
- __uart_port_tx(port, ch, tx_ready, put_char, tx_done, __count, \
+ __uart_port_tx(port, ch, 0, tx_ready, put_char, tx_done, __count, \
__count--); \
})
@@ -743,8 +835,21 @@ void uart_write_wakeup(struct uart_port *port);
* See uart_port_tx_limited() for more details.
*/
#define uart_port_tx(port, ch, tx_ready, put_char) \
- __uart_port_tx(port, ch, tx_ready, put_char, ({}), true, ({}))
+ __uart_port_tx(port, ch, 0, tx_ready, put_char, ({}), true, ({}))
+
+/**
+ * uart_port_tx_flags -- transmit helper for uart_port with flags
+ * @port: uart port
+ * @ch: variable to store a character to be written to the HW
+ * @flags: %UART_TX_NOSTOP or similar
+ * @tx_ready: can HW accept more data function
+ * @put_char: function to write a character
+ *
+ * See uart_port_tx_limited() for more details.
+ */
+#define uart_port_tx_flags(port, ch, flags, tx_ready, put_char) \
+ __uart_port_tx(port, ch, flags, tx_ready, put_char, ({}), true, ({}))
/*
* Baud rate helpers.
*/
@@ -769,9 +874,9 @@ static inline unsigned long uart_fifo_timeout(struct uart_port *port)
}
/* Base timer interval for polling */
-static inline int uart_poll_timeout(struct uart_port *port)
+static inline unsigned long uart_poll_timeout(struct uart_port *port)
{
- int timeout = uart_fifo_timeout(port);
+ unsigned long timeout = uart_fifo_timeout(port);
return timeout > 6 ? (timeout / 2 - 2) : 1;
}
@@ -853,7 +958,7 @@ void uart_console_write(struct uart_port *port, const char *s,
int uart_register_driver(struct uart_driver *uart);
void uart_unregister_driver(struct uart_driver *uart);
int uart_add_one_port(struct uart_driver *reg, struct uart_port *port);
-int uart_remove_one_port(struct uart_driver *reg, struct uart_port *port);
+void uart_remove_one_port(struct uart_driver *reg, struct uart_port *port);
bool uart_match_port(const struct uart_port *port1,
const struct uart_port *port2);
@@ -900,16 +1005,16 @@ void uart_handle_dcd_change(struct uart_port *uport, bool active);
void uart_handle_cts_change(struct uart_port *uport, bool active);
void uart_insert_char(struct uart_port *port, unsigned int status,
- unsigned int overrun, unsigned int ch, unsigned int flag);
+ unsigned int overrun, u8 ch, u8 flag);
void uart_xchar_out(struct uart_port *uport, int offset);
#ifdef CONFIG_MAGIC_SYSRQ_SERIAL
#define SYSRQ_TIMEOUT (HZ * 5)
-bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch);
+bool uart_try_toggle_sysrq(struct uart_port *port, u8 ch);
-static inline int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
+static inline int uart_handle_sysrq_char(struct uart_port *port, u8 ch)
{
if (!port->sysrq)
return 0;
@@ -928,7 +1033,7 @@ static inline int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch
return 0;
}
-static inline int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch)
+static inline int uart_prepare_sysrq_char(struct uart_port *port, u8 ch)
{
if (!port->sysrq)
return 0;
@@ -949,17 +1054,17 @@ static inline int uart_prepare_sysrq_char(struct uart_port *port, unsigned int c
static inline void uart_unlock_and_check_sysrq(struct uart_port *port)
{
- int sysrq_ch;
+ u8 sysrq_ch;
if (!port->has_sysrq) {
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return;
}
sysrq_ch = port->sysrq_ch;
port->sysrq_ch = 0;
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
if (sysrq_ch)
handle_sysrq(sysrq_ch);
@@ -968,38 +1073,38 @@ static inline void uart_unlock_and_check_sysrq(struct uart_port *port)
static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port,
unsigned long flags)
{
- int sysrq_ch;
+ u8 sysrq_ch;
if (!port->has_sysrq) {
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return;
}
sysrq_ch = port->sysrq_ch;
port->sysrq_ch = 0;
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
if (sysrq_ch)
handle_sysrq(sysrq_ch);
}
#else /* CONFIG_MAGIC_SYSRQ_SERIAL */
-static inline int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
+static inline int uart_handle_sysrq_char(struct uart_port *port, u8 ch)
{
return 0;
}
-static inline int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch)
+static inline int uart_prepare_sysrq_char(struct uart_port *port, u8 ch)
{
return 0;
}
static inline void uart_unlock_and_check_sysrq(struct uart_port *port)
{
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
}
static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port,
unsigned long flags)
{
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
#endif /* CONFIG_MAGIC_SYSRQ_SERIAL */
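The uart_port_lock*() wrappers are drop-in replacements for open-coded
spinlock calls on port->lock, and uart_port_tx_flags() with UART_TX_NOSTOP
keeps the transmitter running on an empty buffer. An illustrative sketch of
an interrupt handler using both ("demo" register helpers are hypothetical):

    static irqreturn_t demo_uart_irq(int irq, void *dev_id)
    {
            struct uart_port *port = dev_id;
            unsigned long flags;
            u8 ch;

            uart_port_lock_irqsave(port, &flags);   /* was spin_lock_irqsave() */

            uart_port_tx_flags(port, ch, UART_TX_NOSTOP,
                               demo_tx_ready(port),         /* illustrative */
                               demo_write_char(port, ch));

            uart_port_unlock_irqrestore(port, flags);
            return IRQ_HANDLED;
    }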
diff --git a/include/linux/sfp.h b/include/linux/sfp.h
index ef06a195b3c2..9346cd44814d 100644
--- a/include/linux/sfp.h
+++ b/include/linux/sfp.h
@@ -342,6 +342,12 @@ enum {
SFP_ENCODING = 11,
SFP_BR_NOMINAL = 12,
SFP_RATE_ID = 13,
+ SFF_RID_8079 = 0x01,
+ SFF_RID_8431_RX_ONLY = 0x02,
+ SFF_RID_8431_TX_ONLY = 0x04,
+ SFF_RID_8431 = 0x06,
+ SFF_RID_10G8G = 0x0e,
+
SFP_LINK_LEN_SM_KM = 14,
SFP_LINK_LEN_SM_100M = 15,
SFP_LINK_LEN_50UM_OM2_10M = 16,
@@ -465,6 +471,7 @@ enum {
SFP_STATUS = 110,
SFP_STATUS_TX_DISABLE = BIT(7),
SFP_STATUS_TX_DISABLE_FORCE = BIT(6),
+ SFP_STATUS_RS0_SELECT = BIT(3),
SFP_STATUS_TX_FAULT = BIT(2),
SFP_STATUS_RX_LOS = BIT(1),
SFP_ALARM0 = 112,
@@ -496,6 +503,7 @@ enum {
SFP_WARN1_RXPWR_LOW = BIT(6),
SFP_EXT_STATUS = 118,
+ SFP_EXT_STATUS_RS1_SELECT = BIT(3),
SFP_EXT_STATUS_PWRLVL_SELECT = BIT(0),
SFP_VSL = 120,
@@ -556,6 +564,7 @@ int sfp_get_module_eeprom_by_page(struct sfp_bus *bus,
struct netlink_ext_ack *extack);
void sfp_upstream_start(struct sfp_bus *bus);
void sfp_upstream_stop(struct sfp_bus *bus);
+void sfp_upstream_set_signal_rate(struct sfp_bus *bus, unsigned int rate_kbd);
void sfp_bus_put(struct sfp_bus *bus);
struct sfp_bus *sfp_bus_find_fwnode(const struct fwnode_handle *fwnode);
int sfp_bus_add_upstream(struct sfp_bus *bus, void *upstream,
@@ -615,6 +624,11 @@ static inline void sfp_upstream_stop(struct sfp_bus *bus)
{
}
+static inline void sfp_upstream_set_signal_rate(struct sfp_bus *bus,
+ unsigned int rate_kbd)
+{
+}
+
static inline void sfp_bus_put(struct sfp_bus *bus)
{
}
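The new sfp_upstream_set_signal_rate() hook lets the upstream (typically
phylink) report the current signalling rate so the RS0/RS1 rate-select bits
above can be driven. An illustrative caller; the rate math is a
simplification that ignores line-coding overhead:

    static void demo_sfp_rate_update(struct sfp_bus *bus, int speed_mbps)
    {
            /* rate_kbd is in kilobaud; NRZ assumed for simplicity. */
            sfp_upstream_set_signal_rate(bus, speed_mbps * 1000);
    }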
diff --git a/include/linux/sh_intc.h b/include/linux/sh_intc.h
index 37ad81058d6a..27ae79191bdc 100644
--- a/include/linux/sh_intc.h
+++ b/include/linux/sh_intc.h
@@ -13,9 +13,9 @@
/*
* Convert back and forth between INTEVT and IRQ values.
*/
-#ifdef CONFIG_CPU_HAS_INTEVT
-#define evt2irq(evt) (((evt) >> 5) - 16)
-#define irq2evt(irq) (((irq) + 16) << 5)
+#ifdef CONFIG_CPU_HAS_INTEVT /* Avoid IRQ0 (invalid for platform devices) */
+#define evt2irq(evt) ((evt) >> 5)
+#define irq2evt(irq) ((irq) << 5)
#else
#define evt2irq(evt) (evt)
#define irq2evt(irq) (irq)
diff --git a/include/linux/shm.h b/include/linux/shm.h
index d8e69aed3d32..c55bef0538e5 100644
--- a/include/linux/shm.h
+++ b/include/linux/shm.h
@@ -2,12 +2,12 @@
#ifndef _LINUX_SHM_H_
#define _LINUX_SHM_H_
-#include <linux/list.h>
+#include <linux/types.h>
#include <asm/page.h>
-#include <uapi/linux/shm.h>
#include <asm/shmparam.h>
struct file;
+struct task_struct;
#ifdef CONFIG_SYSVIPC
struct sysv_shm {
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 9029abd29b1c..2caa6b86106a 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -13,20 +13,32 @@
/* inode in-kernel data */
+#ifdef CONFIG_TMPFS_QUOTA
+#define SHMEM_MAXQUOTAS 2
+#endif
+
struct shmem_inode_info {
spinlock_t lock;
unsigned int seals; /* shmem seals */
unsigned long flags;
unsigned long alloced; /* data pages alloced to file */
unsigned long swapped; /* subtotal assigned to swap */
- pgoff_t fallocend; /* highest fallocate endindex */
- struct list_head shrinklist; /* shrinkable hpage inodes */
- struct list_head swaplist; /* chain of maybes on swap */
+ union {
+ struct offset_ctx dir_offsets; /* stable directory offsets */
+ struct {
+ struct list_head shrinklist; /* shrinkable hpage inodes */
+ struct list_head swaplist; /* chain of maybes on swap */
+ };
+ };
+ struct timespec64 i_crtime; /* file creation time */
struct shared_policy policy; /* NUMA memory alloc policy */
struct simple_xattrs xattrs; /* list of xattrs */
+ pgoff_t fallocend; /* highest fallocate endindex */
+ unsigned int fsflags; /* for FS_IOC_[SG]ETFLAGS */
atomic_t stop_eviction; /* hold when working on inode */
- struct timespec64 i_crtime; /* file creation time */
- unsigned int fsflags; /* flags for FS_IOC_[SG]ETFLAGS */
+#ifdef CONFIG_TMPFS_QUOTA
+ struct dquot *i_dquot[MAXQUOTAS];
+#endif
struct inode vfs_inode;
};
@@ -35,11 +47,18 @@ struct shmem_inode_info {
(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NODUMP_FL | FS_NOATIME_FL)
#define SHMEM_FL_INHERITED (FS_NODUMP_FL | FS_NOATIME_FL)
+struct shmem_quota_limits {
+ qsize_t usrquota_bhardlimit; /* Default user quota block hard limit */
+ qsize_t usrquota_ihardlimit; /* Default user quota inode hard limit */
+ qsize_t grpquota_bhardlimit; /* Default group quota block hard limit */
+ qsize_t grpquota_ihardlimit; /* Default group quota inode hard limit */
+};
+
struct shmem_sb_info {
unsigned long max_blocks; /* How many blocks are allowed */
struct percpu_counter used_blocks; /* How many are allocated */
unsigned long max_inodes; /* How many inodes are allowed */
- unsigned long free_inodes; /* How many are left for allocation */
+ unsigned long free_ispace; /* How much ispace left for allocation */
raw_spinlock_t stat_lock; /* Serialize shmem_sb_info changes */
umode_t mode; /* Mount mode for root directory */
unsigned char huge; /* Whether to try for hugepages */
@@ -53,6 +72,7 @@ struct shmem_sb_info {
spinlock_t shrinklist_lock; /* Protects shrinklist */
struct list_head shrinklist; /* List of shrinkable inodes */
unsigned long shrinklist_len; /* Length of shrinklist */
+ struct shmem_quota_limits qlimits; /* Default quota limits */
};
static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
@@ -172,4 +192,17 @@ extern int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
#endif /* CONFIG_SHMEM */
#endif /* CONFIG_USERFAULTFD */
+/*
+ * Used space is stored as an unsigned 64-bit value in bytes, but the
+ * quota core supports only signed 64-bit values, so use that as the
+ * limit.
+ */
+#define SHMEM_QUOTA_MAX_SPC_LIMIT 0x7fffffffffffffffLL /* 2^63-1 */
+#define SHMEM_QUOTA_MAX_INO_LIMIT 0x7fffffffffffffffLL
+
+#ifdef CONFIG_TMPFS_QUOTA
+extern const struct dquot_operations shmem_quota_operations;
+extern struct quota_format_type shmem_quota_format;
+#endif /* CONFIG_TMPFS_QUOTA */
+
#endif
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 224293b2dd06..1a00be90d93a 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -4,6 +4,25 @@
#include <linux/atomic.h>
#include <linux/types.h>
+#include <linux/refcount.h>
+#include <linux/completion.h>
+
+#define SHRINKER_UNIT_BITS BITS_PER_LONG
+
+/*
+ * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
+ * shrinkers, which have elements charged to the memcg.
+ */
+struct shrinker_info_unit {
+ atomic_long_t nr_deferred[SHRINKER_UNIT_BITS];
+ DECLARE_BITMAP(map, SHRINKER_UNIT_BITS);
+};
+
+struct shrinker_info {
+ struct rcu_head rcu;
+ int map_nr_max;
+ struct shrinker_info_unit *unit[];
+};
/*
* This struct is used to pass information from page reclaim to the shrinkers.
@@ -70,6 +89,19 @@ struct shrinker {
int seeks; /* seeks to recreate an obj */
unsigned flags;
+ /*
+	 * The reference count of this shrinker. A registered shrinker has an
+	 * initial refcount of 1; lookup operations are then allowed to use it
+	 * via shrinker_try_get(). During unregistration the initial refcount
+	 * is dropped, and the shrinker is freed asynchronously via RCU once
+	 * its refcount reaches 0.
+ */
+ refcount_t refcount;
+	struct completion done;	/* used to wait for refcount to reach 0 */
+ struct rcu_head rcu;
+
+ void *private_data;
+
/* These are for internal use */
struct list_head list;
#ifdef CONFIG_MEMCG
@@ -86,48 +118,39 @@ struct shrinker {
};
#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
-/* Flags */
-#define SHRINKER_REGISTERED (1 << 0)
-#define SHRINKER_NUMA_AWARE (1 << 1)
-#define SHRINKER_MEMCG_AWARE (1 << 2)
+/* Internal flags */
+#define SHRINKER_REGISTERED BIT(0)
+#define SHRINKER_ALLOCATED BIT(1)
+
+/* Flags for users to use */
+#define SHRINKER_NUMA_AWARE BIT(2)
+#define SHRINKER_MEMCG_AWARE BIT(3)
/*
* For now this flag only makes sense when the shrinker is also MEMCG_AWARE;
* a non-MEMCG_AWARE shrinker should not have this flag set.
*/
-#define SHRINKER_NONSLAB (1 << 3)
+#define SHRINKER_NONSLAB BIT(4)
-extern int __printf(2, 3) prealloc_shrinker(struct shrinker *shrinker,
- const char *fmt, ...);
-extern void register_shrinker_prepared(struct shrinker *shrinker);
-extern int __printf(2, 3) register_shrinker(struct shrinker *shrinker,
- const char *fmt, ...);
-extern void unregister_shrinker(struct shrinker *shrinker);
-extern void free_prealloced_shrinker(struct shrinker *shrinker);
-extern void synchronize_shrinkers(void);
+__printf(2, 3)
+struct shrinker *shrinker_alloc(unsigned int flags, const char *fmt, ...);
+void shrinker_register(struct shrinker *shrinker);
+void shrinker_free(struct shrinker *shrinker);
-#ifdef CONFIG_SHRINKER_DEBUG
-extern int shrinker_debugfs_add(struct shrinker *shrinker);
-extern struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
- int *debugfs_id);
-extern void shrinker_debugfs_remove(struct dentry *debugfs_entry,
- int debugfs_id);
-extern int __printf(2, 3) shrinker_debugfs_rename(struct shrinker *shrinker,
- const char *fmt, ...);
-#else /* CONFIG_SHRINKER_DEBUG */
-static inline int shrinker_debugfs_add(struct shrinker *shrinker)
-{
- return 0;
-}
-static inline struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
- int *debugfs_id)
+static inline bool shrinker_try_get(struct shrinker *shrinker)
{
- *debugfs_id = -1;
- return NULL;
+ return refcount_inc_not_zero(&shrinker->refcount);
}
-static inline void shrinker_debugfs_remove(struct dentry *debugfs_entry,
- int debugfs_id)
+
+static inline void shrinker_put(struct shrinker *shrinker)
{
+ if (refcount_dec_and_test(&shrinker->refcount))
+ complete(&shrinker->done);
}
+
+#ifdef CONFIG_SHRINKER_DEBUG
+extern int __printf(2, 3) shrinker_debugfs_rename(struct shrinker *shrinker,
+ const char *fmt, ...);
+#else /* CONFIG_SHRINKER_DEBUG */
static inline __printf(2, 3)
int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...)
{
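The replacement API reads most clearly end to end: allocate, fill in the callbacks, register, and later free. A hedged sketch under the usual count_objects/scan_objects contract; struct foo_cache and the foo_* helpers are hypothetical:

static unsigned long foo_count(struct shrinker *shrink,
			       struct shrink_control *sc)
{
	struct foo_cache *cache = shrink->private_data;

	return atomic_long_read(&cache->nr_cached);
}

static unsigned long foo_scan(struct shrinker *shrink,
			      struct shrink_control *sc)
{
	/* Frees up to sc->nr_to_scan objects, returns the number freed. */
	return foo_cache_trim(shrink->private_data, sc->nr_to_scan);
}

static int foo_cache_init(struct foo_cache *cache)
{
	cache->shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "foo-cache");
	if (!cache->shrinker)
		return -ENOMEM;

	cache->shrinker->count_objects = foo_count;
	cache->shrinker->scan_objects = foo_scan;
	cache->shrinker->seeks = DEFAULT_SEEKS;
	cache->shrinker->private_data = cache;

	/* Sets SHRINKER_REGISTERED; from here reclaim may take temporary
	 * references via shrinker_try_get(). */
	shrinker_register(cache->shrinker);
	return 0;
}

static void foo_cache_exit(struct foo_cache *cache)
{
	/* Drops the initial refcount; the shrinker is freed via RCU once
	 * concurrent shrinker_try_get() holders call shrinker_put(). */
	shrinker_free(cache->shrinker);
}

Compared with the removed register_shrinker()/unregister_shrinker() pair, the structure is now allocated by the API itself, which is what makes the lockless refcounted lookup safe.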
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 3b98e7a28538..f19816832f05 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -3,6 +3,7 @@
#define _LINUX_SIGNAL_H
#include <linux/bug.h>
+#include <linux/list.h>
#include <linux/signal_types.h>
#include <linux/string.h>
diff --git a/include/linux/signal_types.h b/include/linux/signal_types.h
index a70b2bdbf4d9..caf4f7a59ab9 100644
--- a/include/linux/signal_types.h
+++ b/include/linux/signal_types.h
@@ -6,7 +6,7 @@
* Basic signal handling related data type definitions:
*/
-#include <linux/list.h>
+#include <linux/types.h>
#include <uapi/linux/signal.h>
typedef struct kernel_siginfo {
diff --git a/include/linux/sizes.h b/include/linux/sizes.h
index 84aa448d8bb3..c3a00b967d18 100644
--- a/include/linux/sizes.h
+++ b/include/linux/sizes.h
@@ -47,8 +47,17 @@
#define SZ_8G _AC(0x200000000, ULL)
#define SZ_16G _AC(0x400000000, ULL)
#define SZ_32G _AC(0x800000000, ULL)
+#define SZ_64G _AC(0x1000000000, ULL)
+#define SZ_128G _AC(0x2000000000, ULL)
+#define SZ_256G _AC(0x4000000000, ULL)
+#define SZ_512G _AC(0x8000000000, ULL)
#define SZ_1T _AC(0x10000000000, ULL)
+#define SZ_2T _AC(0x20000000000, ULL)
+#define SZ_4T _AC(0x40000000000, ULL)
+#define SZ_8T _AC(0x80000000000, ULL)
+#define SZ_16T _AC(0x100000000000, ULL)
+#define SZ_32T _AC(0x200000000000, ULL)
#define SZ_64T _AC(0x400000000000, ULL)
#endif /* __LINUX_SIZES_H__ */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 0b40417457cd..2dde34c29203 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -32,7 +32,6 @@
#include <linux/if_packet.h>
#include <linux/llist.h>
#include <net/flow.h>
-#include <net/page_pool.h>
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <linux/netfilter/nf_conntrack_common.h>
#endif
@@ -296,7 +295,7 @@ struct nf_bridge_info {
u8 bridged_dnat:1;
u8 sabotage_in_done:1;
__u16 frag_max_size;
- struct net_device *physindev;
+ int physinif;
/* always valid & non-NULL from FORWARD on, for physdev match */
struct net_device *physoutdev;
@@ -330,6 +329,7 @@ struct tc_skb_ext {
u8 post_ct_snat:1;
u8 post_ct_dnat:1;
u8 act_miss:1; /* Set if act_miss_cookie is used */
+ u8 l2_miss:1; /* Set by bridge upon FDB or MDB miss */
};
#endif
@@ -440,8 +440,6 @@ static inline bool skb_frag_must_loop(struct page *p)
copied += p_len, p++, p_off = 0, \
p_len = min_t(u32, f_len - copied, PAGE_SIZE)) \
-#define HAVE_HW_TIME_STAMP
-
/**
* struct skb_shared_hwtstamps - hardware time stamps
* @hwtstamp: hardware time stamp transformed into duration
@@ -568,6 +566,15 @@ struct ubuf_info_msgzc {
int mm_account_pinned_pages(struct mmpin *mmp, size_t size);
void mm_unaccount_pinned_pages(struct mmpin *mmp);
+/* Preserve some data across TX submission and completion.
+ *
+ * Note, this state is stored in the driver. Extending the layout
+ * might need some special care.
+ */
+struct xsk_tx_metadata_compl {
+ __u64 *tx_timestamp;
+};
+
/* This data is invariant across clones and lives at
* the end of the header data, ie. at skb->end.
*/
@@ -580,7 +587,10 @@ struct skb_shared_info {
/* Warning: this field is not always filled in (UFO)! */
unsigned short gso_segs;
struct sk_buff *frag_list;
- struct skb_shared_hwtstamps hwtstamps;
+ union {
+ struct skb_shared_hwtstamps hwtstamps;
+ struct xsk_tx_metadata_compl xsk_meta;
+ };
unsigned int gso_type;
u32 tskey;
@@ -744,7 +754,6 @@ typedef unsigned char *sk_buff_data_t;
* @dev_scratch: (aka @dev) alternate use of @dev when @dev would be %NULL
* @cb: Control buffer. Free for use by every layer. Put private vars here
* @_skb_refdst: destination entry (with norefcount bit)
- * @sp: the security path, used for xfrm
* @len: Length of actual data
* @data_len: Data length
* @mac_len: Length of link layer header
@@ -778,7 +787,6 @@ typedef unsigned char *sk_buff_data_t;
* @tcp_tsorted_anchor: list structure for TCP (tp->tsorted_sent_queue)
* @_sk_redir: socket redirection information for skmsg
* @_nfct: Associated connection, if any (with nfctinfo bits)
- * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
* @skb_iif: ifindex of device we arrived on
* @tc_index: Traffic control index
* @hash: the packet hash
@@ -943,7 +951,7 @@ struct sk_buff {
__u8 __mono_tc_offset[0];
/* public: */
__u8 mono_delivery_time:1; /* See SKB_MONO_DELIVERY_TIME_MASK */
-#ifdef CONFIG_NET_CLS_ACT
+#ifdef CONFIG_NET_XGRESS
__u8 tc_at_ingress:1; /* See TC_AT_INGRESS_MASK */
__u8 tc_skip_classify:1;
#endif
@@ -992,7 +1000,7 @@ struct sk_buff {
__u8 csum_not_inet:1;
#endif
-#ifdef CONFIG_NET_SCHED
+#if defined(CONFIG_NET_SCHED) || defined(CONFIG_NET_XGRESS)
__u16 tc_index; /* traffic control index */
#endif
@@ -1059,7 +1067,7 @@ struct sk_buff {
refcount_t users;
#ifdef CONFIG_SKB_EXTENSIONS
- /* only useable after checking ->active_extensions != 0 */
+ /* only usable after checking ->active_extensions != 0 */
struct skb_ext *extensions;
#endif
};
@@ -1311,7 +1319,7 @@ struct sk_buff_fclones {
*
* Returns true if skb is a fast clone, and its clone is not freed.
* Some drivers call skb_orphan() in their ndo_start_xmit(),
- * so we also check that this didnt happen.
+ * so we also check that this didn't happen.
*/
static inline bool skb_fclone_busy(const struct sock *sk,
const struct sk_buff *skb)
@@ -1383,7 +1391,7 @@ static inline int skb_pad(struct sk_buff *skb, int pad)
#define dev_kfree_skb(a) consume_skb(a)
int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
- int offset, size_t size);
+ int offset, size_t size, size_t max_frags);
struct skb_seq_state {
__u32 lower_offset;
@@ -2018,7 +2026,7 @@ static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
* Copy shared buffers into a new sk_buff. We effectively do COW on
* packets to handle cases where we have a local reader and forward
* and a couple of other messy ones. The normal one is tcpdumping
- * a packet thats being forwarded.
+ * a packet that's being forwarded.
*/
/**
@@ -2421,20 +2429,22 @@ static inline unsigned int skb_pagelen(const struct sk_buff *skb)
return skb_headlen(skb) + __skb_pagelen(skb);
}
+static inline void skb_frag_fill_page_desc(skb_frag_t *frag,
+ struct page *page,
+ int off, int size)
+{
+ frag->bv_page = page;
+ frag->bv_offset = off;
+ skb_frag_size_set(frag, size);
+}
+
static inline void __skb_fill_page_desc_noacc(struct skb_shared_info *shinfo,
int i, struct page *page,
int off, int size)
{
skb_frag_t *frag = &shinfo->frags[i];
- /*
- * Propagate page pfmemalloc to the skb if we can. The problem is
- * that not all callers have unique ownership of the page but rely
- * on page_is_pfmemalloc doing the right thing(tm).
- */
- frag->bv_page = page;
- frag->bv_offset = off;
- skb_frag_size_set(frag, size);
+ skb_frag_fill_page_desc(frag, page, off, size);
}
/**
@@ -2466,6 +2476,11 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
struct page *page, int off, int size)
{
__skb_fill_page_desc_noacc(skb_shinfo(skb), i, page, off, size);
+
+ /* Propagate page pfmemalloc to the skb if we can. The problem is
+ * that not all callers have unique ownership of the page but rely
+ * on page_is_pfmemalloc doing the right thing(tm).
+ */
page = compound_head(page);
if (page_is_pfmemalloc(page))
skb->pfmemalloc = true;
@@ -3144,22 +3159,38 @@ static inline int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask)
}
/**
- * __skb_queue_purge - empty a list
+ * __skb_queue_purge_reason - empty a list
* @list: list to empty
+ * @reason: drop reason
*
* Delete all buffers on an &sk_buff list. Each buffer is removed from
* the list and one reference dropped. This function does not take the
* list lock and the caller must hold the relevant locks to use it.
*/
-static inline void __skb_queue_purge(struct sk_buff_head *list)
+static inline void __skb_queue_purge_reason(struct sk_buff_head *list,
+ enum skb_drop_reason reason)
{
struct sk_buff *skb;
+
while ((skb = __skb_dequeue(list)) != NULL)
- kfree_skb(skb);
+ kfree_skb_reason(skb, reason);
+}
+
+static inline void __skb_queue_purge(struct sk_buff_head *list)
+{
+ __skb_queue_purge_reason(list, SKB_DROP_REASON_QUEUE_PURGE);
+}
+
+void skb_queue_purge_reason(struct sk_buff_head *list,
+ enum skb_drop_reason reason);
+
+static inline void skb_queue_purge(struct sk_buff_head *list)
+{
+ skb_queue_purge_reason(list, SKB_DROP_REASON_QUEUE_PURGE);
}
-void skb_queue_purge(struct sk_buff_head *list);
unsigned int skb_rbtree_purge(struct rb_root *root);
+void skb_errqueue_purge(struct sk_buff_head *list);
void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask);
@@ -3278,7 +3309,7 @@ static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
unsigned int order)
{
/* This piece of code contains several assumptions.
- * 1. This is for device Rx, therefor a cold page is preferred.
+ * 1. This is for device Rx, therefore a cold page is preferred.
* 2. The expectation is the user wants a compound page.
* 3. If requesting an order 0 page it will not be compound
* due to the check to see if order has a value in prep_new_page
@@ -3415,13 +3446,15 @@ static inline void skb_frag_ref(struct sk_buff *skb, int f)
__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
}
+bool napi_pp_put_page(struct page *page, bool napi_safe);
+
static inline void
napi_frag_unref(skb_frag_t *frag, bool recycle, bool napi_safe)
{
struct page *page = skb_frag_page(frag);
#ifdef CONFIG_PAGE_POOL
- if (recycle && page_pool_return_skb_page(page, napi_safe))
+ if (recycle && napi_pp_put_page(page, napi_safe))
return;
#endif
put_page(page);
@@ -3494,32 +3527,6 @@ static inline void skb_frag_page_copy(skb_frag_t *fragto,
fragto->bv_page = fragfrom->bv_page;
}
-/**
- * __skb_frag_set_page - sets the page contained in a paged fragment
- * @frag: the paged fragment
- * @page: the page to set
- *
- * Sets the fragment @frag to contain @page.
- */
-static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
-{
- frag->bv_page = page;
-}
-
-/**
- * skb_frag_set_page - sets the page contained in a paged fragment of an skb
- * @skb: the buffer
- * @f: the fragment offset
- * @page: the page to set
- *
- * Sets the @f'th fragment of @skb to contain @page.
- */
-static inline void skb_frag_set_page(struct sk_buff *skb, int f,
- struct page *page)
-{
- __skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
-}
-
bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);
/**
@@ -3682,6 +3689,9 @@ static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int l
return __skb_put_padto(skb, len, true);
}
+bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i)
+ __must_check;
+
static inline int skb_add_data(struct sk_buff *skb,
struct iov_iter *from, int copy)
{
@@ -3992,13 +4002,12 @@ int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
void skb_scrub_packet(struct sk_buff *skb, bool xnet);
-bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);
-bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
struct sk_buff *skb_segment_list(struct sk_buff *skb, netdev_features_t features,
unsigned int offset);
struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len);
+int skb_ensure_writable_head_tail(struct sk_buff *skb, struct net_device *dev);
int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
int skb_vlan_pop(struct sk_buff *skb);
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
@@ -4056,6 +4065,14 @@ skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
skb_headlen(skb), buffer);
}
+static inline void * __must_check
+skb_pointer_if_linear(const struct sk_buff *skb, int offset, int len)
+{
+ if (likely(skb_headlen(skb) - offset >= len))
+ return skb->data + offset;
+ return NULL;
+}
+
/**
* skb_needs_linearize - check if we need to linearize a given skb
* depending on the given device features.
@@ -4229,10 +4246,13 @@ static inline bool __skb_metadata_differs(const struct sk_buff *skb_a,
{
const void *a = skb_metadata_end(skb_a);
const void *b = skb_metadata_end(skb_b);
- /* Using more efficient varaiant than plain call to memcmp(). */
-#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
u64 diffs = 0;
+ if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
+ BITS_PER_LONG != 64)
+ goto slow;
+
+ /* Using more efficient variant than plain call to memcmp(). */
switch (meta_len) {
#define __it(x, op) (x -= sizeof(u##op))
#define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op))
@@ -4252,11 +4272,11 @@ static inline bool __skb_metadata_differs(const struct sk_buff *skb_a,
fallthrough;
case 4: diffs |= __it_diff(a, b, 32);
break;
+ default:
+slow:
+ return memcmp(a - meta_len, b - meta_len, meta_len);
}
return diffs;
-#else
- return memcmp(a - meta_len, b - meta_len, meta_len);
-#endif
}
static inline bool skb_metadata_differs(const struct sk_buff *skb_a,
@@ -4859,75 +4879,6 @@ static inline struct sec_path *skb_sec_path(const struct sk_buff *skb)
#endif
}
-/* Keeps track of mac header offset relative to skb->head.
- * It is useful for TSO of Tunneling protocol. e.g. GRE.
- * For non-tunnel skb it points to skb_mac_header() and for
- * tunnel skb it points to outer mac header.
- * Keeps track of level of encapsulation of network headers.
- */
-struct skb_gso_cb {
- union {
- int mac_offset;
- int data_offset;
- };
- int encap_level;
- __wsum csum;
- __u16 csum_start;
-};
-#define SKB_GSO_CB_OFFSET 32
-#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_GSO_CB_OFFSET))
-
-static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
-{
- return (skb_mac_header(inner_skb) - inner_skb->head) -
- SKB_GSO_CB(inner_skb)->mac_offset;
-}
-
-static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
-{
- int new_headroom, headroom;
- int ret;
-
- headroom = skb_headroom(skb);
- ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
- if (ret)
- return ret;
-
- new_headroom = skb_headroom(skb);
- SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
- return 0;
-}
-
-static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
-{
- /* Do not update partial checksums if remote checksum is enabled. */
- if (skb->remcsum_offload)
- return;
-
- SKB_GSO_CB(skb)->csum = res;
- SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
-}
-
-/* Compute the checksum for a gso segment. First compute the checksum value
- * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and
- * then add in skb->csum (checksum from csum_start to end of packet).
- * skb->csum and csum_start are then updated to reflect the checksum of the
- * resultant packet starting from the transport header-- the resultant checksum
- * is in the res argument (i.e. normally zero or ~ of checksum of a pseudo
- * header.
- */
-static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
-{
- unsigned char *csum_start = skb_transport_header(skb);
- int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
- __wsum partial = SKB_GSO_CB(skb)->csum;
-
- SKB_GSO_CB(skb)->csum = res;
- SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;
-
- return csum_fold(csum_partial(csum_start, plen, partial));
-}
-
static inline bool skb_is_gso(const struct sk_buff *skb)
{
return skb_shinfo(skb)->gso_size;
@@ -5126,5 +5077,8 @@ static inline void skb_mark_for_recycle(struct sk_buff *skb)
#endif
}
+ssize_t skb_splice_from_iter(struct sk_buff *skb, struct iov_iter *iter,
+ ssize_t maxsize, gfp_t gfp);
+
#endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */
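Of the helpers added above, skb_frag_fill_page_desc() replaces the removed __skb_frag_set_page()/skb_frag_set_page() pair by initialising page, offset and size in one call. A hedged sketch of a driver attaching one page as fragment 0; foo_attach_rx_page() is illustrative and the accounting mirrors what the skb_fill_page_desc()-style helpers maintain:

static void foo_attach_rx_page(struct sk_buff *skb, struct page *page,
			       unsigned int len)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);

	/* Fills bv_page, bv_offset and the fragment size together. */
	skb_frag_fill_page_desc(&shinfo->frags[0], page, 0, len);
	shinfo->nr_frags = 1;

	skb->len += len;
	skb->data_len += len;
	skb->truesize += PAGE_SIZE;
}

Note the deliberate split in the hunks above: pfmemalloc propagation no longer lives in the low-level fill helper but in __skb_fill_page_desc(), so callers of the _noacc variant skip it knowingly.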
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index 054d7911bfc9..e65ec3fd2799 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -62,6 +62,7 @@ struct sk_psock_progs {
enum sk_psock_state_bits {
SK_PSOCK_TX_ENABLED,
+ SK_PSOCK_RX_STRP_ENABLED,
};
struct sk_psock_link {
@@ -99,12 +100,18 @@ struct sk_psock {
void (*saved_close)(struct sock *sk, long timeout);
void (*saved_write_space)(struct sock *sk);
void (*saved_data_ready)(struct sock *sk);
+	/* psock_update_sk_prot may be called with restore=false many times,
+ * so the handler must be safe for this case. It will be called
+ * exactly once with restore=true when the psock is being destroyed
+ * and psock refcnt is zero, but before an RCU grace period.
+ */
int (*psock_update_sk_prot)(struct sock *sk, struct sk_psock *psock,
bool restore);
struct proto *sk_proto;
struct mutex work_mutex;
struct sk_psock_work_state work_state;
struct delayed_work work;
+ struct sock *sk_pair;
struct rcu_work rwork;
};
@@ -498,12 +505,6 @@ static inline bool sk_psock_strp_enabled(struct sk_psock *psock)
return !!psock->saved_data_ready;
}
-static inline bool sk_is_udp(const struct sock *sk)
-{
- return sk->sk_type == SOCK_DGRAM &&
- sk->sk_protocol == IPPROTO_UDP;
-}
-
#if IS_ENABLED(CONFIG_NET_SOCK_MSG)
#define BPF_F_STRPARSER (1UL << 1)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 6b3e155b70bf..b5f5ee8308d0 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -12,16 +12,19 @@
#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H
+#include <linux/cache.h>
#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/percpu-refcount.h>
+#include <linux/cleanup.h>
+#include <linux/hash.h>
/*
* Flags to pass to kmem_cache_create().
- * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
+ * The ones marked DEBUG need CONFIG_SLUB_DEBUG enabled; otherwise they are no-ops.
*/
/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_CONSISTENCY_CHECKS ((slab_flags_t __force)0x00000100U)
@@ -53,16 +56,18 @@
* stays valid, the trick to using this is relying on an independent
* object validation pass. Something like:
*
- * rcu_read_lock()
- * again:
+ * begin:
+ * rcu_read_lock();
* obj = lockless_lookup(key);
* if (obj) {
* if (!try_get_ref(obj)) // might fail for free objects
- * goto again;
+ * rcu_read_unlock();
+ * goto begin;
*
* if (obj->key != key) { // not the object we expected
* put_ref(obj);
- * goto again;
+ * rcu_read_unlock();
+ * goto begin;
* }
* }
* rcu_read_unlock();
@@ -106,6 +111,18 @@
/* Avoid kmemleak tracing */
#define SLAB_NOLEAKTRACE ((slab_flags_t __force)0x00800000U)
+/*
+ * Prevent merging with compatible kmem caches. This flag should be used
+ * cautiously. Valid use cases:
+ *
+ * - caches created for self-tests (e.g. kunit)
+ * - general caches created and used by a subsystem, only when a
+ * (subsystem-specific) debug option is enabled
+ * - performance critical caches, should be very rare and consulted with slab
+ * maintainers, and not used together with CONFIG_SLUB_TINY
+ */
+#define SLAB_NO_MERGE ((slab_flags_t __force)0x01000000U)
+
/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB ((slab_flags_t __force)0x02000000U)
@@ -211,6 +228,8 @@ void kfree(const void *objp);
void kfree_sensitive(const void *objp);
size_t __ksize(const void *objp);
+DEFINE_FREE(kfree, void *, if (_T) kfree(_T))
+
/**
* ksize - Report actual allocation size of associated object
*
@@ -226,8 +245,9 @@ size_t __ksize(const void *objp);
size_t ksize(const void *objp);
#ifdef CONFIG_PRINTK
-bool kmem_valid_obj(void *object);
-void kmem_dump_obj(void *object);
+bool kmem_dump_obj(void *object);
+#else
+static inline bool kmem_dump_obj(void *object) { return false; }
#endif
/*
@@ -235,12 +255,17 @@ void kmem_dump_obj(void *object);
* alignment larger than the alignment of a 64-bit integer.
* Setting ARCH_DMA_MINALIGN in arch headers allows that.
*/
-#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
+#ifdef ARCH_HAS_DMA_MINALIGN
+#if ARCH_DMA_MINALIGN > 8 && !defined(ARCH_KMALLOC_MINALIGN)
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
-#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
-#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
-#else
+#endif
+#endif
+
+#ifndef ARCH_KMALLOC_MINALIGN
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
+#elif ARCH_KMALLOC_MINALIGN > 8
+#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
+#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
#endif
/*
@@ -277,25 +302,15 @@ static inline unsigned int arch_slab_minalign(void)
* Kmalloc array related definitions
*/
-#ifdef CONFIG_SLAB
/*
- * SLAB and SLUB directly allocates requests fitting in to an order-1 page
+ * SLUB directly allocates requests fitting in to an order-1 page
* (PAGE_SIZE*2). Larger requests are passed to the page allocator.
*/
#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
-#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT)
-#ifndef KMALLOC_SHIFT_LOW
-#define KMALLOC_SHIFT_LOW 5
-#endif
-#endif
-
-#ifdef CONFIG_SLUB
-#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
-#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT)
+#define KMALLOC_SHIFT_MAX (MAX_PAGE_ORDER + PAGE_SHIFT)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 3
#endif
-#endif
/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX)
@@ -322,6 +337,12 @@ static inline unsigned int arch_slab_minalign(void)
#define SLAB_OBJ_MIN_SIZE (KMALLOC_MIN_SIZE < 16 ? \
(KMALLOC_MIN_SIZE) : 16)
+#ifdef CONFIG_RANDOM_KMALLOC_CACHES
+#define RANDOM_KMALLOC_CACHES_NR 15 // # of cache copies
+#else
+#define RANDOM_KMALLOC_CACHES_NR 0
+#endif
+
/*
* Whenever changing this, take care of that kmalloc_type() and
* create_kmalloc_caches() still work as intended.
@@ -338,6 +359,8 @@ enum kmalloc_cache_type {
#ifndef CONFIG_MEMCG_KMEM
KMALLOC_CGROUP = KMALLOC_NORMAL,
#endif
+ KMALLOC_RANDOM_START = KMALLOC_NORMAL,
+ KMALLOC_RANDOM_END = KMALLOC_RANDOM_START + RANDOM_KMALLOC_CACHES_NR,
#ifdef CONFIG_SLUB_TINY
KMALLOC_RECLAIM = KMALLOC_NORMAL,
#else
@@ -363,14 +386,22 @@ kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];
(IS_ENABLED(CONFIG_ZONE_DMA) ? __GFP_DMA : 0) | \
(IS_ENABLED(CONFIG_MEMCG_KMEM) ? __GFP_ACCOUNT : 0))
-static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
+extern unsigned long random_kmalloc_seed;
+
+static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags, unsigned long caller)
{
/*
* The most common case is KMALLOC_NORMAL, so test for it
* with a single branch for all the relevant flags.
*/
if (likely((flags & KMALLOC_NOT_NORMAL_BITS) == 0))
+#ifdef CONFIG_RANDOM_KMALLOC_CACHES
+ /* RANDOM_KMALLOC_CACHES_NR (=15) copies + the KMALLOC_NORMAL */
+ return KMALLOC_RANDOM_START + hash_64(caller ^ random_kmalloc_seed,
+ ilog2(RANDOM_KMALLOC_CACHES_NR + 1));
+#else
return KMALLOC_NORMAL;
+#endif
/*
* At least one of the flags has to be set. Their priorities in
@@ -557,7 +588,7 @@ static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
index = kmalloc_index(size);
return kmalloc_trace(
- kmalloc_caches[kmalloc_type(flags)][index],
+ kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
flags, size);
}
return __kmalloc(size, flags);
@@ -573,7 +604,7 @@ static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t fla
index = kmalloc_index(size);
return kmalloc_node_trace(
- kmalloc_caches[kmalloc_type(flags)][index],
+ kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
flags, node, size);
}
return __kmalloc_node(size, flags, node);
@@ -723,6 +754,8 @@ static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t fla
extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
__realloc_size(3);
extern void kvfree(const void *addr);
+DEFINE_FREE(kvfree, void *, if (_T) kvfree(_T))
+
extern void kvfree_sensitive(const void *addr, size_t len);
unsigned int kmem_cache_size(struct kmem_cache *s);
@@ -745,12 +778,4 @@ size_t kmalloc_size_roundup(size_t size);
void __init kmem_cache_init_late(void);
-#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
-int slab_prepare_cpu(unsigned int cpu);
-int slab_dead_cpu(unsigned int cpu);
-#else
-#define slab_prepare_cpu NULL
-#define slab_dead_cpu NULL
-#endif
-
#endif /* _LINUX_SLAB_H */
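The two DEFINE_FREE() lines added above wire kfree() and kvfree() into the scope-based cleanup machinery from <linux/cleanup.h>. A minimal sketch of the resulting pattern; foo_validate() is a hypothetical consumer:

#include <linux/cleanup.h>
#include <linux/slab.h>
#include <linux/string.h>

static int foo_parse(const u8 *src, size_t len)
{
	/* kfree()d automatically on every exit path once buf leaves
	 * scope, unless ownership is handed off via no_free_ptr(). */
	void *buf __free(kfree) = kmalloc(len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	memcpy(buf, src, len);
	return foo_validate(buf, len);
}

The "if (_T)" guard in the macro body means a NULL pointer is simply skipped, matching kfree() semantics.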
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
deleted file mode 100644
index a61e7d55d0d3..000000000000
--- a/include/linux/slab_def.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_SLAB_DEF_H
-#define _LINUX_SLAB_DEF_H
-
-#include <linux/kfence.h>
-#include <linux/reciprocal_div.h>
-
-/*
- * Definitions unique to the original Linux SLAB allocator.
- */
-
-struct kmem_cache {
- struct array_cache __percpu *cpu_cache;
-
-/* 1) Cache tunables. Protected by slab_mutex */
- unsigned int batchcount;
- unsigned int limit;
- unsigned int shared;
-
- unsigned int size;
- struct reciprocal_value reciprocal_buffer_size;
-/* 2) touched by every alloc & free from the backend */
-
- slab_flags_t flags; /* constant flags */
- unsigned int num; /* # of objs per slab */
-
-/* 3) cache_grow/shrink */
- /* order of pgs per slab (2^n) */
- unsigned int gfporder;
-
- /* force GFP flags, e.g. GFP_DMA */
- gfp_t allocflags;
-
- size_t colour; /* cache colouring range */
- unsigned int colour_off; /* colour offset */
- unsigned int freelist_size;
-
- /* constructor func */
- void (*ctor)(void *obj);
-
-/* 4) cache creation/removal */
- const char *name;
- struct list_head list;
- int refcount;
- int object_size;
- int align;
-
-/* 5) statistics */
-#ifdef CONFIG_DEBUG_SLAB
- unsigned long num_active;
- unsigned long num_allocations;
- unsigned long high_mark;
- unsigned long grown;
- unsigned long reaped;
- unsigned long errors;
- unsigned long max_freeable;
- unsigned long node_allocs;
- unsigned long node_frees;
- unsigned long node_overflow;
- atomic_t allochit;
- atomic_t allocmiss;
- atomic_t freehit;
- atomic_t freemiss;
-
- /*
- * If debugging is enabled, then the allocator can add additional
- * fields and/or padding to every object. 'size' contains the total
- * object size including these internal fields, while 'obj_offset'
- * and 'object_size' contain the offset to the user object and its
- * size.
- */
- int obj_offset;
-#endif /* CONFIG_DEBUG_SLAB */
-
-#ifdef CONFIG_KASAN_GENERIC
- struct kasan_cache kasan_info;
-#endif
-
-#ifdef CONFIG_SLAB_FREELIST_RANDOM
- unsigned int *random_seq;
-#endif
-
-#ifdef CONFIG_HARDENED_USERCOPY
- unsigned int useroffset; /* Usercopy region offset */
- unsigned int usersize; /* Usercopy region size */
-#endif
-
- struct kmem_cache_node *node[MAX_NUMNODES];
-};
-
-static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
- void *x)
-{
- void *object = x - (x - slab->s_mem) % cache->size;
- void *last_object = slab->s_mem + (cache->num - 1) * cache->size;
-
- if (unlikely(object > last_object))
- return last_object;
- else
- return object;
-}
-
-/*
- * We want to avoid an expensive divide : (offset / cache->size)
- * Using the fact that size is a constant for a particular cache,
- * we can replace (offset / cache->size) by
- * reciprocal_divide(offset, cache->reciprocal_buffer_size)
- */
-static inline unsigned int obj_to_index(const struct kmem_cache *cache,
- const struct slab *slab, void *obj)
-{
- u32 offset = (obj - slab->s_mem);
- return reciprocal_divide(offset, cache->reciprocal_buffer_size);
-}
-
-static inline int objs_per_slab(const struct kmem_cache *cache,
- const struct slab *slab)
-{
- if (is_kfence_address(slab_address(slab)))
- return 1;
- return cache->num;
-}
-
-#endif /* _LINUX_SLAB_DEF_H */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
deleted file mode 100644
index f6df03f934e5..000000000000
--- a/include/linux/slub_def.h
+++ /dev/null
@@ -1,198 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_SLUB_DEF_H
-#define _LINUX_SLUB_DEF_H
-
-/*
- * SLUB : A Slab allocator without object queues.
- *
- * (C) 2007 SGI, Christoph Lameter
- */
-#include <linux/kfence.h>
-#include <linux/kobject.h>
-#include <linux/reciprocal_div.h>
-#include <linux/local_lock.h>
-
-enum stat_item {
- ALLOC_FASTPATH, /* Allocation from cpu slab */
- ALLOC_SLOWPATH, /* Allocation by getting a new cpu slab */
- FREE_FASTPATH, /* Free to cpu slab */
- FREE_SLOWPATH, /* Freeing not to cpu slab */
- FREE_FROZEN, /* Freeing to frozen slab */
- FREE_ADD_PARTIAL, /* Freeing moves slab to partial list */
- FREE_REMOVE_PARTIAL, /* Freeing removes last object */
- ALLOC_FROM_PARTIAL, /* Cpu slab acquired from node partial list */
- ALLOC_SLAB, /* Cpu slab acquired from page allocator */
- ALLOC_REFILL, /* Refill cpu slab from slab freelist */
- ALLOC_NODE_MISMATCH, /* Switching cpu slab */
- FREE_SLAB, /* Slab freed to the page allocator */
- CPUSLAB_FLUSH, /* Abandoning of the cpu slab */
- DEACTIVATE_FULL, /* Cpu slab was full when deactivated */
- DEACTIVATE_EMPTY, /* Cpu slab was empty when deactivated */
- DEACTIVATE_TO_HEAD, /* Cpu slab was moved to the head of partials */
- DEACTIVATE_TO_TAIL, /* Cpu slab was moved to the tail of partials */
- DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
- DEACTIVATE_BYPASS, /* Implicit deactivation */
- ORDER_FALLBACK, /* Number of times fallback was necessary */
- CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
- CMPXCHG_DOUBLE_FAIL, /* Number of times that cmpxchg double did not match */
- CPU_PARTIAL_ALLOC, /* Used cpu partial on alloc */
- CPU_PARTIAL_FREE, /* Refill cpu partial on free */
- CPU_PARTIAL_NODE, /* Refill cpu partial from node partial */
- CPU_PARTIAL_DRAIN, /* Drain cpu partial to node partial */
- NR_SLUB_STAT_ITEMS };
-
-#ifndef CONFIG_SLUB_TINY
-/*
- * When changing the layout, make sure freelist and tid are still compatible
- * with this_cpu_cmpxchg_double() alignment requirements.
- */
-struct kmem_cache_cpu {
- void **freelist; /* Pointer to next available object */
- unsigned long tid; /* Globally unique transaction id */
- struct slab *slab; /* The slab from which we are allocating */
-#ifdef CONFIG_SLUB_CPU_PARTIAL
- struct slab *partial; /* Partially allocated frozen slabs */
-#endif
- local_lock_t lock; /* Protects the fields above */
-#ifdef CONFIG_SLUB_STATS
- unsigned stat[NR_SLUB_STAT_ITEMS];
-#endif
-};
-#endif /* CONFIG_SLUB_TINY */
-
-#ifdef CONFIG_SLUB_CPU_PARTIAL
-#define slub_percpu_partial(c) ((c)->partial)
-
-#define slub_set_percpu_partial(c, p) \
-({ \
- slub_percpu_partial(c) = (p)->next; \
-})
-
-#define slub_percpu_partial_read_once(c) READ_ONCE(slub_percpu_partial(c))
-#else
-#define slub_percpu_partial(c) NULL
-
-#define slub_set_percpu_partial(c, p)
-
-#define slub_percpu_partial_read_once(c) NULL
-#endif // CONFIG_SLUB_CPU_PARTIAL
-
-/*
- * Word size structure that can be atomically updated or read and that
- * contains both the order and the number of objects that a slab of the
- * given order would contain.
- */
-struct kmem_cache_order_objects {
- unsigned int x;
-};
-
-/*
- * Slab cache management.
- */
-struct kmem_cache {
-#ifndef CONFIG_SLUB_TINY
- struct kmem_cache_cpu __percpu *cpu_slab;
-#endif
- /* Used for retrieving partial slabs, etc. */
- slab_flags_t flags;
- unsigned long min_partial;
- unsigned int size; /* The size of an object including metadata */
- unsigned int object_size;/* The size of an object without metadata */
- struct reciprocal_value reciprocal_size;
- unsigned int offset; /* Free pointer offset */
-#ifdef CONFIG_SLUB_CPU_PARTIAL
- /* Number of per cpu partial objects to keep around */
- unsigned int cpu_partial;
- /* Number of per cpu partial slabs to keep around */
- unsigned int cpu_partial_slabs;
-#endif
- struct kmem_cache_order_objects oo;
-
- /* Allocation and freeing of slabs */
- struct kmem_cache_order_objects min;
- gfp_t allocflags; /* gfp flags to use on each alloc */
- int refcount; /* Refcount for slab cache destroy */
- void (*ctor)(void *);
- unsigned int inuse; /* Offset to metadata */
- unsigned int align; /* Alignment */
- unsigned int red_left_pad; /* Left redzone padding size */
- const char *name; /* Name (only for display!) */
- struct list_head list; /* List of slab caches */
-#ifdef CONFIG_SYSFS
- struct kobject kobj; /* For sysfs */
-#endif
-#ifdef CONFIG_SLAB_FREELIST_HARDENED
- unsigned long random;
-#endif
-
-#ifdef CONFIG_NUMA
- /*
- * Defragmentation by allocating from a remote node.
- */
- unsigned int remote_node_defrag_ratio;
-#endif
-
-#ifdef CONFIG_SLAB_FREELIST_RANDOM
- unsigned int *random_seq;
-#endif
-
-#ifdef CONFIG_KASAN_GENERIC
- struct kasan_cache kasan_info;
-#endif
-
-#ifdef CONFIG_HARDENED_USERCOPY
- unsigned int useroffset; /* Usercopy region offset */
- unsigned int usersize; /* Usercopy region size */
-#endif
-
- struct kmem_cache_node *node[MAX_NUMNODES];
-};
-
-#if defined(CONFIG_SYSFS) && !defined(CONFIG_SLUB_TINY)
-#define SLAB_SUPPORTS_SYSFS
-void sysfs_slab_unlink(struct kmem_cache *);
-void sysfs_slab_release(struct kmem_cache *);
-#else
-static inline void sysfs_slab_unlink(struct kmem_cache *s)
-{
-}
-static inline void sysfs_slab_release(struct kmem_cache *s)
-{
-}
-#endif
-
-void *fixup_red_left(struct kmem_cache *s, void *p);
-
-static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
- void *x) {
- void *object = x - (x - slab_address(slab)) % cache->size;
- void *last_object = slab_address(slab) +
- (slab->objects - 1) * cache->size;
- void *result = (unlikely(object > last_object)) ? last_object : object;
-
- result = fixup_red_left(cache, result);
- return result;
-}
-
-/* Determine object index from a given position */
-static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
- void *addr, void *obj)
-{
- return reciprocal_divide(kasan_reset_tag(obj) - addr,
- cache->reciprocal_size);
-}
-
-static inline unsigned int obj_to_index(const struct kmem_cache *cache,
- const struct slab *slab, void *obj)
-{
- if (is_kfence_address(obj))
- return 0;
- return __obj_to_index(cache, slab_address(slab), obj);
-}
-
-static inline int objs_per_slab(const struct kmem_cache *cache,
- const struct slab *slab)
-{
- return slab->objects;
-}
-#endif /* _LINUX_SLUB_DEF_H */
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 91ea4a67f8ca..e87520dc2959 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -53,7 +53,7 @@ int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
void *info, bool wait, const struct cpumask *mask);
-int smp_call_function_single_async(int cpu, struct __call_single_data *csd);
+int smp_call_function_single_async(int cpu, call_single_data_t *csd);
/*
* Cpus stopping functions in panic. All have default weak definitions.
diff --git a/include/linux/smscphy.h b/include/linux/smscphy.h
index e1c88627755a..1a6a851d2cf8 100644
--- a/include/linux/smscphy.h
+++ b/include/linux/smscphy.h
@@ -38,4 +38,38 @@ int smsc_phy_set_tunable(struct phy_device *phydev,
struct ethtool_tunable *tuna, const void *data);
int smsc_phy_probe(struct phy_device *phydev);
+#define MII_LAN874X_PHY_MMD_WOL_WUCSR 0x8010
+#define MII_LAN874X_PHY_MMD_WOL_WUF_CFGA 0x8011
+#define MII_LAN874X_PHY_MMD_WOL_WUF_CFGB 0x8012
+#define MII_LAN874X_PHY_MMD_WOL_WUF_MASK0 0x8021
+#define MII_LAN874X_PHY_MMD_WOL_WUF_MASK1 0x8022
+#define MII_LAN874X_PHY_MMD_WOL_WUF_MASK2 0x8023
+#define MII_LAN874X_PHY_MMD_WOL_WUF_MASK3 0x8024
+#define MII_LAN874X_PHY_MMD_WOL_WUF_MASK4 0x8025
+#define MII_LAN874X_PHY_MMD_WOL_WUF_MASK5 0x8026
+#define MII_LAN874X_PHY_MMD_WOL_WUF_MASK6 0x8027
+#define MII_LAN874X_PHY_MMD_WOL_WUF_MASK7 0x8028
+#define MII_LAN874X_PHY_MMD_WOL_RX_ADDRA 0x8061
+#define MII_LAN874X_PHY_MMD_WOL_RX_ADDRB 0x8062
+#define MII_LAN874X_PHY_MMD_WOL_RX_ADDRC 0x8063
+#define MII_LAN874X_PHY_MMD_MCFGR 0x8064
+
+#define MII_LAN874X_PHY_PME1_SET (2 << 13)
+#define MII_LAN874X_PHY_PME2_SET (2 << 11)
+#define MII_LAN874X_PHY_PME_SELF_CLEAR BIT(9)
+#define MII_LAN874X_PHY_WOL_PFDA_FR BIT(7)
+#define MII_LAN874X_PHY_WOL_WUFR BIT(6)
+#define MII_LAN874X_PHY_WOL_MPR BIT(5)
+#define MII_LAN874X_PHY_WOL_BCAST_FR BIT(4)
+#define MII_LAN874X_PHY_WOL_PFDAEN BIT(3)
+#define MII_LAN874X_PHY_WOL_WUEN BIT(2)
+#define MII_LAN874X_PHY_WOL_MPEN BIT(1)
+#define MII_LAN874X_PHY_WOL_BCSTEN BIT(0)
+
+#define MII_LAN874X_PHY_WOL_FILTER_EN BIT(15)
+#define MII_LAN874X_PHY_WOL_FILTER_MCASTTEN BIT(9)
+#define MII_LAN874X_PHY_WOL_FILTER_BCSTEN BIT(8)
+
+#define MII_LAN874X_PHY_PME_SELF_CLEAR_DELAY 0x1000 /* 81 milliseconds */
+
#endif /* __LINUX_SMSCPHY_H__ */
diff --git a/include/linux/soc/apple/rtkit.h b/include/linux/soc/apple/rtkit.h
index fc456f75c131..8c9ca857ccf6 100644
--- a/include/linux/soc/apple/rtkit.h
+++ b/include/linux/soc/apple/rtkit.h
@@ -161,24 +161,6 @@ int apple_rtkit_send_message(struct apple_rtkit *rtk, u8 ep, u64 message,
struct completion *completion, bool atomic);
/*
- * Send a message to the given endpoint and wait until it has been submitted
- * to the hardware FIFO.
- * Will return zero on success and a negative error code on failure
- * (e.g. -ETIME when the message couldn't be written within the given
- * timeout)
- *
- * @rtk: RTKit reference
- * @ep: target endpoint
- * @message: message to be sent
- * @timeout: timeout in milliseconds to allow the message transmission
- * to be completed
- * @atomic: if set to true this function can be called from atomic
- * context.
- */
-int apple_rtkit_send_message_wait(struct apple_rtkit *rtk, u8 ep, u64 message,
- unsigned long timeout, bool atomic);
-
-/*
* Process incoming messages in atomic context.
* This only guarantees that messages arrive as far as the recv_message_early
* callback; drivers expecting to handle incoming messages synchronously
diff --git a/include/linux/soc/mediatek/infracfg.h b/include/linux/soc/mediatek/infracfg.h
index 07f67b3d8e97..6c6cccc848f4 100644
--- a/include/linux/soc/mediatek/infracfg.h
+++ b/include/linux/soc/mediatek/infracfg.h
@@ -2,6 +2,47 @@
#ifndef __SOC_MEDIATEK_INFRACFG_H
#define __SOC_MEDIATEK_INFRACFG_H
+#define MT8365_INFRA_TOPAXI_PROTECTEN_STA1 0x228
+#define MT8365_INFRA_TOPAXI_PROTECTEN_SET 0x2a0
+#define MT8365_INFRA_TOPAXI_PROTECTEN_CLR 0x2a4
+#define MT8365_INFRA_TOPAXI_PROTECTEN_MM_M0 BIT(1)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_MDMCU_M1 BIT(2)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_MMAPB_S BIT(6)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_MM2INFRA_AXI_GALS_SLV_0 BIT(10)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_MM2INFRA_AXI_GALS_SLV_1 BIT(11)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_AP2CONN_AHB BIT(13)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_CONN2INFRA_AHB BIT(14)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_MFG_M0 BIT(21)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_INFRA2MFG BIT(22)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_STA1 0x258
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_SET 0x2a8
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_CLR 0x2ac
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_APU2AP BIT(2)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_MM2INFRA_AXI_GALS_MST_0 BIT(16)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_MM2INFRA_AXI_GALS_MST_1 BIT(17)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_CONN2INFRA_AXI_GALS_MST BIT(18)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_CAM2MM_AXI_GALS_MST BIT(19)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_APU_CBIP_GALS_MST BIT(20)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_INFRA2CONN_AHB_GALS_SLV BIT(21)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_PWRDNREQ_INFRA_GALS_ADB BIT(24)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_PWRDNREQ_MP1_L2C_AFIFO BIT(27)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_AUDIO_BUS_AUDIO_M BIT(28)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_AUDIO_BUS_DSP_M BIT(30)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_AUDIO_BUS_DSP_S BIT(31)
+
+#define MT8365_INFRA_NAO_TOPAXI_SI0_STA 0x0
+#define MT8365_INFRA_NAO_TOPAXI_SI0_CTRL_UPDATED BIT(24)
+#define MT8365_INFRA_NAO_TOPAXI_SI2_STA 0x28
+#define MT8365_INFRA_NAO_TOPAXI_SI2_CTRL_UPDATED BIT(14)
+#define MT8365_INFRA_TOPAXI_SI0_CTL 0x200
+#define MT8365_INFRA_TOPAXI_SI0_WAY_EN_MMAPB_S BIT(6)
+#define MT8365_INFRA_TOPAXI_SI2_CTL 0x234
+#define MT8365_INFRA_TOPAXI_SI2_WAY_EN_PERI_M1 BIT(5)
+
+#define MT8365_SMI_COMMON_CLAMP_EN 0x3c0
+#define MT8365_SMI_COMMON_CLAMP_EN_SET 0x3c4
+#define MT8365_SMI_COMMON_CLAMP_EN_CLR 0x3c8
+
#define MT8195_TOP_AXI_PROT_EN_STA1 0x228
#define MT8195_TOP_AXI_PROT_EN_1_STA1 0x258
#define MT8195_TOP_AXI_PROT_EN_SET 0x2a0
diff --git a/include/linux/soc/mediatek/mtk-mmsys.h b/include/linux/soc/mediatek/mtk-mmsys.h
index 37544ea6286d..4885b065b849 100644
--- a/include/linux/soc/mediatek/mtk-mmsys.h
+++ b/include/linux/soc/mediatek/mtk-mmsys.h
@@ -27,8 +27,7 @@ enum mtk_ddp_comp_id {
DDP_COMPONENT_CCORR,
DDP_COMPONENT_COLOR0,
DDP_COMPONENT_COLOR1,
- DDP_COMPONENT_DITHER,
- DDP_COMPONENT_DITHER0 = DDP_COMPONENT_DITHER,
+ DDP_COMPONENT_DITHER0,
DDP_COMPONENT_DITHER1,
DDP_COMPONENT_DP_INTF0,
DDP_COMPONENT_DP_INTF1,
@@ -63,6 +62,14 @@ enum mtk_ddp_comp_id {
DDP_COMPONENT_OVL_2L1,
DDP_COMPONENT_OVL_2L2,
DDP_COMPONENT_OVL1,
+ DDP_COMPONENT_PADDING0,
+ DDP_COMPONENT_PADDING1,
+ DDP_COMPONENT_PADDING2,
+ DDP_COMPONENT_PADDING3,
+ DDP_COMPONENT_PADDING4,
+ DDP_COMPONENT_PADDING5,
+ DDP_COMPONENT_PADDING6,
+ DDP_COMPONENT_PADDING7,
DDP_COMPONENT_POSTMASK0,
DDP_COMPONENT_PWM0,
DDP_COMPONENT_PWM1,
diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h
index b2b28180dff7..a476648858a6 100644
--- a/include/linux/soc/mediatek/mtk_wed.h
+++ b/include/linux/soc/mediatek/mtk_wed.h
@@ -10,6 +10,7 @@
#define MTK_WED_TX_QUEUES 2
#define MTK_WED_RX_QUEUES 2
+#define MTK_WED_RX_PAGE_QUEUES 3
#define WED_WO_STA_REC 0x6
@@ -45,7 +46,7 @@ enum mtk_wed_wo_cmd {
MTK_WED_WO_CMD_WED_END
};
-struct mtk_rxbm_desc {
+struct mtk_wed_bm_desc {
__le32 buf0;
__le32 token;
} __packed __aligned(4);
@@ -76,6 +77,11 @@ struct mtk_wed_wo_rx_stats {
__le32 rx_drop_cnt;
};
+struct mtk_wed_buf {
+ void *p;
+ dma_addr_t phy_addr;
+};
+
struct mtk_wed_device {
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
const struct mtk_wed_ops *ops;
@@ -94,17 +100,20 @@ struct mtk_wed_device {
struct mtk_wed_ring txfree_ring;
struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES];
struct mtk_wed_ring rx_wdma[MTK_WED_RX_QUEUES];
+ struct mtk_wed_ring rx_rro_ring[MTK_WED_RX_QUEUES];
+ struct mtk_wed_ring rx_page_ring[MTK_WED_RX_PAGE_QUEUES];
+ struct mtk_wed_ring ind_cmd_ring;
struct {
int size;
- void **pages;
+ struct mtk_wed_buf *pages;
struct mtk_wdma_desc *desc;
dma_addr_t desc_phys;
} tx_buf_ring;
struct {
int size;
- struct mtk_rxbm_desc *desc;
+ struct mtk_wed_bm_desc *desc;
dma_addr_t desc_phys;
} rx_buf_ring;
@@ -114,6 +123,13 @@ struct mtk_wed_device {
dma_addr_t fdbk_phys;
} rro;
+ struct {
+ int size;
+ struct mtk_wed_buf *pages;
+ struct mtk_wed_bm_desc *desc;
+ dma_addr_t desc_phys;
+ } hw_rro;
+
/* filled by driver: */
struct {
union {
@@ -123,6 +139,7 @@ struct mtk_wed_device {
enum mtk_wed_bus_tye bus_type;
void __iomem *base;
u32 phy_base;
+ u32 id;
u32 wpdma_phys;
u32 wpdma_int;
@@ -131,18 +148,35 @@ struct mtk_wed_device {
u32 wpdma_txfree;
u32 wpdma_rx_glo;
u32 wpdma_rx;
+ u32 wpdma_rx_rro[MTK_WED_RX_QUEUES];
+ u32 wpdma_rx_pg;
bool wcid_512;
+ bool hw_rro;
+ bool msi;
u16 token_start;
unsigned int nbuf;
unsigned int rx_nbuf;
unsigned int rx_npkt;
unsigned int rx_size;
+ unsigned int amsdu_max_len;
u8 tx_tbit[MTK_WED_TX_QUEUES];
u8 rx_tbit[MTK_WED_RX_QUEUES];
+ u8 rro_rx_tbit[MTK_WED_RX_QUEUES];
+ u8 rx_pg_tbit[MTK_WED_RX_PAGE_QUEUES];
u8 txfree_tbit;
+ u8 amsdu_max_subframes;
+
+ struct {
+ u8 se_group_nums;
+ u16 win_size;
+ u16 particular_sid;
+ u32 ack_sn_addr;
+ dma_addr_t particular_se_phys;
+ dma_addr_t addr_elem_phys[1024];
+ } ind_cmd;
u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
int (*offload_enable)(struct mtk_wed_device *wed);
@@ -182,6 +216,14 @@ struct mtk_wed_ops {
void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask);
int (*setup_tc)(struct mtk_wed_device *wed, struct net_device *dev,
enum tc_setup_type type, void *type_data);
+ void (*start_hw_rro)(struct mtk_wed_device *dev, u32 irq_mask,
+ bool reset);
+ void (*rro_rx_ring_setup)(struct mtk_wed_device *dev, int ring,
+ void __iomem *regs);
+ void (*msdu_pg_rx_ring_setup)(struct mtk_wed_device *dev, int ring,
+ void __iomem *regs);
+ int (*ind_rx_ring_setup)(struct mtk_wed_device *dev,
+ void __iomem *regs);
};
extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
@@ -206,16 +248,27 @@ mtk_wed_device_attach(struct mtk_wed_device *dev)
return ret;
}
-static inline bool
-mtk_wed_get_rx_capa(struct mtk_wed_device *dev)
+static inline bool mtk_wed_get_rx_capa(struct mtk_wed_device *dev)
{
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ if (dev->version == 3)
+ return dev->wlan.hw_rro;
+
return dev->version != 1;
#else
return false;
#endif
}
+static inline bool mtk_wed_is_amsdu_supported(struct mtk_wed_device *dev)
+{
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ return dev->version == 3;
+#else
+ return false;
+#endif
+}
+
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
#define mtk_wed_device_active(_dev) !!(_dev)->ops
#define mtk_wed_device_detach(_dev) (_dev)->ops->detach(_dev)
@@ -242,6 +295,15 @@ mtk_wed_get_rx_capa(struct mtk_wed_device *dev)
#define mtk_wed_device_dma_reset(_dev) (_dev)->ops->reset_dma(_dev)
#define mtk_wed_device_setup_tc(_dev, _netdev, _type, _type_data) \
(_dev)->ops->setup_tc(_dev, _netdev, _type, _type_data)
+#define mtk_wed_device_start_hw_rro(_dev, _mask, _reset) \
+ (_dev)->ops->start_hw_rro(_dev, _mask, _reset)
+#define mtk_wed_device_rro_rx_ring_setup(_dev, _ring, _regs) \
+ (_dev)->ops->rro_rx_ring_setup(_dev, _ring, _regs)
+#define mtk_wed_device_msdu_pg_rx_ring_setup(_dev, _ring, _regs) \
+ (_dev)->ops->msdu_pg_rx_ring_setup(_dev, _ring, _regs)
+#define mtk_wed_device_ind_rx_ring_setup(_dev, _regs) \
+ (_dev)->ops->ind_rx_ring_setup(_dev, _regs)
+
#else
static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
{
@@ -261,6 +323,10 @@ static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
#define mtk_wed_device_stop(_dev) do {} while (0)
#define mtk_wed_device_dma_reset(_dev) do {} while (0)
#define mtk_wed_device_setup_tc(_dev, _netdev, _type, _type_data) -EOPNOTSUPP
+#define mtk_wed_device_start_hw_rro(_dev, _mask, _reset) do {} while (0)
+#define mtk_wed_device_rro_rx_ring_setup(_dev, _ring, _regs) -ENODEV
+#define mtk_wed_device_msdu_pg_rx_ring_setup(_dev, _ring, _regs) -ENODEV
+#define mtk_wed_device_ind_rx_ring_setup(_dev, _regs) -ENODEV
#endif
#endif
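The new capability helpers are meant to gate feature setup on the WLAN side. A hedged sketch of the assumed pattern (the foo_* helpers are illustrative; the in-tree user is the mt76 driver):

static void foo_wed_setup(struct mtk_wed_device *wed)
{
	if (!mtk_wed_device_active(wed))
		return;

	/* On v3 hardware RX offload additionally requires that the WLAN
	 * driver advertised hardware RRO support (wlan.hw_rro). */
	if (mtk_wed_get_rx_capa(wed))
		foo_enable_rx_offload(wed);

	if (mtk_wed_is_amsdu_supported(wed))
		foo_enable_amsdu_tx(wed);
}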
diff --git a/include/linux/soc/pxa/smemc.h b/include/linux/soc/pxa/smemc.h
index f1ffea236c15..4feb1dded3ec 100644
--- a/include/linux/soc/pxa/smemc.h
+++ b/include/linux/soc/pxa/smemc.h
@@ -10,4 +10,20 @@ int pxa2xx_smemc_get_sdram_rows(void);
unsigned int pxa3xx_smemc_get_memclkdiv(void);
void __iomem *pxa_smemc_get_mdrefr(void);
+/*
+ * Once fully converted to the clock framework, all these functions should be
+ * removed, and replaced with a clk_get(NULL, "core").
+ */
+#ifdef CONFIG_PXA25x
+extern unsigned pxa25x_get_clk_frequency_khz(int);
+#else
+#define pxa25x_get_clk_frequency_khz(x) (0)
+#endif
+
+#ifdef CONFIG_PXA27x
+extern unsigned pxa27x_get_clk_frequency_khz(int);
+#else
+#define pxa27x_get_clk_frequency_khz(x) (0)
+#endif
+
#endif
diff --git a/include/linux/soc/qcom/geni-se.h b/include/linux/soc/qcom/geni-se.h
index c55a0bc8cb0e..29e06905bc1f 100644
--- a/include/linux/soc/qcom/geni-se.h
+++ b/include/linux/soc/qcom/geni-se.h
@@ -35,6 +35,7 @@ enum geni_se_protocol_type {
GENI_SE_UART,
GENI_SE_I2C,
GENI_SE_I3C,
+ GENI_SE_SPI_SLAVE,
};
struct geni_wrapper;
@@ -73,12 +74,14 @@ struct geni_se {
/* Common SE registers */
#define GENI_FORCE_DEFAULT_REG 0x20
+#define GENI_OUTPUT_CTRL 0x24
#define SE_GENI_STATUS 0x40
#define GENI_SER_M_CLK_CFG 0x48
#define GENI_SER_S_CLK_CFG 0x4c
#define GENI_IF_DISABLE_RO 0x64
#define GENI_FW_REVISION_RO 0x68
#define SE_GENI_CLK_SEL 0x7c
+#define SE_GENI_CFG_SEQ_START 0x84
#define SE_GENI_DMA_MODE_EN 0x258
#define SE_GENI_M_CMD0 0x600
#define SE_GENI_M_CMD_CTRL_REG 0x604
@@ -111,6 +114,9 @@ struct geni_se {
/* GENI_FORCE_DEFAULT_REG fields */
#define FORCE_DEFAULT BIT(0)
+/* GENI_OUTPUT_CTRL fields */
+#define GENI_IO_MUX_0_EN BIT(0)
+
/* GENI_STATUS fields */
#define M_GENI_CMD_ACTIVE BIT(0)
#define S_GENI_CMD_ACTIVE BIT(12)
@@ -130,6 +136,9 @@ struct geni_se {
/* GENI_CLK_SEL fields */
#define CLK_SEL_MSK GENMASK(2, 0)
+/* SE_GENI_CFG_SEQ_START fields */
+#define START_TRIGGER BIT(0)
+
/* SE_GENI_DMA_MODE_EN */
#define GENI_DMA_MODE_EN BIT(0)
@@ -490,9 +499,13 @@ int geni_se_clk_freq_match(struct geni_se *se, unsigned long req_freq,
unsigned int *index, unsigned long *res_freq,
bool exact);
+void geni_se_tx_init_dma(struct geni_se *se, dma_addr_t iova, size_t len);
+
int geni_se_tx_dma_prep(struct geni_se *se, void *buf, size_t len,
dma_addr_t *iova);
+void geni_se_rx_init_dma(struct geni_se *se, dma_addr_t iova, size_t len);
+
int geni_se_rx_dma_prep(struct geni_se *se, void *buf, size_t len,
dma_addr_t *iova);
diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h
index 93417ba1ead4..1a886666bbb6 100644
--- a/include/linux/soc/qcom/llcc-qcom.h
+++ b/include/linux/soc/qcom/llcc-qcom.h
@@ -30,7 +30,7 @@
#define LLCC_NPU 23
#define LLCC_WLHW 24
#define LLCC_PIMEM 25
-#define LLCC_DRE 26
+#define LLCC_ECC 26
#define LLCC_CVP 28
#define LLCC_MODPE 29
#define LLCC_APTCM 30
diff --git a/include/linux/soc/qcom/qcom_aoss.h b/include/linux/soc/qcom/qcom_aoss.h
index 3c2a82e606f8..7361ca028752 100644
--- a/include/linux/soc/qcom/qcom_aoss.h
+++ b/include/linux/soc/qcom/qcom_aoss.h
@@ -13,13 +13,13 @@ struct qmp;
#if IS_ENABLED(CONFIG_QCOM_AOSS_QMP)
-int qmp_send(struct qmp *qmp, const void *data, size_t len);
+int qmp_send(struct qmp *qmp, const char *fmt, ...);
struct qmp *qmp_get(struct device *dev);
void qmp_put(struct qmp *qmp);
#else
-static inline int qmp_send(struct qmp *qmp, const void *data, size_t len)
+static inline int qmp_send(struct qmp *qmp, const char *fmt, ...)
{
return -ENODEV;
}
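The signature change turns qmp_send() into a printf-style helper, so callers no longer preformat a buffer and pass its length. A before/after sketch; the message contents are illustrative only:

	/* Old style: preformatted buffer plus explicit length. */
	char buf[64];
	snprintf(buf, sizeof(buf), "{class: bcm, res: %s}", name);
	ret = qmp_send(qmp, buf, sizeof(buf));

	/* New style: formatting happens inside qmp_send() itself. */
	ret = qmp_send(qmp, "{class: bcm, res: %s}", name);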
diff --git a/include/linux/soc/qcom/smd-rpm.h b/include/linux/soc/qcom/smd-rpm.h
index 2990f425fdef..8190878645f9 100644
--- a/include/linux/soc/qcom/smd-rpm.h
+++ b/include/linux/soc/qcom/smd-rpm.h
@@ -2,10 +2,13 @@
#ifndef __QCOM_SMD_RPM_H__
#define __QCOM_SMD_RPM_H__
+#include <linux/types.h>
+
struct qcom_smd_rpm;
-#define QCOM_SMD_RPM_ACTIVE_STATE 0
-#define QCOM_SMD_RPM_SLEEP_STATE 1
+#define QCOM_SMD_RPM_ACTIVE_STATE 0
+#define QCOM_SMD_RPM_SLEEP_STATE 1
+#define QCOM_SMD_RPM_STATE_NUM 2
/*
* Constants used for addressing resources in the RPM.
@@ -44,6 +47,19 @@ struct qcom_smd_rpm;
#define QCOM_SMD_RPM_PKA_CLK 0x616b70
#define QCOM_SMD_RPM_MCFG_CLK 0x6766636d
+#define QCOM_RPM_KEY_SOFTWARE_ENABLE 0x6e657773
+#define QCOM_RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY 0x62636370
+#define QCOM_RPM_SMD_KEY_RATE 0x007a484b
+#define QCOM_RPM_SMD_KEY_ENABLE 0x62616e45
+#define QCOM_RPM_SMD_KEY_STATE 0x54415453
+#define QCOM_RPM_SCALING_ENABLE_ID 0x2
+
+struct clk_smd_rpm_req {
+ __le32 key;
+ __le32 nbytes;
+ __le32 value;
+};
+
int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm,
int state,
u32 resource_type, u32 resource_id,
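The newly exported struct clk_smd_rpm_req pairs with qcom_rpm_smd_write() above; a hedged enable-request sketch in which resource_type and resource_id stand in for a real RPM clock resource:

	struct clk_smd_rpm_req req = {
		.key	= cpu_to_le32(QCOM_RPM_SMD_KEY_ENABLE),
		.nbytes	= cpu_to_le32(sizeof(u32)),
		.value	= cpu_to_le32(1),	/* 1 = enable */
	};

	ret = qcom_rpm_smd_write(rpm, QCOM_SMD_RPM_ACTIVE_STATE,
				 resource_type, resource_id,
				 &req, sizeof(req));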
diff --git a/include/linux/soc/qcom/smem.h b/include/linux/soc/qcom/smem.h
index 86e1b358688a..a36a3b9d4929 100644
--- a/include/linux/soc/qcom/smem.h
+++ b/include/linux/soc/qcom/smem.h
@@ -4,6 +4,7 @@
#define QCOM_SMEM_HOST_ANY -1
+bool qcom_smem_is_available(void);
int qcom_smem_alloc(unsigned host, unsigned item, size_t size);
void *qcom_smem_get(unsigned host, unsigned item, size_t *size);
@@ -11,4 +12,6 @@ int qcom_smem_get_free_space(unsigned host);
phys_addr_t qcom_smem_virt_to_phys(void *p);
+int qcom_smem_get_soc_id(u32 *id);
+
#endif
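Both additions compose into a short probe-time check; a minimal sketch:

	u32 soc_id;

	if (qcom_smem_is_available() && !qcom_smem_get_soc_id(&soc_id))
		pr_info("SoC ID: %u\n", soc_id);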
diff --git a/include/linux/soc/qcom/socinfo.h b/include/linux/soc/qcom/socinfo.h
new file mode 100644
index 000000000000..e78777bb0f4a
--- /dev/null
+++ b/include/linux/soc/qcom/socinfo.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __QCOM_SOCINFO_H__
+#define __QCOM_SOCINFO_H__
+
+/*
+ * SMEM item id, used to acquire handles to respective
+ * SMEM region.
+ */
+#define SMEM_HW_SW_BUILD_ID 137
+
+#define SMEM_SOCINFO_BUILD_ID_LENGTH 32
+#define SMEM_SOCINFO_CHIP_ID_LENGTH 32
+
+/* Socinfo SMEM item structure */
+struct socinfo {
+ __le32 fmt;
+ __le32 id;
+ __le32 ver;
+ char build_id[SMEM_SOCINFO_BUILD_ID_LENGTH];
+ /* Version 2 */
+ __le32 raw_id;
+ __le32 raw_ver;
+ /* Version 3 */
+ __le32 hw_plat;
+ /* Version 4 */
+ __le32 plat_ver;
+ /* Version 5 */
+ __le32 accessory_chip;
+ /* Version 6 */
+ __le32 hw_plat_subtype;
+ /* Version 7 */
+ __le32 pmic_model;
+ __le32 pmic_die_rev;
+ /* Version 8 */
+ __le32 pmic_model_1;
+ __le32 pmic_die_rev_1;
+ __le32 pmic_model_2;
+ __le32 pmic_die_rev_2;
+ /* Version 9 */
+ __le32 foundry_id;
+ /* Version 10 */
+ __le32 serial_num;
+ /* Version 11 */
+ __le32 num_pmics;
+ __le32 pmic_array_offset;
+ /* Version 12 */
+ __le32 chip_family;
+ __le32 raw_device_family;
+ __le32 raw_device_num;
+ /* Version 13 */
+ __le32 nproduct_id;
+ char chip_id[SMEM_SOCINFO_CHIP_ID_LENGTH];
+ /* Version 14 */
+ __le32 num_clusters;
+ __le32 ncluster_array_offset;
+ __le32 num_subset_parts;
+ __le32 nsubset_parts_array_offset;
+ /* Version 15 */
+ __le32 nmodem_supported;
+ /* Version 16 */
+ __le32 feature_code;
+ __le32 pcode;
+ __le32 npartnamemap_offset;
+ __le32 nnum_partname_mapping;
+ /* Version 17 */
+ __le32 oem_variant;
+ /* Version 18 */
+ __le32 num_kvps;
+ __le32 kvps_offset;
+ /* Version 19 */
+ __le32 num_func_clusters;
+ __le32 boot_cluster;
+ __le32 boot_core;
+};
+
+#endif
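The layout is versioned through @fmt, so readers must validate the reported format (and the SMEM item size) before dereferencing fields appended by later versions. A hedged read sketch built on the SMEM API declared above:

	size_t size;
	struct socinfo *info;

	info = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_HW_SW_BUILD_ID, &size);
	if (!IS_ERR(info) && le32_to_cpu(info->fmt) >= 10 &&
	    size >= offsetofend(struct socinfo, serial_num))
		pr_info("serial: %u\n", le32_to_cpu(info->serial_num));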
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 13c3a237b9c9..cfcb7e2c3813 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -177,6 +177,7 @@ static inline size_t msg_data_left(struct msghdr *msg)
#define SCM_RIGHTS 0x01 /* rw: access rights (array of int) */
#define SCM_CREDENTIALS 0x02 /* rw: struct ucred */
#define SCM_SECURITY 0x03 /* rw: security label */
+#define SCM_PIDFD 0x04 /* ro: pidfd (int) */
struct ucred {
__u32 pid;
@@ -318,7 +319,6 @@ struct ucred {
#define MSG_MORE 0x8000 /* Sender will send more */
#define MSG_WAITFORONE 0x10000 /* recvmmsg(): block until 1+ packets avail */
#define MSG_SENDPAGE_NOPOLICY 0x10000 /* sendpage() internal : do not apply policy */
-#define MSG_SENDPAGE_NOTLAST 0x20000 /* sendpage() internal : not the last page */
#define MSG_BATCH 0x40000 /* sendmmsg(): more messages coming */
#define MSG_EOF MSG_FIN
#define MSG_NO_SHARED_FRAGS 0x80000 /* sendpage() internal : page frags are not shared */
@@ -327,6 +327,7 @@ struct ucred {
*/
#define MSG_ZEROCOPY 0x4000000 /* Use user data in kernel path */
+#define MSG_SPLICE_PAGES 0x8000000 /* Splice the pages from the iterator in sendmsg() */
#define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */
#define MSG_CMSG_CLOEXEC 0x40000000 /* Set close_on_exec for file
descriptor received through
@@ -337,6 +338,9 @@ struct ucred {
#define MSG_CMSG_COMPAT 0 /* We never have 32 bit fixups */
#endif
+/* Flags to be cleared on entry by sendmsg and sendmmsg syscalls */
+#define MSG_INTERNAL_SENDMSG_FLAGS \
+ (MSG_SPLICE_PAGES | MSG_SENDPAGE_NOPOLICY | MSG_SENDPAGE_DECRYPTED)
/* setsockopt(2) level. Thanks to BSD these must match IPPROTO_xxx */
#define SOL_IP 0
@@ -379,6 +383,7 @@ struct ucred {
#define SOL_MPTCP 284
#define SOL_MCTP 285
#define SOL_SMC 286
+#define SOL_VSOCK 287
/* IPX options */
#define IPX_TYPE 1
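The new SCM_PIDFD control message carries a read-only pidfd for the sending process; a userspace receive sketch (socket setup and the recvmsg() call omitted):

	struct cmsghdr *cmsg;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SCM_PIDFD) {
			int pidfd;

			memcpy(&pidfd, CMSG_DATA(cmsg), sizeof(pidfd));
			/* pidfd now refers to the sending process */
		}
	}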
diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h
index bae5e2369b4f..307961b41541 100644
--- a/include/linux/sockptr.h
+++ b/include/linux/sockptr.h
@@ -55,6 +55,29 @@ static inline int copy_from_sockptr(void *dst, sockptr_t src, size_t size)
return copy_from_sockptr_offset(dst, src, 0, size);
}
+static inline int copy_struct_from_sockptr(void *dst, size_t ksize,
+ sockptr_t src, size_t usize)
+{
+ size_t size = min(ksize, usize);
+ size_t rest = max(ksize, usize) - size;
+
+ if (!sockptr_is_kernel(src))
+ return copy_struct_from_user(dst, ksize, src.user, size);
+
+ if (usize < ksize) {
+ memset(dst + size, 0, rest);
+ } else if (usize > ksize) {
+ char *p = src.kernel;
+
+ while (rest--) {
+ if (*p++)
+ return -E2BIG;
+ }
+ }
+ memcpy(dst, src.kernel, size);
+ return 0;
+}
+
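A hedged usage sketch for the helper above, mirroring copy_struct_from_user() semantics on the sockopt path; struct my_opts and the optval/optlen plumbing are illustrative:

	struct my_opts opts;	/* hypothetical kernel-side struct */
	int err;

	/* Tolerates older (shorter) callers by zero-filling the tail, and
	 * newer (longer) callers as long as the extra bytes are zero. */
	err = copy_struct_from_sockptr(&opts, sizeof(opts), optval, optlen);
	if (err)
		return err;	/* -E2BIG if the userspace tail was non-zero */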
static inline int copy_to_sockptr_offset(sockptr_t dst, size_t offset,
const void *src, size_t size)
{
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
index ef645de13ae9..66f814b63a43 100644
--- a/include/linux/soundwire/sdw.h
+++ b/include/linux/soundwire/sdw.h
@@ -5,6 +5,9 @@
#define __SOUNDWIRE_H
#include <linux/bug.h>
+#include <linux/lockdep_types.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/mod_devicetable.h>
#include <linux/bitfield.h>
@@ -369,6 +372,7 @@ struct sdw_dpn_prop {
* @clock_reg_supported: the Peripheral implements the clock base and scale
* registers introduced with the SoundWire 1.2 specification. SDCA devices
* do not need to set this boolean property as the registers are required.
+ * @use_domain_irq: call the actual IRQ handler on the slave, in addition to the callback
*/
struct sdw_slave_prop {
u32 mipi_revision;
@@ -393,6 +397,7 @@ struct sdw_slave_prop {
u8 scp_int1_mask;
u32 quirks;
bool clock_reg_supported;
+ bool use_domain_irq;
};
#define SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY BIT(0)
@@ -481,6 +486,11 @@ struct sdw_slave_id {
__u8 sdw_version:4;
};
+struct sdw_extended_slave_id {
+ int link_id;
+ struct sdw_slave_id id;
+};
+
/*
* Helper macros to extract the MIPI-defined IDs
*
@@ -640,6 +650,7 @@ struct sdw_slave_ops {
* struct sdw_slave - SoundWire Slave
* @id: MIPI device ID
* @dev: Linux device
+ * @irq: IRQ number
* @status: Status reported by the Slave
* @bus: Bus handle
* @prop: Slave properties
@@ -669,6 +680,7 @@ struct sdw_slave_ops {
struct sdw_slave {
struct sdw_slave_id id;
struct device dev;
+ int irq;
enum sdw_slave_status status;
struct sdw_bus *bus;
struct sdw_slave_prop prop;
@@ -846,6 +858,9 @@ struct sdw_defer {
* @post_bank_switch: Callback for post bank switch
* @read_ping_status: Read status from PING frames, reported with two bits per Device.
* Bits 31:24 are reserved.
+ * @get_device_num: Callback for vendor-specific device_number allocation
+ * @put_device_num: Callback for vendor-specific device_number release
+ * @new_peripheral_assigned: Callback to handle enumeration of a new peripheral.
*/
struct sdw_master_ops {
int (*read_prop)(struct sdw_bus *bus);
@@ -860,14 +875,19 @@ struct sdw_master_ops {
int (*pre_bank_switch)(struct sdw_bus *bus);
int (*post_bank_switch)(struct sdw_bus *bus);
u32 (*read_ping_status)(struct sdw_bus *bus);
-
+ int (*get_device_num)(struct sdw_bus *bus, struct sdw_slave *slave);
+ void (*put_device_num)(struct sdw_bus *bus, struct sdw_slave *slave);
+ void (*new_peripheral_assigned)(struct sdw_bus *bus,
+ struct sdw_slave *slave,
+ int dev_num);
};
/**
* struct sdw_bus - SoundWire bus
* @dev: Shortcut to &bus->md->dev to avoid changing the entire code.
* @md: Master device
- * @link_id: Link id number, can be 0 to N, unique for each Master
+ * @controller_id: system-unique controller ID. If set to -1, the bus @id will be used.
+ * @link_id: Link id number, can be 0 to N, unique for each Controller
* @id: bus system-wide unique id
* @slaves: list of Slaves on this bus
* @assigned: Bitmap for Slave device numbers.
@@ -883,6 +903,7 @@ struct sdw_master_ops {
* is used to compute and program bus bandwidth, clock, frame shape,
* transport and port parameters
* @debugfs: Bus debugfs
+ * @domain: IRQ domain
* @defer_msg: Defer message
* @clk_stop_timeout: Clock stop timeout computed
* @bank_switch_timeout: Bank switch timeout computed
@@ -894,19 +915,19 @@ struct sdw_master_ops {
* meaningful if multi_link is set. If set to 1, hardware-based
* synchronization will be used even if a stream only uses a single
* SoundWire segment.
- * @dev_num_ida_min: if set, defines the minimum values for the IDA
- * used to allocate system-unique device numbers. This value needs to be
- * identical across all SoundWire bus in the system.
*/
struct sdw_bus {
struct device *dev;
struct sdw_master_device *md;
+ int controller_id;
unsigned int link_id;
int id;
struct list_head slaves;
DECLARE_BITMAP(assigned, SDW_MAX_DEVICES);
struct mutex bus_lock;
+ struct lock_class_key bus_lock_key;
struct mutex msg_lock;
+ struct lock_class_key msg_lock_key;
int (*compute_params)(struct sdw_bus *bus);
const struct sdw_master_ops *ops;
const struct sdw_master_port_ops *port_ops;
@@ -916,12 +937,13 @@ struct sdw_bus {
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs;
#endif
+ struct irq_chip irq_chip;
+ struct irq_domain *domain;
struct sdw_defer defer_msg;
unsigned int clk_stop_timeout;
u32 bank_switch_timeout;
bool multi_link;
int hw_sync_min_links;
- int dev_num_ida_min;
};
int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
@@ -1020,7 +1042,7 @@ int sdw_compute_params(struct sdw_bus *bus);
int sdw_stream_add_master(struct sdw_bus *bus,
struct sdw_stream_config *stream_config,
- struct sdw_port_config *port_config,
+ const struct sdw_port_config *port_config,
unsigned int num_ports,
struct sdw_stream_runtime *stream);
int sdw_stream_remove_master(struct sdw_bus *bus,
@@ -1042,7 +1064,7 @@ void sdw_extract_slave_id(struct sdw_bus *bus, u64 addr, struct sdw_slave_id *id
int sdw_stream_add_slave(struct sdw_slave *slave,
struct sdw_stream_config *stream_config,
- struct sdw_port_config *port_config,
+ const struct sdw_port_config *port_config,
unsigned int num_ports,
struct sdw_stream_runtime *stream);
int sdw_stream_remove_slave(struct sdw_slave *slave,
@@ -1064,7 +1086,7 @@ int sdw_update_no_pm(struct sdw_slave *slave, u32 addr, u8 mask, u8 val);
static inline int sdw_stream_add_slave(struct sdw_slave *slave,
struct sdw_stream_config *stream_config,
- struct sdw_port_config *port_config,
+ const struct sdw_port_config *port_config,
unsigned int num_ports,
struct sdw_stream_runtime *stream)
{
diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h
index 207701aeeb47..00bb22d96ae5 100644
--- a/include/linux/soundwire/sdw_intel.h
+++ b/include/linux/soundwire/sdw_intel.h
@@ -7,6 +7,10 @@
#include <linux/irqreturn.h>
#include <linux/soundwire/sdw.h>
+/*********************************************************************
+ * cAVS and ACE1.x definitions
+ *********************************************************************/
+
#define SDW_SHIM_BASE 0x2C000
#define SDW_ALH_BASE 0x2C800
#define SDW_SHIM_BASE_ACE 0x38000
@@ -101,13 +105,84 @@
#define SDW_ALH_STRMZCFG_DMAT GENMASK(7, 0)
#define SDW_ALH_STRMZCFG_CHN GENMASK(19, 16)
+/*********************************************************************
+ * ACE2.x definitions for SHIM registers - only accessible when the
+ * HDAudio extended link LCTL.SPA/CPA = 1.
+ *********************************************************************/
+/* x variable is link index */
+#define SDW_SHIM2_GENERIC_BASE(x) (0x00030000 + 0x8000 * (x))
+#define SDW_IP_BASE(x) (0x00030100 + 0x8000 * (x))
+#define SDW_SHIM2_VS_BASE(x) (0x00036000 + 0x8000 * (x))
+
+/* SHIM2 Generic Registers */
+/* Read-only capabilities */
+#define SDW_SHIM2_LECAP 0x00
+#define SDW_SHIM2_LECAP_HDS BIT(0) /* unset -> Host mode */
+#define SDW_SHIM2_LECAP_MLC GENMASK(3, 1) /* Number of Lanes */
+
+/* PCM Stream capabilities */
+#define SDW_SHIM2_PCMSCAP 0x10
+#define SDW_SHIM2_PCMSCAP_ISS GENMASK(3, 0) /* Input-only streams */
+#define SDW_SHIM2_PCMSCAP_OSS GENMASK(7, 4) /* Output-only streams */
+#define SDW_SHIM2_PCMSCAP_BSS GENMASK(12, 8) /* Bidirectional streams */
+
+/* Read-only PCM Stream Channel Count, y variable is stream */
+#define SDW_SHIM2_PCMSYCHC(y) (0x14 + (0x4 * (y)))
+#define SDW_SHIM2_PCMSYCHC_CS GENMASK(3, 0) /* Channels Supported */
+
+/* PCM Stream Channel Map */
+#define SDW_SHIM2_PCMSYCHM(y) (0x16 + (0x4 * (y)))
+#define SDW_SHIM2_PCMSYCHM_LCHAN GENMASK(3, 0) /* Lowest channel used by the FIFO port */
+#define SDW_SHIM2_PCMSYCHM_HCHAN GENMASK(7, 4) /* Highest channel used by the FIFO port */
+#define SDW_SHIM2_PCMSYCHM_STRM GENMASK(13, 8) /* HDaudio stream tag */
+#define SDW_SHIM2_PCMSYCHM_DIR BIT(15) /* HDaudio stream direction */
+
+/* SHIM2 vendor-specific registers */
+#define SDW_SHIM2_INTEL_VS_LVSCTL 0x04
+#define SDW_SHIM2_INTEL_VS_LVSCTL_FCG BIT(26)
+#define SDW_SHIM2_INTEL_VS_LVSCTL_MLCS GENMASK(29, 27)
+#define SDW_SHIM2_INTEL_VS_LVSCTL_DCGD BIT(30)
+#define SDW_SHIM2_INTEL_VS_LVSCTL_ICGD BIT(31)
+
+#define SDW_SHIM2_MLCS_XTAL_CLK 0x0
+#define SDW_SHIM2_MLCS_CARDINAL_CLK 0x1
+#define SDW_SHIM2_MLCS_AUDIO_PLL_CLK 0x2
+#define SDW_SHIM2_MLCS_MCLK_INPUT_CLK 0x3
+#define SDW_SHIM2_MLCS_WOV_RING_OSC_CLK 0x4
+
+#define SDW_SHIM2_INTEL_VS_WAKEEN 0x08
+#define SDW_SHIM2_INTEL_VS_WAKEEN_PWE BIT(0)
+
+#define SDW_SHIM2_INTEL_VS_WAKESTS 0x0A
+#define SDW_SHIM2_INTEL_VS_WAKEEN_PWS BIT(0)
+
+#define SDW_SHIM2_INTEL_VS_IOCTL 0x0C
+#define SDW_SHIM2_INTEL_VS_IOCTL_MIF BIT(0)
+#define SDW_SHIM2_INTEL_VS_IOCTL_CO BIT(1)
+#define SDW_SHIM2_INTEL_VS_IOCTL_COE BIT(2)
+#define SDW_SHIM2_INTEL_VS_IOCTL_DO BIT(3)
+#define SDW_SHIM2_INTEL_VS_IOCTL_DOE BIT(4)
+#define SDW_SHIM2_INTEL_VS_IOCTL_BKE BIT(5)
+#define SDW_SHIM2_INTEL_VS_IOCTL_WPDD BIT(6)
+#define SDW_SHIM2_INTEL_VS_IOCTL_ODC BIT(7)
+#define SDW_SHIM2_INTEL_VS_IOCTL_CIBD BIT(8)
+#define SDW_SHIM2_INTEL_VS_IOCTL_DIBD BIT(9)
+#define SDW_SHIM2_INTEL_VS_IOCTL_HAMIFD BIT(10)
+
+#define SDW_SHIM2_INTEL_VS_ACTMCTL 0x0E
+#define SDW_SHIM2_INTEL_VS_ACTMCTL_DACTQE BIT(0)
+#define SDW_SHIM2_INTEL_VS_ACTMCTL_DODS BIT(1)
+#define SDW_SHIM2_INTEL_VS_ACTMCTL_DODSE BIT(2)
+#define SDW_SHIM2_INTEL_VS_ACTMCTL_DOAIS GENMASK(4, 3)
+#define SDW_SHIM2_INTEL_VS_ACTMCTL_DOAISE BIT(5)
+
/**
* struct sdw_intel_stream_params_data: configuration passed during
* the @params_stream callback, e.g. for interaction with DSP
* firmware.
*/
struct sdw_intel_stream_params_data {
- int stream;
+ struct snd_pcm_substream *substream;
struct snd_soc_dai *dai;
struct snd_pcm_hw_params *hw_params;
int link_id;
@@ -120,7 +195,7 @@ struct sdw_intel_stream_params_data {
* firmware.
*/
struct sdw_intel_stream_free_data {
- int stream;
+ struct snd_pcm_substream *substream;
struct snd_soc_dai *dai;
int link_id;
};
@@ -134,7 +209,7 @@ struct sdw_intel_ops {
struct sdw_intel_stream_params_data *params_data);
int (*free_stream)(struct device *dev,
struct sdw_intel_stream_free_data *free_data);
- int (*trigger)(struct snd_soc_dai *dai, int cmd, int stream);
+ int (*trigger)(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai);
};
/**
@@ -189,10 +264,7 @@ struct sdw_intel_link_dev;
*/
#define SDW_INTEL_CLK_STOP_BUS_RESET BIT(3)
-struct sdw_intel_slave_id {
- int link_id;
- struct sdw_slave_id id;
-};
+struct hdac_bus;
/**
* struct sdw_intel_ctx - context allocated by the controller
@@ -221,7 +293,7 @@ struct sdw_intel_ctx {
int num_slaves;
acpi_handle handle;
struct sdw_intel_link_dev **ldev;
- struct sdw_intel_slave_id *ids;
+ struct sdw_extended_slave_id *ids;
struct list_head link_list;
struct mutex shim_lock; /* lock for access to shared SHIM registers */
u32 shim_mask;
@@ -248,6 +320,10 @@ struct sdw_intel_ctx {
* DSP driver. The quirks are common for all links for now.
* @shim_base: sdw shim base.
* @alh_base: sdw alh base.
+ * @ext: extended HDaudio link support
+ * @hbus: hdac_bus pointer, needed for power management
+ * @eml_lock: mutex protecting shared registers in the HDaudio multi-link
+ * space
*/
struct sdw_intel_res {
const struct sdw_intel_hw_ops *hw_ops;
@@ -262,6 +338,9 @@ struct sdw_intel_res {
u32 clock_stop_quirks;
u32 shim_base;
u32 alh_base;
+ bool ext;
+ struct hdac_bus *hbus;
+ struct mutex *eml_lock;
};
/*
@@ -315,6 +394,7 @@ struct sdw_intel;
* @sync_go: helper for multi-link synchronization
* @sync_check_cmdsync_unlocked: helper for multi-link synchronization
* and bank switch - shim_lock is assumed to be locked at higher level
+ * @program_sdi: helper for codec command/control based on dev_num
*/
struct sdw_intel_hw_ops {
void (*debugfs_init)(struct sdw_intel *sdw);
@@ -341,8 +421,18 @@ struct sdw_intel_hw_ops {
int (*sync_go_unlocked)(struct sdw_intel *sdw);
int (*sync_go)(struct sdw_intel *sdw);
bool (*sync_check_cmdsync_unlocked)(struct sdw_intel *sdw);
+
+ void (*program_sdi)(struct sdw_intel *sdw, int dev_num);
};
extern const struct sdw_intel_hw_ops sdw_intel_cnl_hw_ops;
+extern const struct sdw_intel_hw_ops sdw_intel_lnl_hw_ops;
+
+/*
+ * IDA min selected to allow for 5 unconstrained devices per link,
+ * and 6 system-unique Device Numbers for wake-capable devices.
+ */
+
+#define SDW_INTEL_DEV_NUM_IDA_MIN 6
#endif
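A hedged sketch of decoding the new ACE2.x capability register; intel_readl() and shim2_base are stand-ins for whatever MMIO accessor and base the driver actually uses:

	u32 lecap = intel_readl(shim2_base + SDW_SHIM2_LECAP);	/* hypothetical accessor */
	unsigned int lanes = FIELD_GET(SDW_SHIM2_LECAP_MLC, lecap);
	bool host_mode = !(lecap & SDW_SHIM2_LECAP_HDS);	/* unset -> Host mode */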
diff --git a/include/linux/spi/ads7846.h b/include/linux/spi/ads7846.h
index d424c1aadf38..a04c1c34c344 100644
--- a/include/linux/spi/ads7846.h
+++ b/include/linux/spi/ads7846.h
@@ -35,8 +35,6 @@ struct ads7846_platform_data {
u16 debounce_tol; /* tolerance used for filtering */
u16 debounce_rep; /* additional consecutive good readings
* required after the first two */
- int gpio_pendown; /* the GPIO used to decide the pendown
- * state if get_pendown_state == NULL */
int gpio_pendown_debounce; /* platform specific debounce time for
* the gpio_pendown */
int (*get_pendown_state)(void);
diff --git a/include/linux/spi/corgi_lcd.h b/include/linux/spi/corgi_lcd.h
index 0b857616919c..fc6c1515dc54 100644
--- a/include/linux/spi/corgi_lcd.h
+++ b/include/linux/spi/corgi_lcd.h
@@ -15,4 +15,6 @@ struct corgi_lcd_platform_data {
void (*kick_battery)(void);
};
+void corgi_lcd_limit_intensity(int limit);
+
#endif /* __LINUX_SPI_CORGI_LCD_H */
diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h
index 4658e7801b42..0916cb9bcb0a 100644
--- a/include/linux/spi/pxa2xx_spi.h
+++ b/include/linux/spi/pxa2xx_spi.h
@@ -19,7 +19,7 @@ struct pxa2xx_spi_controller {
u16 num_chipselect;
u8 enable_dma;
u8 dma_burst_size;
- bool is_slave;
+ bool is_target;
/* DMA engine specific config */
bool (*dma_filter)(struct dma_chan *chan, void *param);
@@ -31,7 +31,7 @@ struct pxa2xx_spi_controller {
};
/*
- * The controller specific data for SPI slave devices
+ * The controller specific data for SPI target devices
* (resides in spi_board_info.controller_data),
* copied to spi_device.platform_data ... mostly for
* DMA tuning.
diff --git a/include/linux/spi/sh_msiof.h b/include/linux/spi/sh_msiof.h
index dc2a0cbd210d..f950d280461b 100644
--- a/include/linux/spi/sh_msiof.h
+++ b/include/linux/spi/sh_msiof.h
@@ -3,8 +3,8 @@
#define __SPI_SH_MSIOF_H__
enum {
- MSIOF_SPI_MASTER,
- MSIOF_SPI_SLAVE,
+ MSIOF_SPI_HOST,
+ MSIOF_SPI_TARGET,
};
struct sh_msiof_spi_info {
diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h
index 8e984d75f5b6..f866d5c8ed32 100644
--- a/include/linux/spi/spi-mem.h
+++ b/include/linux/spi/spi-mem.h
@@ -101,6 +101,7 @@ struct spi_mem_op {
u8 nbytes;
u8 buswidth;
u8 dtr : 1;
+ u8 __pad : 7;
u16 opcode;
} cmd;
@@ -108,6 +109,7 @@ struct spi_mem_op {
u8 nbytes;
u8 buswidth;
u8 dtr : 1;
+ u8 __pad : 7;
u64 val;
} addr;
@@ -115,12 +117,14 @@ struct spi_mem_op {
u8 nbytes;
u8 buswidth;
u8 dtr : 1;
+ u8 __pad : 7;
} dummy;
struct {
u8 buswidth;
u8 dtr : 1;
u8 ecc : 1;
+ u8 __pad : 6;
enum spi_mem_data_dir dir;
unsigned int nbytes;
union {
@@ -229,6 +233,8 @@ static inline void *spi_mem_get_drvdata(struct spi_mem *mem)
* limitations)
* @supports_op: check if an operation is supported by the controller
* @exec_op: execute a SPI memory operation
+ * Not all drivers provide supports_op(), so exec_op() may also return
+ * -EOPNOTSUPP if the op is not supported by the driver/controller.
* @get_name: get a custom name for the SPI mem device from the controller.
* This might be needed if the controller driver has been ported
* to use the SPI mem layer and a custom name is used to keep
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index cfe42f8cd7a4..600fbd5daf68 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -6,18 +6,22 @@
#ifndef __LINUX_SPI_H
#define __LINUX_SPI_H
+#include <linux/acpi.h>
#include <linux/bits.h>
+#include <linux/completion.h>
#include <linux/device.h>
-#include <linux/mod_devicetable.h>
-#include <linux/slab.h>
+#include <linux/gpio/consumer.h>
#include <linux/kthread.h>
-#include <linux/completion.h>
+#include <linux/mod_devicetable.h>
+#include <linux/overflow.h>
#include <linux/scatterlist.h>
-#include <linux/gpio/consumer.h>
+#include <linux/slab.h>
+#include <linux/u64_stats_sync.h>
#include <uapi/linux/spi/spi.h>
-#include <linux/acpi.h>
-#include <linux/u64_stats_sync.h>
+
+/* Max no. of CS supported per spi device */
+#define SPI_CS_CNT_MAX 16
struct dma_chan;
struct software_node;
@@ -36,7 +40,7 @@ extern struct bus_type spi_bus_type;
/**
* struct spi_statistics - statistics for spi transfers
- * @syncp: seqcount to protect members in this struct for per-cpu udate
+ * @syncp: seqcount to protect members in this struct for per-cpu update
* on 32-bit systems
*
* @messages: number of spi-messages handled
@@ -55,7 +59,7 @@ extern struct bus_type spi_bus_type;
* @bytes_rx: number of bytes received from device
*
* @transfer_bytes_histo:
- * transfer bytes histogramm
+ * transfer bytes histogram
*
* @transfers_split_maxsize:
* number of transfers that have been split because of
@@ -131,7 +135,8 @@ extern void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
* @max_speed_hz: Maximum clock rate to be used with this chip
* (on this board); may be changed by the device's driver.
* The spi_transfer.speed_hz can override this for each transfer.
- * @chip_select: Chipselect, distinguishing chips handled by @controller.
+ * @chip_select: Array of physical chipselects; spi->chip_select[i] gives
+ * the corresponding physical CS for logical CS i.
* @mode: The spi mode defines how data is clocked out and in.
* This may be changed by the device's driver.
* The "active low" default for chipselect mode can be overridden
@@ -156,8 +161,8 @@ extern void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
* the device will bind to the named driver and only the named driver.
* Do not set directly, because core frees it; use driver_set_override() to
* set or clear it.
- * @cs_gpiod: gpio descriptor of the chipselect line (optional, NULL when
- * not using a GPIO line)
+ * @cs_gpiod: Array of GPIO descriptors of the corresponding chipselect lines
+ * (optional, NULL when not using a GPIO line)
* @word_delay: delay to be inserted between consecutive
* words of a transfer
* @cs_setup: delay to be introduced by the controller after CS is asserted
@@ -166,6 +171,7 @@ extern void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
* deasserted. If @cs_change_delay is used from @spi_transfer, then the
* two delays will be added up.
* @pcpu_statistics: statistics for the spi_device
+ * @cs_index_mask: Bit mask of the active chipselect(s) in the chipselect array
*
* A @spi_device is used to interchange data between an SPI slave
* (usually a discrete chip) and CPU memory.
@@ -181,7 +187,7 @@ struct spi_device {
struct spi_controller *controller;
struct spi_controller *master; /* Compatibility layer */
u32 max_speed_hz;
- u8 chip_select;
+ u8 chip_select[SPI_CS_CNT_MAX];
u8 bits_per_word;
bool rt;
#define SPI_NO_TX BIT(31) /* No transmit wire */
@@ -212,7 +218,7 @@ struct spi_device {
void *controller_data;
char modalias[SPI_NAME_SIZE];
const char *driver_override;
- struct gpio_desc *cs_gpiod; /* Chip select gpio desc */
+ struct gpio_desc *cs_gpiod[SPI_CS_CNT_MAX]; /* Chip select gpio desc */
struct spi_delay word_delay; /* Inter-word delay */
/* CS delays */
struct spi_delay cs_setup;
@@ -222,8 +228,15 @@ struct spi_device {
/* The statistics */
struct spi_statistics __percpu *pcpu_statistics;
+ /* Bit mask of the chipselect(s) that the driver needs to use from
+ * the chipselect array. When the controller is capable of handling
+ * multiple chip selects and memories are connected in parallel,
+ * more than one bit needs to be set in cs_index_mask.
+ */
+ u32 cs_index_mask : SPI_CS_CNT_MAX;
+
/*
- * likely need more hooks for more protocol options affecting how
+ * Likely need more hooks for more protocol options affecting how
* the controller talks to each chip, like:
* - memory packing (12 bit samples into low bits, others zeroed)
* - priority
@@ -278,32 +291,43 @@ static inline void *spi_get_drvdata(const struct spi_device *spi)
static inline u8 spi_get_chipselect(const struct spi_device *spi, u8 idx)
{
- return spi->chip_select;
+ return spi->chip_select[idx];
}
static inline void spi_set_chipselect(struct spi_device *spi, u8 idx, u8 chipselect)
{
- spi->chip_select = chipselect;
+ spi->chip_select[idx] = chipselect;
}
static inline struct gpio_desc *spi_get_csgpiod(const struct spi_device *spi, u8 idx)
{
- return spi->cs_gpiod;
+ return spi->cs_gpiod[idx];
}
static inline void spi_set_csgpiod(struct spi_device *spi, u8 idx, struct gpio_desc *csgpiod)
{
- spi->cs_gpiod = csgpiod;
+ spi->cs_gpiod[idx] = csgpiod;
+}
+
+static inline bool spi_is_csgpiod(struct spi_device *spi)
+{
+ u8 idx;
+
+ for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
+ if (spi_get_csgpiod(spi, idx))
+ return true;
+ }
+ return false;
}
/**
* struct spi_driver - Host side "protocol" driver
* @id_table: List of SPI devices supported by this driver
- * @probe: Binds this driver to the spi device. Drivers can verify
+ * @probe: Binds this driver to the SPI device. Drivers can verify
* that the device is actually present, and may need to configure
* characteristics (such as bits_per_word) which weren't needed for
* the initial configuration done during system setup.
- * @remove: Unbinds this driver from the spi device
+ * @remove: Unbinds this driver from the SPI device
* @shutdown: Standard shutdown callback used during system state
* transitions such as powerdown/halt and kexec
* @driver: SPI device drivers should initialize the name and owner
@@ -398,6 +422,8 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
* @bus_lock_spinlock: spinlock for SPI bus locking
* @bus_lock_mutex: mutex for exclusion of multiple callers
* @bus_lock_flag: indicates that the SPI bus is locked for exclusive use
+ * @multi_cs_cap: indicates that the SPI Controller can assert/de-assert
+ * more than one chip select at once.
* @setup: updates the device mode and clocking records used by a
* device's SPI controller; protocol code may call this. This
* must fail if an unrecognized or unsupported mode is requested.
@@ -415,7 +441,7 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
* @queued: whether this controller is providing an internal message queue
* @kworker: pointer to thread struct for message pump
* @pump_messages: work struct for scheduling work to the message pump
- * @queue_lock: spinlock to syncronise access to message queue
+ * @queue_lock: spinlock to synchronise access to message queue
* @queue: message queue
* @cur_msg: the currently in-flight message
* @cur_msg_completion: a completion for the current in-flight message
@@ -460,10 +486,13 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
* - return 1 if the transfer is still in progress. When
* the driver is finished with this transfer it must
* call spi_finalize_current_transfer() so the subsystem
- * can issue the next transfer. Note: transfer_one and
- * transfer_one_message are mutually exclusive; when both
- * are set, the generic subsystem does not call your
- * transfer_one callback.
+ * can issue the next transfer. If the transfer fails, the
+ * driver must set the SPI_TRANS_FAIL_IO flag in
+ * spi_transfer->error before calling
+ * spi_finalize_current_transfer().
+ * Note: transfer_one and transfer_one_message are mutually
+ * exclusive; when both are set, the generic subsystem does
+ * not call your transfer_one callback.
* @handle_err: the subsystem calls the driver to handle an error that occurs
* in the generic implementation of transfer_one_message().
* @mem_ops: optimized/dedicated operations for interactions with SPI memory.
@@ -473,7 +502,7 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
* @unprepare_message: undo any work done by prepare_message().
* @slave_abort: abort the ongoing transfer request on an SPI slave controller
* @target_abort: abort the ongoing transfer request on an SPI target controller
- * @cs_gpiods: Array of GPIO descs to use as chip select lines; one per CS
+ * @cs_gpiods: Array of GPIO descriptors to use as chip select lines; one per CS
* number. Any individual value may be NULL for CS lines that
* are not GPIOs (driven by the SPI controller itself).
* @use_gpio_descriptors: Turns on the code in the SPI core to parse and grab
@@ -500,7 +529,7 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
* If the driver does not set this, the SPI core takes the snapshot as
* close to the driver hand-over as possible.
* @irq_flags: Interrupt enable state during PTP system timestamping
- * @fallback: fallback to pio if dma transfer return failure with
+ * @fallback: fallback to PIO if a DMA transfer returns failure with
* SPI_TRANS_FAIL_NO_START.
* @queue_empty: signal green light for opportunistically skipping the queue
* for spi_sync transfers.
@@ -522,15 +551,17 @@ struct spi_controller {
struct list_head list;
- /* Other than negative (== assign one dynamically), bus_num is fully
- * board-specific. usually that simplifies to being SOC-specific.
- * example: one SOC has three SPI controllers, numbered 0..2,
- * and one board's schematics might show it using SPI-2. software
+ /*
+ * Other than negative (== assign one dynamically), bus_num is fully
+ * board-specific. Usually that simplifies to being SoC-specific.
+ * example: one SoC has three SPI controllers, numbered 0..2,
+ * and one board's schematics might show it using SPI-2. Software
* would normally use bus_num=2 for that controller.
*/
s16 bus_num;
- /* chipselects will be integral to many controllers; some others
+ /*
+ * Chipselects will be integral to many controllers; some others
* might use board-specific GPIOs.
*/
u16 num_chipselect;
@@ -562,8 +593,13 @@ struct spi_controller {
#define SPI_CONTROLLER_NO_TX BIT(2) /* Can't do buffer write */
#define SPI_CONTROLLER_MUST_RX BIT(3) /* Requires rx */
#define SPI_CONTROLLER_MUST_TX BIT(4) /* Requires tx */
-
-#define SPI_MASTER_GPIO_SS BIT(5) /* GPIO CS must select slave */
+#define SPI_CONTROLLER_GPIO_SS BIT(5) /* GPIO CS must select slave */
+#define SPI_CONTROLLER_SUSPENDED BIT(6) /* Currently suspended */
+ /*
+ * The spi-controller has multi chip select capability and can
+ * assert/de-assert more than one chip select at once.
+ */
+#define SPI_CONTROLLER_MULTI_CS BIT(7)
/* Flag indicating if the allocation of this struct is devres-managed */
bool devm_allocated;
@@ -576,8 +612,8 @@ struct spi_controller {
};
/*
- * on some hardware transfer / message size may be constrained
- * the limit may depend on device transfer settings
+ * On some hardware, transfer / message size may be constrained;
+ * the limit may depend on device transfer settings.
*/
size_t (*max_transfer_size)(struct spi_device *spi);
size_t (*max_message_size)(struct spi_device *spi);
@@ -595,7 +631,8 @@ struct spi_controller {
/* Flag indicating that the SPI bus is locked for exclusive use */
bool bus_lock_flag;
- /* Setup mode and clock, etc (spi driver may call many times).
+ /*
+ * Setup mode and clock, etc (SPI driver may call many times).
*
* IMPORTANT: this may be called when transfers to another
* device are active. DO NOT UPDATE SHARED REGISTERS in ways
@@ -613,18 +650,19 @@ struct spi_controller {
*/
int (*set_cs_timing)(struct spi_device *spi);
- /* Bidirectional bulk transfers
+ /*
+ * Bidirectional bulk transfers
*
* + The transfer() method may not sleep; its main role is
* just to add the message to the queue.
* + For now there's no remove-from-queue operation, or
* any other request management
- * + To a given spi_device, message queueing is pure fifo
+ * + To a given spi_device, message queueing is pure FIFO
*
* + The controller's main job is to process its message queue,
* selecting a chip (for masters), then transferring data
* + If there are multiple spi_device children, the i/o queue
- * arbitration algorithm is unspecified (round robin, fifo,
+ * arbitration algorithm is unspecified (round robin, FIFO,
* priority, reservations, preemption, etc)
*
* + Chipselect stays active during the entire message
@@ -672,7 +710,8 @@ struct spi_controller {
bool rt;
bool auto_runtime_pm;
bool cur_msg_mapped;
- char last_cs;
+ char last_cs[SPI_CS_CNT_MAX];
+ char last_cs_index_mask;
bool last_cs_mode_high;
bool fallback;
struct completion xfer_completion;
@@ -705,7 +744,7 @@ struct spi_controller {
const struct spi_controller_mem_ops *mem_ops;
const struct spi_controller_mem_caps *mem_caps;
- /* gpio chip select */
+ /* GPIO chip select */
struct gpio_desc **cs_gpiods;
bool use_gpio_descriptors;
s8 unused_native_cs;
@@ -789,7 +828,7 @@ void spi_take_timestamp_post(struct spi_controller *ctlr,
struct spi_transfer *xfer,
size_t progress, bool irqs_off);
-/* The spi driver core manages memory for the spi_controller classdev */
+/* The SPI driver core manages memory for the spi_controller classdev */
extern struct spi_controller *__spi_alloc_controller(struct device *host,
unsigned int size, bool slave);
@@ -863,6 +902,7 @@ extern int devm_spi_register_controller(struct device *dev,
extern void spi_unregister_controller(struct spi_controller *ctlr);
#if IS_ENABLED(CONFIG_ACPI)
+extern struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev);
extern struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
struct acpi_device *adev,
int index);
@@ -878,13 +918,13 @@ typedef void (*spi_res_release_t)(struct spi_controller *ctlr,
void *res);
/**
- * struct spi_res - spi resource management structure
+ * struct spi_res - SPI resource management structure
* @entry: list entry
* @release: release code called prior to freeing this resource
* @data: extra data allocated for the specific use-case
*
- * this is based on ideas from devres, but focused on life-cycle
- * management during spi_message processing
+ * This is based on ideas from devres, but focused on life-cycle
+ * management during spi_message processing.
*/
struct spi_res {
struct list_head entry;
@@ -902,7 +942,7 @@ struct spi_res {
*
* The spi_messages themselves consist of a series of read+write transfer
* segments. Those segments always read the same number of bits as they
- * write; but one or the other is easily ignored by passing a null buffer
+ * write; but one or the other is easily ignored by passing a NULL buffer
* pointer. (This is unlike most types of I/O API, because SPI hardware
* is full duplex.)
*
@@ -913,8 +953,8 @@ struct spi_res {
/**
* struct spi_transfer - a read/write buffer pair
- * @tx_buf: data to be written (dma-safe memory), or NULL
- * @rx_buf: data to be read (dma-safe memory), or NULL
+ * @tx_buf: data to be written (DMA-safe memory), or NULL
+ * @rx_buf: data to be read (DMA-safe memory), or NULL
* @tx_dma: DMA address of tx_buf, if @spi_message.is_dma_mapped
* @rx_dma: DMA address of rx_buf, if @spi_message.is_dma_mapped
* @tx_nbits: number of bits used for writing. If 0 the default
@@ -937,7 +977,7 @@ struct spi_res {
* @word_delay: inter word delay to be introduced after each word size
* (set by bits_per_word) transmission.
* @effective_speed_hz: the effective SCK-speed that was used to
- * transfer this transfer. Set to 0 if the spi bus driver does
+ * transfer this transfer. Set to 0 if the SPI bus driver does
* not support it.
* @transfer_list: transfers are sequenced through @spi_message.transfers
* @tx_sg: Scatterlist for transmit, currently not for client use
@@ -966,16 +1006,16 @@ struct spi_res {
* transmitting the "pre" word, and the "post" timestamp after receiving
* transmit confirmation from the controller for the "post" word.
* @timestamped: true if the transfer has been timestamped
- * @error: Error status logged by spi controller driver.
+ * @error: Error status logged by SPI controller driver.
*
* SPI transfers always write the same number of bytes as they read.
* Protocol drivers should always provide @rx_buf and/or @tx_buf.
* In some cases, they may also want to provide DMA addresses for
* the data being transferred; that may reduce overhead, when the
- * underlying driver uses dma.
+ * underlying driver uses DMA.
*
- * If the transmit buffer is null, zeroes will be shifted out
- * while filling @rx_buf. If the receive buffer is null, the data
+ * If the transmit buffer is NULL, zeroes will be shifted out
+ * while filling @rx_buf. If the receive buffer is NULL, the data
* shifted in will be discarded. Only "len" bytes shift out (or in).
* It's an error to try to shift out a partial word. (For example, by
* shifting out three bytes with word size of sixteen or twenty bits;
@@ -1009,7 +1049,7 @@ struct spi_res {
* Some devices need protocol transactions to be built from a series of
* spi_message submissions, where the content of one message is determined
* by the results of previous messages and where the whole transaction
- * ends when the chipselect goes intactive.
+ * ends when the chipselect goes inactive.
*
* When SPI can transfer in 1x, 2x or 4x, it can get this transfer information
* from device through @tx_nbits and @rx_nbits. In Bi-direction, these
@@ -1023,16 +1063,18 @@ struct spi_res {
* and its transfers, ignore them until its completion callback.
*/
struct spi_transfer {
- /* It's ok if tx_buf == rx_buf (right?)
- * for MicroWire, one buffer must be null
- * buffers must work with dma_*map_single() calls, unless
- * spi_message.is_dma_mapped reports a pre-existing mapping
+ /*
+ * It's okay if tx_buf == rx_buf (right?).
+ * For MicroWire, one buffer must be NULL.
+ * Buffers must work with dma_*map_single() calls, unless
+ * spi_message.is_dma_mapped reports a pre-existing mapping.
*/
const void *tx_buf;
void *rx_buf;
unsigned len;
#define SPI_TRANS_FAIL_NO_START BIT(0)
+#define SPI_TRANS_FAIL_IO BIT(1)
u16 error;
dma_addr_t tx_dma;
@@ -1046,9 +1088,9 @@ struct spi_transfer {
unsigned tx_nbits:3;
unsigned rx_nbits:3;
unsigned timestamped:1;
-#define SPI_NBITS_SINGLE 0x01 /* 1bit transfer */
-#define SPI_NBITS_DUAL 0x02 /* 2bits transfer */
-#define SPI_NBITS_QUAD 0x04 /* 4bits transfer */
+#define SPI_NBITS_SINGLE 0x01 /* 1-bit transfer */
+#define SPI_NBITS_DUAL 0x02 /* 2-bit transfer */
+#define SPI_NBITS_QUAD 0x04 /* 4-bit transfer */
u8 bits_per_word;
struct spi_delay delay;
struct spi_delay cs_change_delay;
@@ -1069,7 +1111,7 @@ struct spi_transfer {
* struct spi_message - one multi-segment SPI transaction
* @transfers: list of transfer segments in this transaction
* @spi: SPI device to which the transaction is queued
- * @is_dma_mapped: if true, the caller provided both dma and cpu virtual
+ * @is_dma_mapped: if true, the caller provided both DMA and CPU virtual
* addresses for each transfer buffer
* @complete: called to report transaction completions
* @context: the argument to complete() when it's called
@@ -1079,7 +1121,7 @@ struct spi_transfer {
* @status: zero for success, else negative errno
* @queue: for use by whichever driver currently owns the message
* @state: for use by whichever driver currently owns the message
- * @resources: for resource management when the spi message is processed
+ * @resources: for resource management when the SPI message is processed
* @prepared: spi_prepare_message was called for this message
*
* A @spi_message is used to execute an atomic sequence of data transfers,
@@ -1106,7 +1148,8 @@ struct spi_message {
/* spi_prepare_message() was called for this message */
bool prepared;
- /* REVISIT: we might want a flag affecting the behavior of the
+ /*
+ * REVISIT: we might want a flag affecting the behavior of the
* last transfer ... allowing things like "read 16 bit length L"
* immediately followed by "read L bytes". Basically imposing
* a specific message scheduling algorithm.
@@ -1124,14 +1167,15 @@ struct spi_message {
unsigned frame_length;
unsigned actual_length;
- /* For optional use by whatever driver currently owns the
+ /*
+ * For optional use by whatever driver currently owns the
* spi_message ... between calls to spi_async and then later
* complete(), that's the spi_controller controller driver.
*/
struct list_head queue;
void *state;
- /* List of spi_res reources when the spi message is processed */
+ /* List of spi_res resources when the SPI message is processed */
struct list_head resources;
};
@@ -1168,7 +1212,7 @@ spi_transfer_delay_exec(struct spi_transfer *t)
/**
* spi_message_init_with_transfers - Initialize spi_message and append transfers
* @m: spi_message to be initialized
- * @xfers: An array of spi transfers
+ * @xfers: An array of SPI transfers
* @num_xfers: Number of items in the xfer array
*
* This function initializes the given spi_message and adds each spi_transfer in
@@ -1185,26 +1229,27 @@ struct spi_transfer *xfers, unsigned int num_xfers)
spi_message_add_tail(&xfers[i], m);
}
-/* It's fine to embed message and transaction structures in other data
+/*
+ * It's fine to embed message and transaction structures in other data
* structures so long as you don't free them while they're in use.
*/
-
static inline struct spi_message *spi_message_alloc(unsigned ntrans, gfp_t flags)
{
- struct spi_message *m;
-
- m = kzalloc(sizeof(struct spi_message)
- + ntrans * sizeof(struct spi_transfer),
- flags);
- if (m) {
- unsigned i;
- struct spi_transfer *t = (struct spi_transfer *)(m + 1);
-
- spi_message_init_no_memset(m);
- for (i = 0; i < ntrans; i++, t++)
- spi_message_add_tail(t, m);
- }
- return m;
+ struct spi_message_with_transfers {
+ struct spi_message m;
+ struct spi_transfer t[];
+ } *mwt;
+ unsigned i;
+
+ mwt = kzalloc(struct_size(mwt, t, ntrans), flags);
+ if (!mwt)
+ return NULL;
+
+ spi_message_init_no_memset(&mwt->m);
+ for (i = 0; i < ntrans; i++)
+ spi_message_add_tail(&mwt->t[i], &mwt->m);
+
+ return &mwt->m;
}
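The rewrite swaps the open-coded size arithmetic for struct_size() from <linux/overflow.h> (now included above), which saturates rather than wraps if ntrans is ever attacker-controlled. Call sites are unchanged; a minimal sketch:

	struct spi_message *m = spi_message_alloc(2, GFP_KERNEL);

	if (m) {
		/* both spi_transfer slots are already on m->transfers */
		spi_message_free(m);
	}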
static inline void spi_message_free(struct spi_message *m)
@@ -1261,6 +1306,23 @@ static inline bool spi_is_bpw_supported(struct spi_device *spi, u32 bpw)
return false;
}
+/**
+ * spi_controller_xfer_timeout - Compute a suitable timeout value
+ * @ctlr: SPI device
+ * @xfer: Transfer descriptor
+ *
+ * Compute a relevant timeout value for the given transfer. We derive the time
+ * that it would take on a single data line and take twice this amount of time
+ * with a minimum of 500ms to avoid false positives on loaded systems.
+ *
+ * Returns: Transfer timeout value in milliseconds.
+ */
+static inline unsigned int spi_controller_xfer_timeout(struct spi_controller *ctlr,
+ struct spi_transfer *xfer)
+{
+ return max(xfer->len * 8 * 2 / (xfer->speed_hz / 1000), 500U);
+}
+
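A hedged sketch of how a controller driver might consume the helper while waiting on the core's completion:

	unsigned long ms = spi_controller_xfer_timeout(ctlr, xfer);

	if (!wait_for_completion_timeout(&ctlr->xfer_completion,
					 msecs_to_jiffies(ms)))
		dev_err(&ctlr->dev, "SPI transfer timed out\n");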
/*---------------------------------------------------------------------------*/
/* SPI transfer replacement methods which make use of spi_res */
@@ -1274,7 +1336,7 @@ typedef void (*spi_replaced_release_t)(struct spi_controller *ctlr,
* replacements that have occurred
* so that they can get reverted
* @release: some extra release code to get executed prior to
- * relasing this structure
+ * releasing this structure
* @extradata: pointer to some extra data if requested or NULL
* @replaced_transfers: transfers that have been replaced and which need
* to get restored
@@ -1284,9 +1346,9 @@ typedef void (*spi_replaced_release_t)(struct spi_controller *ctlr,
* @inserted_transfers: array of spi_transfers of array-size @inserted,
* that have been replacing replaced_transfers
*
- * note: that @extradata will point to @inserted_transfers[@inserted]
+ * Note: that @extradata will point to @inserted_transfers[@inserted]
* if some extra allocation is requested, so alignment will be the same
- * as for spi_transfers
+ * as for spi_transfers.
*/
struct spi_replaced_transfers {
spi_replaced_release_t release;
@@ -1312,7 +1374,8 @@ extern int spi_split_transfers_maxwords(struct spi_controller *ctlr,
/*---------------------------------------------------------------------------*/
-/* All these synchronous SPI transfer routines are utilities layered
+/*
+ * All these synchronous SPI transfer routines are utilities layered
* over the core async transfer primitive. Here, "synchronous" means
* they will sleep uninterruptibly until the async transfer completes.
*/
@@ -1455,7 +1518,7 @@ static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd)
*
* Callable only from contexts that can sleep.
*
- * Return: the (unsigned) sixteen bit number returned by the device in cpu
+ * Return: the (unsigned) sixteen bit number returned by the device in CPU
* endianness, or else a negative error code.
*/
static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd)
@@ -1483,7 +1546,7 @@ static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd)
* As a rule, SPI devices can't be probed. Instead, board init code
* provides a table listing the devices which are present, with enough
* information to bind and set up the device's driver. There's basic
- * support for nonstatic configurations too; enough to handle adding
+ * support for non-static configurations too; enough to handle adding
* parport adapters, or microcontrollers acting as USB-to-SPI bridges.
*/
@@ -1520,12 +1583,13 @@ static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd)
* are active in some dynamic board configuration models.
*/
struct spi_board_info {
- /* The device name and module name are coupled, like platform_bus;
+ /*
+ * The device name and module name are coupled, like platform_bus;
* "modalias" is normally the driver name.
*
* platform_data goes to spi_device.dev.platform_data,
* controller_data goes to spi_device.controller_data,
- * irq is copied too
+ * IRQ is copied too.
*/
char modalias[SPI_NAME_SIZE];
const void *platform_data;
@@ -1537,7 +1601,8 @@ struct spi_board_info {
u32 max_speed_hz;
- /* bus_num is board specific and matches the bus_num of some
+ /*
+ * bus_num is board specific and matches the bus_num of some
* spi_controller that will probably be registered later.
*
* chip_select reflects how this chip is wired to that master;
@@ -1546,12 +1611,14 @@ struct spi_board_info {
u16 bus_num;
u16 chip_select;
- /* mode becomes spi_device.mode, and is essential for chips
+ /*
+ * mode becomes spi_device.mode, and is essential for chips
* where the default of SPI_CS_HIGH = 0 is wrong.
*/
u32 mode;
- /* ... may need additional spi_device chip config data here.
+ /*
+ * ... may need additional spi_device chip config data here.
* avoid stuff protocol drivers can set; but include stuff
* needed to behave without being bound to a driver:
* - quirks like clock rate mattering when not selected
@@ -1568,7 +1635,8 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n)
{ return 0; }
#endif
-/* If you're hotplugging an adapter with devices (parport, usb, etc)
+/*
+ * If you're hotplugging an adapter with devices (parport, USB, etc)
* use spi_new_device() to describe each device. You can also call
* spi_unregister_device() to start making that device vanish, but
* normally that would be handled by spi_unregister_controller().
@@ -1605,12 +1673,6 @@ spi_transfer_is_last(struct spi_controller *ctlr, struct spi_transfer *xfer)
/* Compatibility layer */
#define spi_master spi_controller
-#define SPI_MASTER_HALF_DUPLEX SPI_CONTROLLER_HALF_DUPLEX
-#define SPI_MASTER_NO_RX SPI_CONTROLLER_NO_RX
-#define SPI_MASTER_NO_TX SPI_CONTROLLER_NO_TX
-#define SPI_MASTER_MUST_RX SPI_CONTROLLER_MUST_RX
-#define SPI_MASTER_MUST_TX SPI_CONTROLLER_MUST_TX
-
#define spi_master_get_devdata(_ctlr) spi_controller_get_devdata(_ctlr)
#define spi_master_set_devdata(_ctlr, _data) \
spi_controller_set_devdata(_ctlr, _data)
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index be48f1cb1878..3fcd20de6ca8 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -61,6 +61,7 @@
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <linux/lockdep.h>
+#include <linux/cleanup.h>
#include <asm/barrier.h>
#include <asm/mmiowb.h>
@@ -455,6 +456,37 @@ static __always_inline int spin_is_contended(spinlock_t *lock)
#endif /* CONFIG_PREEMPT_RT */
/*
+ * Does a critical section need to be broken due to another
+ * task waiting? (Technically this does not depend on CONFIG_PREEMPTION,
+ * but there is a general need for low latency.)
+ */
+static inline int spin_needbreak(spinlock_t *lock)
+{
+#ifdef CONFIG_PREEMPTION
+ return spin_is_contended(lock);
+#else
+ return 0;
+#endif
+}
+
+/*
+ * Check if an rwlock is contended.
+ * Returns non-zero if there is another task waiting on the rwlock.
+ * Returns zero if the lock is not contended or the system / underlying
+ * rwlock implementation does not support contention detection.
+ * Technically this does not depend on CONFIG_PREEMPTION, but there is
+ * a general need for low latency.
+ */
+static inline int rwlock_needbreak(rwlock_t *lock)
+{
+#ifdef CONFIG_PREEMPTION
+ return rwlock_is_contended(lock);
+#else
+ return 0;
+#endif
+}
+
+/*
* Pull the atomic_t declaration:
* (asm-mips/atomic.h needs above definitions)
*/
@@ -502,5 +534,76 @@ int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
void free_bucket_spinlocks(spinlock_t *locks);
+DEFINE_LOCK_GUARD_1(raw_spinlock, raw_spinlock_t,
+ raw_spin_lock(_T->lock),
+ raw_spin_unlock(_T->lock))
+
+DEFINE_LOCK_GUARD_1_COND(raw_spinlock, _try, raw_spin_trylock(_T->lock))
+
+DEFINE_LOCK_GUARD_1(raw_spinlock_nested, raw_spinlock_t,
+ raw_spin_lock_nested(_T->lock, SINGLE_DEPTH_NESTING),
+ raw_spin_unlock(_T->lock))
+
+DEFINE_LOCK_GUARD_1(raw_spinlock_irq, raw_spinlock_t,
+ raw_spin_lock_irq(_T->lock),
+ raw_spin_unlock_irq(_T->lock))
+
+DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irq, _try, raw_spin_trylock_irq(_T->lock))
+
+DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t,
+ raw_spin_lock_irqsave(_T->lock, _T->flags),
+ raw_spin_unlock_irqrestore(_T->lock, _T->flags),
+ unsigned long flags)
+
+DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irqsave, _try,
+ raw_spin_trylock_irqsave(_T->lock, _T->flags))
+
+DEFINE_LOCK_GUARD_1(spinlock, spinlock_t,
+ spin_lock(_T->lock),
+ spin_unlock(_T->lock))
+
+DEFINE_LOCK_GUARD_1_COND(spinlock, _try, spin_trylock(_T->lock))
+
+DEFINE_LOCK_GUARD_1(spinlock_irq, spinlock_t,
+ spin_lock_irq(_T->lock),
+ spin_unlock_irq(_T->lock))
+
+DEFINE_LOCK_GUARD_1_COND(spinlock_irq, _try,
+ spin_trylock_irq(_T->lock))
+
+DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t,
+ spin_lock_irqsave(_T->lock, _T->flags),
+ spin_unlock_irqrestore(_T->lock, _T->flags),
+ unsigned long flags)
+
+DEFINE_LOCK_GUARD_1_COND(spinlock_irqsave, _try,
+ spin_trylock_irqsave(_T->lock, _T->flags))
+
+DEFINE_LOCK_GUARD_1(read_lock, rwlock_t,
+ read_lock(_T->lock),
+ read_unlock(_T->lock))
+
+DEFINE_LOCK_GUARD_1(read_lock_irq, rwlock_t,
+ read_lock_irq(_T->lock),
+ read_unlock_irq(_T->lock))
+
+DEFINE_LOCK_GUARD_1(read_lock_irqsave, rwlock_t,
+ read_lock_irqsave(_T->lock, _T->flags),
+ read_unlock_irqrestore(_T->lock, _T->flags),
+ unsigned long flags)
+
+DEFINE_LOCK_GUARD_1(write_lock, rwlock_t,
+ write_lock(_T->lock),
+ write_unlock(_T->lock))
+
+DEFINE_LOCK_GUARD_1(write_lock_irq, rwlock_t,
+ write_lock_irq(_T->lock),
+ write_unlock_irq(_T->lock))
+
+DEFINE_LOCK_GUARD_1(write_lock_irqsave, rwlock_t,
+ write_lock_irqsave(_T->lock, _T->flags),
+ write_unlock_irqrestore(_T->lock, _T->flags),
+ unsigned long flags)
+
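These guards hook into <linux/cleanup.h> (included above), so the matching unlock runs automatically at scope exit. A usage sketch with a hypothetical structure:

	static int read_counter(struct my_state *s)	/* hypothetical */
	{
		guard(spinlock_irqsave)(&s->lock);
		return s->counter;	/* lock released on return */
	}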
#undef __LINUX_INSIDE_SPINLOCK_H
#endif /* __LINUX_SPINLOCK_H */
diff --git a/include/linux/splice.h b/include/linux/splice.h
index a55179fd60fc..9dec4861d09f 100644
--- a/include/linux/splice.h
+++ b/include/linux/splice.h
@@ -38,6 +38,7 @@ struct splice_desc {
struct file *file; /* file to read/write */
void *data; /* cookie */
} u;
+ void (*splice_eof)(struct splice_desc *sd); /* Unexpected EOF handler */
loff_t pos; /* file position */
loff_t *opos; /* sendfile: output position */
size_t num_spliced; /* number of bytes already spliced */
@@ -67,23 +68,37 @@ typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *,
typedef int (splice_direct_actor)(struct pipe_inode_info *,
struct splice_desc *);
-extern ssize_t splice_from_pipe(struct pipe_inode_info *, struct file *,
- loff_t *, size_t, unsigned int,
- splice_actor *);
-extern ssize_t __splice_from_pipe(struct pipe_inode_info *,
- struct splice_desc *, splice_actor *);
-extern ssize_t splice_to_pipe(struct pipe_inode_info *,
- struct splice_pipe_desc *);
-extern ssize_t add_to_pipe(struct pipe_inode_info *,
- struct pipe_buffer *);
-extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
- splice_direct_actor *);
-extern long do_splice(struct file *in, loff_t *off_in,
- struct file *out, loff_t *off_out,
- size_t len, unsigned int flags);
+ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
+ loff_t *ppos, size_t len, unsigned int flags,
+ splice_actor *actor);
+ssize_t __splice_from_pipe(struct pipe_inode_info *pipe,
+ struct splice_desc *sd, splice_actor *actor);
+ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
+ struct splice_pipe_desc *spd);
+ssize_t add_to_pipe(struct pipe_inode_info *pipe, struct pipe_buffer *buf);
+ssize_t vfs_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags);
+ssize_t splice_direct_to_actor(struct file *file, struct splice_desc *sd,
+ splice_direct_actor *actor);
+ssize_t do_splice(struct file *in, loff_t *off_in, struct file *out,
+ loff_t *off_out, size_t len, unsigned int flags);
+ssize_t do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
+ loff_t *opos, size_t len, unsigned int flags);
+ssize_t splice_file_range(struct file *in, loff_t *ppos, struct file *out,
+ loff_t *opos, size_t len);
-extern long do_tee(struct file *in, struct file *out, size_t len,
- unsigned int flags);
+static inline long splice_copy_file_range(struct file *in, loff_t pos_in,
+ struct file *out, loff_t pos_out,
+ size_t len)
+{
+ return splice_file_range(in, &pos_in, out, &pos_out, len);
+}
+
+ssize_t do_tee(struct file *in, struct file *out, size_t len,
+ unsigned int flags);
+ssize_t splice_to_socket(struct pipe_inode_info *pipe, struct file *out,
+ loff_t *ppos, size_t len, unsigned int flags);
/*
* for dynamic pipe sizing
diff --git a/include/linux/spmi.h b/include/linux/spmi.h
index eac1956a8727..28e8c8bd3944 100644
--- a/include/linux/spmi.h
+++ b/include/linux/spmi.h
@@ -120,6 +120,9 @@ static inline void spmi_controller_put(struct spmi_controller *ctrl)
int spmi_controller_add(struct spmi_controller *ctrl);
void spmi_controller_remove(struct spmi_controller *ctrl);
+struct spmi_controller *devm_spmi_controller_alloc(struct device *parent, size_t size);
+int devm_spmi_controller_add(struct device *parent, struct spmi_controller *ctrl);
+
/**
* struct spmi_driver - SPMI slave device driver
* @driver: SPMI device drivers should initialize name and owner field of
@@ -166,7 +169,7 @@ static inline void spmi_driver_unregister(struct spmi_driver *sdrv)
struct device_node;
-struct spmi_device *spmi_device_from_of(struct device_node *np);
+struct spmi_device *spmi_find_device_by_of_node(struct device_node *np);
int spmi_register_read(struct spmi_device *sdev, u8 addr, u8 *buf);
int spmi_ext_register_read(struct spmi_device *sdev, u8 addr, u8 *buf,
size_t len);
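
The devm variants tie the controller's lifetime to the parent device, so the
error and remove paths no longer need explicit put/remove calls. A minimal
probe sketch, assuming devm_spmi_controller_alloc() follows the usual devm
convention of returning an ERR_PTR on failure (driver names hypothetical):

	static int example_spmi_probe(struct platform_device *pdev)
	{
		struct spmi_controller *ctrl;

		ctrl = devm_spmi_controller_alloc(&pdev->dev,
						  sizeof(struct example_priv));
		if (IS_ERR(ctrl))
			return PTR_ERR(ctrl);

		/* set up ctrl->read_cmd, ctrl->write_cmd, ... here */

		/* controller is removed automatically on driver detach */
		return devm_spmi_controller_add(&pdev->dev, ctrl);
	}
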
diff --git a/include/linux/sprintf.h b/include/linux/sprintf.h
new file mode 100644
index 000000000000..33dcbec71925
--- /dev/null
+++ b/include/linux/sprintf.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_KERNEL_SPRINTF_H_
+#define _LINUX_KERNEL_SPRINTF_H_
+
+#include <linux/compiler_attributes.h>
+#include <linux/types.h>
+
+int num_to_str(char *buf, int size, unsigned long long num, unsigned int width);
+
+__printf(2, 3) int sprintf(char *buf, const char * fmt, ...);
+__printf(2, 0) int vsprintf(char *buf, const char *, va_list);
+__printf(3, 4) int snprintf(char *buf, size_t size, const char *fmt, ...);
+__printf(3, 0) int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
+__printf(3, 4) int scnprintf(char *buf, size_t size, const char *fmt, ...);
+__printf(3, 0) int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
+__printf(2, 3) __malloc char *kasprintf(gfp_t gfp, const char *fmt, ...);
+__printf(2, 0) __malloc char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
+__printf(2, 0) const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list args);
+
+__scanf(2, 3) int sscanf(const char *, const char *, ...);
+__scanf(2, 0) int vsscanf(const char *, const char *, va_list);
+
+/* These are for specific cases, do not use without real need */
+extern bool no_hash_pointers;
+int no_hash_pointers_enable(char *str);
+
+#endif	/* _LINUX_KERNEL_SPRINTF_H_ */
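
One behavioural detail worth keeping in mind when using this header:
snprintf() returns the length that would have been written, while
scnprintf() returns the number of characters actually stored, which is the
safe choice when appending into a fixed buffer:

	static void example_fill(char *buf, size_t size, int a, int b)
	{
		size_t off = 0;

		/* scnprintf() never returns more than the space available */
		off += scnprintf(buf + off, size - off, "a=%d ", a);
		off += scnprintf(buf + off, size - off, "b=%d", b);
	}
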
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 41c4b26fb1c1..236610e4a8fa 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -212,7 +212,7 @@ static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp)
srcu_check_nmi_safety(ssp, false);
retval = __srcu_read_lock(ssp);
- srcu_lock_acquire(&(ssp)->dep_map);
+ srcu_lock_acquire(&ssp->dep_map);
return retval;
}
@@ -229,7 +229,7 @@ static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp
srcu_check_nmi_safety(ssp, true);
retval = __srcu_read_lock_nmisafe(ssp);
- rcu_lock_acquire(&(ssp)->dep_map);
+ rcu_try_lock_acquire(&ssp->dep_map);
return retval;
}
@@ -284,7 +284,7 @@ static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
WARN_ON_ONCE(idx & ~0x1);
srcu_check_nmi_safety(ssp, false);
- srcu_lock_release(&(ssp)->dep_map);
+ srcu_lock_release(&ssp->dep_map);
__srcu_read_unlock(ssp, idx);
}
@@ -300,7 +300,7 @@ static inline void srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
{
WARN_ON_ONCE(idx & ~0x1);
srcu_check_nmi_safety(ssp, true);
- rcu_lock_release(&(ssp)->dep_map);
+ rcu_lock_release(&ssp->dep_map);
__srcu_read_unlock_nmisafe(ssp, idx);
}
@@ -343,4 +343,9 @@ static inline void smp_mb__after_srcu_read_unlock(void)
/* __srcu_read_unlock has smp_mb() internally so nothing to do here. */
}
+DEFINE_LOCK_GUARD_1(srcu, struct srcu_struct,
+ _T->idx = srcu_read_lock(_T->lock),
+ srcu_read_unlock(_T->lock, _T->idx),
+ int idx)
+
#endif
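
With this guard, an SRCU read side no longer needs to carry the index from
srcu_read_lock() to srcu_read_unlock() by hand; the guard stores it and
releases it at scope exit. A minimal sketch (the srcu_struct is hypothetical):

	DEFINE_SRCU(example_srcu);

	static void example_reader(void)
	{
		/* srcu_read_unlock() with the saved index runs at scope exit */
		guard(srcu)(&example_srcu);
		/* ... dereference SRCU-protected pointers here ... */
	}
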
diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h
index ebd72491af99..447133171d95 100644
--- a/include/linux/srcutiny.h
+++ b/include/linux/srcutiny.h
@@ -48,6 +48,10 @@ void srcu_drive_gp(struct work_struct *wp);
#define DEFINE_STATIC_SRCU(name) \
static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name, name)
+// Dummy structure for srcu_notifier_head.
+struct srcu_usage { };
+#define __SRCU_USAGE_INIT(name) { }
+
void synchronize_srcu(struct srcu_struct *ssp);
/*
diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
index e58306783d8e..adcbb8f23600 100644
--- a/include/linux/stackdepot.h
+++ b/include/linux/stackdepot.h
@@ -11,8 +11,6 @@
* SLUB_DEBUG needs 256 bytes per object for that). Since allocation and free
 * stack traces often repeat, using stack depot allows saving about 100x the space.
*
- * Stack traces are never removed from the stack depot.
- *
* Author: Alexander Potapenko <glider@google.com>
* Copyright (C) 2016 Google, Inc.
*
@@ -32,6 +30,18 @@ typedef u32 depot_stack_handle_t;
*/
#define STACK_DEPOT_EXTRA_BITS 5
+typedef u32 depot_flags_t;
+
+/*
+ * Flags that can be passed to stack_depot_save_flags(); see the comment next
+ * to its declaration for more details.
+ */
+#define STACK_DEPOT_FLAG_CAN_ALLOC ((depot_flags_t)0x0001)
+#define STACK_DEPOT_FLAG_GET ((depot_flags_t)0x0002)
+
+#define STACK_DEPOT_FLAGS_NUM 2
+#define STACK_DEPOT_FLAGS_MASK ((depot_flags_t)((1 << STACK_DEPOT_FLAGS_NUM) - 1))
+
/*
* Using stack depot requires its initialization, which can be done in 3 ways:
*
@@ -69,31 +79,39 @@ static inline int stack_depot_early_init(void) { return 0; }
#endif
/**
- * __stack_depot_save - Save a stack trace to stack depot
+ * stack_depot_save_flags - Save a stack trace to stack depot
*
* @entries: Pointer to the stack trace
* @nr_entries: Number of frames in the stack
* @alloc_flags: Allocation GFP flags
- * @can_alloc: Allocate stack pools (increased chance of failure if false)
+ * @depot_flags: Stack depot flags
+ *
+ * Saves a stack trace from @entries array of size @nr_entries.
+ *
+ * If STACK_DEPOT_FLAG_CAN_ALLOC is set in @depot_flags, stack depot can
+ * replenish the stack pools in case no space is left (allocates using GFP
+ * flags of @alloc_flags). Otherwise, stack depot avoids any allocations and
+ * fails if no space is left to store the stack trace.
*
- * Saves a stack trace from @entries array of size @nr_entries. If @can_alloc is
- * %true, stack depot can replenish the stack pools in case no space is left
- * (allocates using GFP flags of @alloc_flags). If @can_alloc is %false, avoids
- * any allocations and fails if no space is left to store the stack trace.
+ * If STACK_DEPOT_FLAG_GET is set in @depot_flags, stack depot will increment
+ * the refcount on the saved stack trace if it already exists in stack depot.
+ * Users of this flag must also call stack_depot_put() when keeping the stack
+ * trace is no longer required to avoid overflowing the refcount.
*
* If the provided stack trace comes from the interrupt context, only the part
* up to the interrupt entry is saved.
*
- * Context: Any context, but setting @can_alloc to %false is required if
+ * Context: Any context, but omitting STACK_DEPOT_FLAG_CAN_ALLOC is required if
* alloc_pages() cannot be used from the current context. Currently
* this is the case for contexts where neither %GFP_ATOMIC nor
* %GFP_NOWAIT can be used (NMI, raw_spin_lock).
*
* Return: Handle of the stack struct stored in depot, 0 on failure
*/
-depot_stack_handle_t __stack_depot_save(unsigned long *entries,
- unsigned int nr_entries,
- gfp_t gfp_flags, bool can_alloc);
+depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
+ unsigned int nr_entries,
+ gfp_t gfp_flags,
+ depot_flags_t depot_flags);
/**
* stack_depot_save - Save a stack trace to stack depot
@@ -102,8 +120,11 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
* @nr_entries: Number of frames in the stack
* @alloc_flags: Allocation GFP flags
*
- * Context: Contexts where allocations via alloc_pages() are allowed.
- * See __stack_depot_save() for more details.
+ * Does not increment the refcount on the saved stack trace; see
+ * stack_depot_save_flags() for more details.
+ *
+ * Context: Contexts where allocations via alloc_pages() are allowed;
+ * see stack_depot_save_flags() for more details.
*
* Return: Handle of the stack trace stored in depot, 0 on failure
*/
@@ -142,6 +163,18 @@ int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
int spaces);
/**
+ * stack_depot_put - Drop a reference to a stack trace from stack depot
+ *
+ * @handle: Stack depot handle returned from stack_depot_save()
+ *
+ * The stack trace is evicted from stack depot once all references to it have
+ * been dropped (once the number of stack_depot_put() calls matches the
+ * number of stack_depot_save_flags() calls with STACK_DEPOT_FLAG_GET set for
+ * this stack trace).
+ */
+void stack_depot_put(depot_stack_handle_t handle);
+
+/**
* stack_depot_set_extra_bits - Set extra bits in a stack depot handle
*
* @handle: Stack depot handle returned from stack_depot_save()
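
Together, STACK_DEPOT_FLAG_GET and stack_depot_put() make depot handles
refcounted for callers that opt in. A hedged save/put pairing sketch
(stack_trace_save() comes from <linux/stacktrace.h>):

	static depot_stack_handle_t example_save(void)
	{
		unsigned long entries[16];
		unsigned int nr;

		nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
		/* take a reference; may allocate a new pool with GFP_KERNEL */
		return stack_depot_save_flags(entries, nr, GFP_KERNEL,
					      STACK_DEPOT_FLAG_CAN_ALLOC |
					      STACK_DEPOT_FLAG_GET);
	}

	static void example_release(depot_stack_handle_t handle)
	{
		if (handle)
			stack_depot_put(handle); /* drop the reference */
	}
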
diff --git a/include/linux/stackleak.h b/include/linux/stackleak.h
index c36e7a3b45e7..3be2cb564710 100644
--- a/include/linux/stackleak.h
+++ b/include/linux/stackleak.h
@@ -14,6 +14,7 @@
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
#include <asm/stacktrace.h>
+#include <linux/linkage.h>
/*
* The lowest address on tsk's stack which we can plausibly erase.
@@ -76,6 +77,11 @@ static inline void stackleak_task_init(struct task_struct *t)
# endif
}
+asmlinkage void noinstr stackleak_erase(void);
+asmlinkage void noinstr stackleak_erase_on_task_stack(void);
+asmlinkage void noinstr stackleak_erase_off_task_stack(void);
+void __no_caller_saved_registers noinstr stackleak_track_stack(void);
+
#else /* !CONFIG_GCC_PLUGIN_STACKLEAK */
static inline void stackleak_task_init(struct task_struct *t) { }
#endif
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 225751a8fd8e..dee5ad6e48c5 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -76,6 +76,8 @@
| DMA_AXI_BLEN_32 | DMA_AXI_BLEN_64 \
| DMA_AXI_BLEN_128 | DMA_AXI_BLEN_256)
+struct stmmac_priv;
+
 /* Platform data for platform device structure's platform_data field */
struct stmmac_mdio_bus_data {
@@ -137,6 +139,7 @@ struct stmmac_rxq_cfg {
struct stmmac_txq_cfg {
u32 weight;
+ bool coe_unsupported;
u8 mode_to_use;
/* Credit Base Shaper parameters */
u32 send_slope;
@@ -172,6 +175,7 @@ struct stmmac_fpe_cfg {
bool hs_enable; /* FPE handshake enable */
enum stmmac_fpe_state lp_fpe_state; /* Link Partner FPE state */
enum stmmac_fpe_state lo_fpe_state; /* Local station FPE state */
+ u32 fpe_csr; /* MAC_FPE_CTRL_STS reg cache */
};
struct stmmac_safety_feature_cfg {
@@ -204,14 +208,41 @@ struct dwmac4_addrs {
u32 mtl_low_cred_offset;
};
+#define STMMAC_FLAG_HAS_INTEGRATED_PCS BIT(0)
+#define STMMAC_FLAG_SPH_DISABLE BIT(1)
+#define STMMAC_FLAG_USE_PHY_WOL BIT(2)
+#define STMMAC_FLAG_HAS_SUN8I BIT(3)
+#define STMMAC_FLAG_TSO_EN BIT(4)
+#define STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP BIT(5)
+#define STMMAC_FLAG_VLAN_FAIL_Q_EN BIT(6)
+#define STMMAC_FLAG_MULTI_MSI_EN BIT(7)
+#define STMMAC_FLAG_EXT_SNAPSHOT_EN BIT(8)
+#define STMMAC_FLAG_INT_SNAPSHOT_EN BIT(9)
+#define STMMAC_FLAG_RX_CLK_RUNS_IN_LPI BIT(10)
+#define STMMAC_FLAG_EN_TX_LPI_CLOCKGATING BIT(11)
+#define STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY BIT(12)
+
struct plat_stmmacenet_data {
int bus_id;
int phy_addr;
- int interface;
+ /* MAC ----- optional PCS ----- SerDes ----- optional PHY ----- Media
+ * ^ ^
+ * mac_interface phy_interface
+ *
+ * mac_interface is the MAC-side interface, which may be the same
+ * as phy_interface if there is no intervening PCS. If there is a
+ * PCS, then mac_interface describes the interface mode between the
+ * MAC and PCS, and phy_interface describes the interface mode
+ * between the PCS and PHY.
+ */
+ phy_interface_t mac_interface;
+ /* phy_interface is the PHY-side interface - the interface used by
+ * an attached PHY.
+ */
phy_interface_t phy_interface;
struct stmmac_mdio_bus_data *mdio_bus_data;
struct device_node *phy_node;
- struct device_node *phylink_node;
+ struct fwnode_handle *port_node;
struct device_node *mdio_node;
struct stmmac_dma_cfg *dma_cfg;
struct stmmac_est *est;
@@ -240,12 +271,12 @@ struct plat_stmmacenet_data {
u8 tx_sched_algorithm;
struct stmmac_rxq_cfg rx_queues_cfg[MTL_MAX_RX_QUEUES];
struct stmmac_txq_cfg tx_queues_cfg[MTL_MAX_TX_QUEUES];
- void (*fix_mac_speed)(void *priv, unsigned int speed);
+ void (*fix_mac_speed)(void *priv, unsigned int speed, unsigned int mode);
int (*fix_soc_reset)(void *priv, void __iomem *ioaddr);
int (*serdes_powerup)(struct net_device *ndev, void *priv);
void (*serdes_powerdown)(struct net_device *ndev, void *priv);
void (*speed_mode_2500)(struct net_device *ndev, void *priv);
- void (*ptp_clk_freq_config)(void *priv);
+ void (*ptp_clk_freq_config)(struct stmmac_priv *priv);
int (*init)(struct platform_device *pdev, void *priv);
void (*exit)(struct platform_device *pdev, void *priv);
struct mac_device_info *(*setup)(void *priv);
@@ -266,22 +297,13 @@ struct plat_stmmacenet_data {
struct reset_control *stmmac_ahb_rst;
struct stmmac_axi *axi;
int has_gmac4;
- bool has_sun8i;
- bool tso_en;
int rss_en;
int mac_port_sel_speed;
- bool en_tx_lpi_clockgating;
- bool rx_clk_runs_in_lpi;
int has_xgmac;
- bool vlan_fail_q_en;
u8 vlan_fail_q;
unsigned int eee_usecs_rate;
struct pci_dev *pdev;
int int_snapshot_num;
- int ext_snapshot_num;
- bool int_snapshot_en;
- bool ext_snapshot_en;
- bool multi_msi_en;
int msi_mac_vec;
int msi_wol_vec;
int msi_lpi_vec;
@@ -289,9 +311,7 @@ struct plat_stmmacenet_data {
int msi_sfty_ue_vec;
int msi_rx_base_vec;
int msi_tx_base_vec;
- bool use_phy_wol;
- bool sph_disable;
- bool serdes_up_after_phy_linkup;
const struct dwmac4_addrs *dwmac4_addrs;
+ unsigned int flags;
};
#endif
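
With the scattered bools collapsed into one flags word, platform glue drivers
set capabilities in a single statement. A hedged sketch of the conversion
(the helper name is hypothetical):

	static void example_dwmac_config(struct plat_stmmacenet_data *plat)
	{
		/* previously: plat->tso_en = true; plat->sph_disable = true; */
		plat->flags |= STMMAC_FLAG_TSO_EN | STMMAC_FLAG_SPH_DISABLE;
	}
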
diff --git a/include/linux/string.h b/include/linux/string.h
index c062c581a98b..ab148d8dbfc1 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -2,10 +2,13 @@
#ifndef _LINUX_STRING_H_
#define _LINUX_STRING_H_
+#include <linux/array_size.h>
#include <linux/compiler.h> /* for inline */
#include <linux/types.h> /* for size_t */
#include <linux/stddef.h> /* for NULL */
+#include <linux/err.h> /* for ERR_PTR() */
#include <linux/errno.h> /* for E2BIG */
+#include <linux/overflow.h> /* for check_mul_overflow() */
#include <linux/stdarg.h>
#include <uapi/linux/string.h>
@@ -14,6 +17,44 @@ extern void *memdup_user(const void __user *, size_t);
extern void *vmemdup_user(const void __user *, size_t);
extern void *memdup_user_nul(const void __user *, size_t);
+/**
+ * memdup_array_user - duplicate array from user space
+ * @src: source address in user space
+ * @n: number of array members to copy
+ * @size: size of one array member
+ *
+ * Return: an ERR_PTR() on failure. Result is physically
+ * contiguous, to be freed by kfree().
+ */
+static inline void *memdup_array_user(const void __user *src, size_t n, size_t size)
+{
+ size_t nbytes;
+
+ if (check_mul_overflow(n, size, &nbytes))
+ return ERR_PTR(-EOVERFLOW);
+
+ return memdup_user(src, nbytes);
+}
+
+/**
+ * vmemdup_array_user - duplicate array from user space
+ * @src: source address in user space
+ * @n: number of array members to copy
+ * @size: size of one array member
+ *
+ * Return: an ERR_PTR() on failure. Result may not be
+ * physically contiguous. Use kvfree() to free.
+ */
+static inline void *vmemdup_array_user(const void __user *src, size_t n, size_t size)
+{
+ size_t nbytes;
+
+ if (check_mul_overflow(n, size, &nbytes))
+ return ERR_PTR(-EOVERFLOW);
+
+ return vmemdup_user(src, nbytes);
+}
+
/*
* Include machine specific inline routines
*/
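
These helpers fold the n * size overflow check into the user copy, which is
the pattern ioctl handlers previously open-coded. A hedged caller sketch
(the struct and handler names are hypothetical):

	static int example_ioctl_set(const struct example_item __user *uitems,
				     size_t count)
	{
		struct example_item *items;

		/* returns an ERR_PTR on overflow, allocation or copy failure */
		items = memdup_array_user(uitems, count, sizeof(*items));
		if (IS_ERR(items))
			return PTR_ERR(items);

		/* ... validate and apply items ... */

		kfree(items);
		return 0;
	}
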
@@ -25,9 +66,6 @@ extern char * strcpy(char *,const char *);
#ifndef __HAVE_ARCH_STRNCPY
extern char * strncpy(char *,const char *, __kernel_size_t);
#endif
-#ifndef __HAVE_ARCH_STRLCPY
-size_t strlcpy(char *, const char *, size_t);
-#endif
#ifndef __HAVE_ARCH_STRSCPY
ssize_t strscpy(char *, const char *, size_t);
#endif
@@ -169,7 +207,7 @@ static inline void memcpy_flushcache(void *dst, const void *src, size_t cnt)
#endif
void *memchr_inv(const void *s, int c, size_t n);
-char *strreplace(char *s, char old, char new);
+char *strreplace(char *str, char old, char new);
extern void kfree_const(const void *x);
@@ -277,10 +315,12 @@ void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
*/
#define strtomem_pad(dest, src, pad) do { \
const size_t _dest_len = __builtin_object_size(dest, 1); \
+ const size_t _src_len = __builtin_object_size(src, 1); \
\
BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \
_dest_len == (size_t)-1); \
- memcpy_and_pad(dest, _dest_len, src, strnlen(src, _dest_len), pad); \
+ memcpy_and_pad(dest, _dest_len, src, \
+ strnlen(src, min(_src_len, _dest_len)), pad); \
} while (0)
/**
@@ -298,10 +338,11 @@ void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
*/
#define strtomem(dest, src) do { \
const size_t _dest_len = __builtin_object_size(dest, 1); \
+ const size_t _src_len = __builtin_object_size(src, 1); \
\
BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \
_dest_len == (size_t)-1); \
- memcpy(dest, src, min(_dest_len, strnlen(src, _dest_len))); \
+ memcpy(dest, src, strnlen(src, min(_src_len, _dest_len))); \
} while (0)
/**
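
The new _src_len bound keeps strnlen() from reading past a source that is
shorter than the destination; the calling pattern itself is unchanged
(struct and field names hypothetical):

	struct example_hdr {
		char tag[8];	/* fixed size, not NUL-terminated */
	};

	static void example_set_tag(struct example_hdr *hdr, const char *name)
	{
		/* copy at most 8 bytes of name, zero-fill the remainder */
		strtomem_pad(hdr->tag, name, '\0');
	}
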
diff --git a/include/linux/string_choices.h b/include/linux/string_choices.h
new file mode 100644
index 000000000000..3c1091941eb8
--- /dev/null
+++ b/include/linux/string_choices.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_STRING_CHOICES_H_
+#define _LINUX_STRING_CHOICES_H_
+
+#include <linux/types.h>
+
+static inline const char *str_enable_disable(bool v)
+{
+ return v ? "enable" : "disable";
+}
+
+static inline const char *str_enabled_disabled(bool v)
+{
+ return v ? "enabled" : "disabled";
+}
+
+static inline const char *str_hi_lo(bool v)
+{
+ return v ? "hi" : "lo";
+}
+#define str_lo_hi(v) str_hi_lo(!(v))
+
+static inline const char *str_high_low(bool v)
+{
+ return v ? "high" : "low";
+}
+#define str_low_high(v) str_high_low(!(v))
+
+static inline const char *str_read_write(bool v)
+{
+ return v ? "read" : "write";
+}
+#define str_write_read(v) str_read_write(!(v))
+
+static inline const char *str_on_off(bool v)
+{
+ return v ? "on" : "off";
+}
+
+static inline const char *str_yes_no(bool v)
+{
+ return v ? "yes" : "no";
+}
+
+#endif
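
Pulling these helpers into their own header (string_helpers.h now includes
it, as seen below) keeps the common logging idiom available at low cost:

	static void example_report(bool enabled, bool active_high)
	{
		pr_info("feature %s, polarity %s\n",
			str_enabled_disabled(enabled),
			str_high_low(active_high));
	}
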
diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h
index fae6beaaa217..58fb1f90eda5 100644
--- a/include/linux/string_helpers.h
+++ b/include/linux/string_helpers.h
@@ -4,6 +4,7 @@
#include <linux/bits.h>
#include <linux/ctype.h>
+#include <linux/string_choices.h>
#include <linux/string.h>
#include <linux/types.h>
@@ -23,8 +24,8 @@ enum string_size_units {
STRING_UNITS_2, /* use binary powers of 2^10 */
};
-void string_get_size(u64 size, u64 blk_size, enum string_size_units units,
- char *buf, int len);
+int string_get_size(u64 size, u64 blk_size, enum string_size_units units,
+ char *buf, int len);
int parse_int_array_user(const char __user *from, size_t count, int **array);
@@ -108,34 +109,11 @@ char *kstrdup_quotable(const char *src, gfp_t gfp);
char *kstrdup_quotable_cmdline(struct task_struct *task, gfp_t gfp);
char *kstrdup_quotable_file(struct file *file, gfp_t gfp);
+char *kstrdup_and_replace(const char *src, char old, char new, gfp_t gfp);
+
char **kasprintf_strarray(gfp_t gfp, const char *prefix, size_t n);
void kfree_strarray(char **array, size_t n);
char **devm_kasprintf_strarray(struct device *dev, const char *prefix, size_t n);
-static inline const char *str_yes_no(bool v)
-{
- return v ? "yes" : "no";
-}
-
-static inline const char *str_on_off(bool v)
-{
- return v ? "on" : "off";
-}
-
-static inline const char *str_enable_disable(bool v)
-{
- return v ? "enable" : "disable";
-}
-
-static inline const char *str_enabled_disabled(bool v)
-{
- return v ? "enabled" : "disabled";
-}
-
-static inline const char *str_read_write(bool v)
-{
- return v ? "read" : "write";
-}
-
#endif
diff --git a/include/linux/stringify.h b/include/linux/stringify.h
index 841cec8ed525..0e84cbe65270 100644
--- a/include/linux/stringify.h
+++ b/include/linux/stringify.h
@@ -9,4 +9,6 @@
#define __stringify_1(x...) #x
#define __stringify(x...) __stringify_1(x)
+#define FILE_LINE __FILE__ ":" __stringify(__LINE__)
+
#endif /* !__LINUX_STRINGIFY_H */
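
FILE_LINE concatenates into a single string literal at compile time, so it
can be embedded directly in format strings:

	/* expands to e.g. "drivers/foo/bar.c:123" */
	pr_debug("reached " FILE_LINE "\n");
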
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index 3e6ce288a7fc..61e58327b1aa 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -120,6 +120,7 @@ struct rpc_authops {
struct rpcsec_gss_info *);
int (*key_timeout)(struct rpc_auth *,
struct rpc_cred *);
+ int (*ping)(struct rpc_clnt *clnt);
};
struct rpc_credops {
@@ -144,6 +145,7 @@ struct rpc_credops {
extern const struct rpc_authops authunix_ops;
extern const struct rpc_authops authnull_ops;
+extern const struct rpc_authops authtls_ops;
int __init rpc_init_authunix(void);
int __init rpcauth_init_module(void);
diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h
index db30a159f9d5..f22bf915dcf6 100644
--- a/include/linux/sunrpc/bc_xprt.h
+++ b/include/linux/sunrpc/bc_xprt.h
@@ -20,7 +20,8 @@
#ifdef CONFIG_SUNRPC_BACKCHANNEL
struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid);
void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied);
-void xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task);
+void xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task,
+ const struct rpc_timeout *to);
void xprt_free_bc_request(struct rpc_rqst *req);
int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs);
void xprt_destroy_backchannel(struct rpc_xprt *, unsigned int max_reqs);
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index 518bd28f5ab8..35766963dd14 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -56,10 +56,14 @@ struct cache_head {
struct kref ref;
unsigned long flags;
};
-#define CACHE_VALID 0 /* Entry contains valid data */
-#define CACHE_NEGATIVE 1 /* Negative entry - there is no match for the key */
-#define CACHE_PENDING 2 /* An upcall has been sent but no reply received yet*/
-#define CACHE_CLEANED 3 /* Entry has been cleaned from cache */
+
+/* cache_head.flags */
+enum {
+ CACHE_VALID, /* Entry contains valid data */
+ CACHE_NEGATIVE, /* Negative entry - there is no match for the key */
+	CACHE_PENDING,	/* An upcall has been sent but no reply received yet */
+ CACHE_CLEANED, /* Entry has been cleaned from cache */
+};
#define CACHE_NEW_EXPIRY 120 /* keep new things pending confirmation for 120 seconds */
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 770ef2cb5775..5e9d1469c6fa 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -30,7 +30,13 @@
#include <linux/sunrpc/xprtmultipath.h>
struct rpc_inode;
-struct rpc_sysfs_client;
+struct rpc_sysfs_client {
+ struct kobject kobject;
+ struct net *net;
+ struct rpc_clnt *clnt;
+ struct rpc_xprt_switch *xprt_switch;
+};
+
/*
* The high-level client handle
@@ -57,7 +63,9 @@ struct rpc_clnt {
cl_discrtry : 1,/* disconnect before retry */
cl_noretranstimeo: 1,/* No retransmit timeouts */
cl_autobind : 1,/* use getport() */
- cl_chatty : 1;/* be verbose */
+ cl_chatty : 1,/* be verbose */
+ cl_shutdown : 1;/* rpc immediate -EIO */
+ struct xprtsec_parms cl_xprtsec; /* transport security policy */
struct rpc_rtt * cl_rtt; /* RTO estimator data */
const struct rpc_timeout *cl_timeout; /* Timeout strategy */
@@ -84,6 +92,7 @@ struct rpc_clnt {
};
const struct cred *cl_cred;
unsigned int cl_max_connect; /* max number of transports not to the same IP */
+ struct super_block *pipefs_sb;
};
/*
@@ -139,6 +148,9 @@ struct rpc_create_args {
struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */
const struct cred *cred;
unsigned int max_connect;
+ struct xprtsec_parms xprtsec;
+ unsigned long connect_timeout;
+ unsigned long reconnect_timeout;
};
struct rpc_add_xprt_test {
@@ -240,7 +252,6 @@ void rpc_clnt_probe_trunked_xprts(struct rpc_clnt *,
const char *rpc_proc_name(const struct rpc_task *task);
-void rpc_clnt_xprt_switch_put(struct rpc_clnt *);
void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *, struct rpc_xprt *);
void rpc_clnt_xprt_switch_remove_xprt(struct rpc_clnt *, struct rpc_xprt *);
bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 8ada7dc802d3..2d61987b3545 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -38,6 +38,17 @@ struct rpc_wait {
};
/*
+ * This describes a timeout strategy
+ */
+struct rpc_timeout {
+ unsigned long to_initval, /* initial timeout */
+ to_maxval, /* max timeout */
+ to_increment; /* if !exponential */
+ unsigned int to_retries; /* max # of retries */
+ unsigned char to_exponential;
+};
+
+/*
* This is the RPC task struct
*/
struct rpc_task {
@@ -205,7 +216,8 @@ struct rpc_wait_queue {
*/
struct rpc_task *rpc_new_task(const struct rpc_task_setup *);
struct rpc_task *rpc_run_task(const struct rpc_task_setup *);
-struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req);
+struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
+ struct rpc_timeout *timeout);
void rpc_put_task(struct rpc_task *);
void rpc_put_task_async(struct rpc_task *);
bool rpc_task_set_rpc_status(struct rpc_task *task, int rpc_status);
diff --git a/include/linux/sunrpc/stats.h b/include/linux/sunrpc/stats.h
index d94d4f410507..3ce1550d1beb 100644
--- a/include/linux/sunrpc/stats.h
+++ b/include/linux/sunrpc/stats.h
@@ -43,22 +43,6 @@ struct net;
#ifdef CONFIG_PROC_FS
int rpc_proc_init(struct net *);
void rpc_proc_exit(struct net *);
-#else
-static inline int rpc_proc_init(struct net *net)
-{
- return 0;
-}
-
-static inline void rpc_proc_exit(struct net *net)
-{
-}
-#endif
-
-#ifdef MODULE
-void rpc_modcount(struct inode *, int);
-#endif
-
-#ifdef CONFIG_PROC_FS
struct proc_dir_entry * rpc_proc_register(struct net *,struct rpc_stat *);
void rpc_proc_unregister(struct net *,const char *);
void rpc_proc_zero(const struct rpc_program *);
@@ -69,7 +53,14 @@ void svc_proc_unregister(struct net *, const char *);
void svc_seq_show(struct seq_file *,
const struct svc_stat *);
#else
+static inline int rpc_proc_init(struct net *net)
+{
+ return 0;
+}
+static inline void rpc_proc_exit(struct net *net)
+{
+}
static inline struct proc_dir_entry *rpc_proc_register(struct net *net, struct rpc_stat *s) { return NULL; }
static inline void rpc_proc_unregister(struct net *net, const char *p) {}
static inline void rpc_proc_zero(const struct rpc_program *p) {}
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 762d7231e574..67cf1c9efd80 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -17,6 +17,7 @@
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/svcauth.h>
+#include <linux/lwq.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/pagevec.h>
@@ -33,22 +34,27 @@
*/
struct svc_pool {
unsigned int sp_id; /* pool id; also node id on NUMA */
- spinlock_t sp_lock; /* protects all fields */
- struct list_head sp_sockets; /* pending sockets */
- unsigned int sp_nrthreads; /* # of threads in pool */
+ struct lwq sp_xprts; /* pending transports */
+ atomic_t sp_nrthreads; /* # of threads in pool */
struct list_head sp_all_threads; /* all server threads */
+ struct llist_head sp_idle_threads; /* idle server threads */
/* statistics on pool operation */
+ struct percpu_counter sp_messages_arrived;
struct percpu_counter sp_sockets_queued;
struct percpu_counter sp_threads_woken;
- struct percpu_counter sp_threads_timedout;
-#define SP_TASK_PENDING (0) /* still work to do even if no
- * xprt is queued. */
-#define SP_CONGESTED (1)
unsigned long sp_flags;
} ____cacheline_aligned_in_smp;
+/* bits for sp_flags */
+enum {
+ SP_TASK_PENDING, /* still work to do even if no xprt is queued */
+ SP_NEED_VICTIM, /* One thread needs to agree to exit */
+ SP_VICTIM_REMAINS, /* One thread needs to actually exit */
+};
+
+
/*
* RPC service.
*
@@ -63,7 +69,6 @@ struct svc_serv {
struct svc_program * sv_program; /* RPC program */
struct svc_stat * sv_stats; /* RPC statistics */
spinlock_t sv_lock;
- struct kref sv_refcnt;
unsigned int sv_nrthreads; /* # of server threads */
unsigned int sv_maxconn; /* max connections allowed or
* '0' causing max to be based
@@ -84,54 +89,20 @@ struct svc_serv {
int (*sv_threadfn)(void *data);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
- struct list_head sv_cb_list; /* queue for callback requests
+ struct lwq sv_cb_list; /* queue for callback requests
* that arrive over the same
* connection */
- spinlock_t sv_cb_lock; /* protects the svc_cb_list */
- wait_queue_head_t sv_cb_waitq; /* sleep here if there are no
- * entries in the svc_cb_list */
bool sv_bc_enabled; /* service uses backchannel */
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
};
-/**
- * svc_get() - increment reference count on a SUNRPC serv
- * @serv: the svc_serv to have count incremented
- *
- * Returns: the svc_serv that was passed in.
- */
-static inline struct svc_serv *svc_get(struct svc_serv *serv)
-{
- kref_get(&serv->sv_refcnt);
- return serv;
-}
-
-void svc_destroy(struct kref *);
+/* This is used by pool_stats to find and lock an svc */
+struct svc_info {
+ struct svc_serv *serv;
+ struct mutex *mutex;
+};
-/**
- * svc_put - decrement reference count on a SUNRPC serv
- * @serv: the svc_serv to have count decremented
- *
- * When the reference count reaches zero, svc_destroy()
- * is called to clean up and free the serv.
- */
-static inline void svc_put(struct svc_serv *serv)
-{
- kref_put(&serv->sv_refcnt, svc_destroy);
-}
-
-/**
- * svc_put_not_last - decrement non-final reference count on SUNRPC serv
- * @serv: the svc_serv to have count decremented
- *
- * Returns: %true is refcount was decremented.
- *
- * If the refcount is 1, it is not decremented and instead failure is reported.
- */
-static inline bool svc_put_not_last(struct svc_serv *serv)
-{
- return refcount_dec_not_one(&serv->sv_refcnt.refcount);
-}
+void svc_destroy(struct svc_serv **svcp);
/*
* Maximum payload size supported by a kernel RPC server.
@@ -161,16 +132,15 @@ static inline bool svc_put_not_last(struct svc_serv *serv)
extern u32 svc_max_payload(const struct svc_rqst *rqstp);
/*
- * RPC Requsts and replies are stored in one or more pages.
+ * RPC Requests and replies are stored in one or more pages.
* We maintain an array of pages for each server thread.
* Requests are copied into these pages as they arrive. Remaining
* pages are available to write the reply into.
*
- * Pages are sent using ->sendpage so each server thread needs to
- * allocate more to replace those used in sending. To help keep track
- * of these pages we have a receive list where all pages initialy live,
- * and a send list where pages are moved to when there are to be part
- * of a reply.
+ * Pages are sent using ->sendmsg with MSG_SPLICE_PAGES so each server thread
+ * needs to allocate more to replace those used in sending. To help keep track
+ * of these pages we have a receive list where all pages initially live, and a
+ * send list where pages are moved to when they are to be part of a reply.
*
* We use xdr_buf for holding responses as it fits well with NFS
* read responses (that have a header, and some data pages, and possibly
@@ -196,6 +166,7 @@ extern u32 svc_max_payload(const struct svc_rqst *rqstp);
*/
struct svc_rqst {
struct list_head rq_all; /* all threads list */
+ struct llist_node rq_idle; /* On the idle list */
struct rcu_head rq_rcu_head; /* for RCU deferred kfree */
struct svc_xprt * rq_xprt; /* transport ptr */
@@ -223,7 +194,7 @@ struct svc_rqst {
struct page * *rq_next_page; /* next reply page to use */
struct page * *rq_page_end; /* one past the last page */
- struct pagevec rq_pvec;
+ struct folio_batch rq_fbatch;
struct kvec rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. */
struct bio_vec rq_bvec[RPCSVC_MAXPAGES];
@@ -233,16 +204,6 @@ struct svc_rqst {
u32 rq_proc; /* procedure number */
u32 rq_prot; /* IP protocol */
int rq_cachetype; /* catering to nfsd */
-#define RQ_SECURE (0) /* secure port */
-#define RQ_LOCAL (1) /* local request */
-#define RQ_USEDEFERRAL (2) /* use deferral */
-#define RQ_DROPME (3) /* drop current reply */
-#define RQ_SPLICE_OK (4) /* turned off in gss privacy
- * to prevent encrypting page
- * cache pages */
-#define RQ_VICTIM (5) /* about to be shut down */
-#define RQ_BUSY (6) /* request is busy */
-#define RQ_DATA (7) /* request has data */
unsigned long rq_flags; /* flags field */
ktime_t rq_qtime; /* enqueue time */
@@ -266,12 +227,24 @@ struct svc_rqst {
/* Catering to nfsd */
struct auth_domain * rq_client; /* RPC peer info */
struct auth_domain * rq_gssclient; /* "gss/"-style peer info */
- struct svc_cacherep * rq_cacherep; /* cache info */
struct task_struct *rq_task; /* service thread */
struct net *rq_bc_net; /* pointer to backchannel's
* net namespace
*/
+ unsigned long bc_to_initval;
+ unsigned int bc_to_retries;
void ** rq_lease_breaker; /* The v4 client breaking a lease */
+ unsigned int rq_status_counter; /* RPC processing counter */
+};
+
+/* bits for rq_flags */
+enum {
+ RQ_SECURE, /* secure port */
+ RQ_LOCAL, /* local request */
+ RQ_USEDEFERRAL, /* use deferral */
+ RQ_DROPME, /* drop current reply */
+ RQ_VICTIM, /* Have agreed to shut down */
+ RQ_DATA, /* request has data */
};
#define SVC_NET(rqst) (rqst->rq_xprt ? rqst->rq_xprt->xpt_net : rqst->rq_bc_net)
@@ -309,6 +282,28 @@ static inline struct sockaddr *svc_daddr(const struct svc_rqst *rqst)
return (struct sockaddr *) &rqst->rq_daddr;
}
+/**
+ * svc_thread_should_stop - check if this thread should stop
+ * @rqstp: the thread that might need to stop
+ *
+ * To stop an svc thread, the pool flags SP_NEED_VICTIM and SP_VICTIM_REMAINS
+ * are set. The first thread which sees SP_NEED_VICTIM clears it, becoming
+ * the victim using this function. It should then promptly call
+ * svc_exit_thread() to complete the process, clearing SP_VICTIM_REMAINS
+ * so the task waiting for a thread to exit can wake and continue.
+ *
+ * Return values:
+ * %true: caller should invoke svc_exit_thread()
+ * %false: caller should do nothing
+ */
+static inline bool svc_thread_should_stop(struct svc_rqst *rqstp)
+{
+ if (test_and_clear_bit(SP_NEED_VICTIM, &rqstp->rq_pool->sp_flags))
+ set_bit(RQ_VICTIM, &rqstp->rq_flags);
+
+ return test_bit(RQ_VICTIM, &rqstp->rq_flags);
+}
+
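
In practice a service thread polls this at the top of its loop; a hedged
sketch of the expected threadfn shape (per-implementation setup elided):

	static int example_svc_threadfn(void *data)
	{
		struct svc_rqst *rqstp = data;

		while (!svc_thread_should_stop(rqstp))
			svc_recv(rqstp);	/* wait for and handle one request */

		svc_exit_thread(rqstp);		/* clears SP_VICTIM_REMAINS */
		return 0;
	}
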
struct svc_deferred_req {
u32 prot; /* protocol (UDP or TCP) */
struct svc_xprt *xprt;
@@ -345,7 +340,7 @@ struct svc_program {
char * pg_name; /* service name */
char * pg_class; /* class name: services sharing authentication */
struct svc_stat * pg_stats; /* rpc statistics */
- int (*pg_authenticate)(struct svc_rqst *);
+ enum svc_auth_status (*pg_authenticate)(struct svc_rqst *rqstp);
__be32 (*pg_init_request)(struct svc_rqst *,
const struct svc_program *,
struct svc_process_info *);
@@ -419,15 +414,15 @@ void svc_exit_thread(struct svc_rqst *);
struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int,
int (*threadfn)(void *data));
int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
-int svc_pool_stats_open(struct svc_serv *serv, struct file *file);
+int svc_pool_stats_open(struct svc_info *si, struct file *file);
void svc_process(struct svc_rqst *rqstp);
-int bc_svc_process(struct svc_serv *, struct rpc_rqst *,
- struct svc_rqst *);
+void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp);
int svc_register(const struct svc_serv *, struct net *, const int,
const unsigned short, const unsigned short);
void svc_wake_up(struct svc_serv *);
void svc_reserve(struct svc_rqst *rqstp, int space);
+void svc_pool_wake_idle_thread(struct svc_pool *pool);
struct svc_pool *svc_pool_for_cpu(struct svc_serv *serv);
char * svc_print_addr(struct svc_rqst *, char *, size_t);
const char * svc_proc_name(const struct svc_rqst *rqstp);
@@ -509,6 +504,27 @@ static inline void svcxdr_init_encode(struct svc_rqst *rqstp)
}
/**
+ * svcxdr_encode_opaque_pages - Insert pages into an xdr_stream
+ * @rqstp: RPC transaction context
+ * @xdr: xdr_stream to be updated
+ * @pages: array of pages to insert
+ * @base: starting offset of first data byte in @pages
+ * @len: number of data bytes in @pages to insert
+ *
+ * After the @pages are added, the tail iovec is instantiated pointing
+ * to end of the head buffer, and the stream is set up to encode
+ * subsequent items into the tail.
+ */
+static inline void svcxdr_encode_opaque_pages(struct svc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct page **pages,
+ unsigned int base,
+ unsigned int len)
+{
+ xdr_write_pages(xdr, pages, base, len);
+ xdr->page_ptr = rqstp->rq_next_page - 1;
+}
+
+/**
* svcxdr_set_auth_slack -
* @rqstp: RPC transaction
* @slack: buffer space to reserve for the transaction's security flavor
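
A hedged sketch of how a READ-style reply might use
svcxdr_encode_opaque_pages(); the page source and length below are
placeholders, not a specific server's code:

	static void example_encode_payload(struct svc_rqst *rqstp,
					   struct xdr_stream *xdr,
					   unsigned int count)
	{
		/* payload pages were filled by the preceding read */
		svcxdr_encode_opaque_pages(rqstp, xdr, rqstp->rq_pages, 0, count);
		/* later xdr_reserve_space() calls now land in the tail */
	}
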
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index fbc4bd423b35..e7595ae62fe2 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -65,6 +65,7 @@ extern unsigned int svcrdma_ord;
extern unsigned int svcrdma_max_requests;
extern unsigned int svcrdma_max_bc_requests;
extern unsigned int svcrdma_max_req_size;
+extern struct workqueue_struct *svcrdma_wq;
extern struct percpu_counter svcrdma_stat_read;
extern struct percpu_counter svcrdma_stat_recv;
@@ -97,6 +98,7 @@ struct svcxprt_rdma {
u32 sc_pending_recvs;
u32 sc_recv_batch;
struct list_head sc_rq_dto_q;
+ struct list_head sc_read_complete_q;
spinlock_t sc_rq_dto_lock;
struct ib_qp *sc_qp;
struct ib_cq *sc_rq_cq;
@@ -115,6 +117,13 @@ struct svcxprt_rdma {
/* sc_flags */
#define RDMAXPRT_CONN_PENDING 3
+static inline struct svcxprt_rdma *svc_rdma_rqst_rdma(struct svc_rqst *rqstp)
+{
+ struct svc_xprt *xprt = rqstp->rq_xprt;
+
+ return container_of(xprt, struct svcxprt_rdma, sc_xprt);
+}
+
/*
* Default connection parameters
*/
@@ -126,6 +135,43 @@ enum {
#define RPCSVC_MAXPAYLOAD_RDMA RPCSVC_MAXPAYLOAD
+/**
+ * svc_rdma_recv_cid_init - Initialize a Receive Queue completion ID
+ * @rdma: controlling transport
+ * @cid: completion ID to initialize
+ */
+static inline void svc_rdma_recv_cid_init(struct svcxprt_rdma *rdma,
+ struct rpc_rdma_cid *cid)
+{
+ cid->ci_queue_id = rdma->sc_rq_cq->res.id;
+ cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
+}
+
+/**
+ * svc_rdma_send_cid_init - Initialize a Send Queue completion ID
+ * @rdma: controlling transport
+ * @cid: completion ID to initialize
+ */
+static inline void svc_rdma_send_cid_init(struct svcxprt_rdma *rdma,
+ struct rpc_rdma_cid *cid)
+{
+ cid->ci_queue_id = rdma->sc_sq_cq->res.id;
+ cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
+}
+
+/*
+ * A chunk context tracks all I/O for moving one Read or Write
+ * chunk. This is a set of rdma_rw's that handle data movement
+ * for all segments of one chunk.
+ */
+struct svc_rdma_chunk_ctxt {
+ struct rpc_rdma_cid cc_cid;
+ struct ib_cqe cc_cqe;
+ struct list_head cc_rwctxts;
+ ktime_t cc_posttime;
+ int cc_sqecount;
+};
+
struct svc_rdma_recv_ctxt {
struct llist_node rc_node;
struct list_head rc_list;
@@ -135,32 +181,42 @@ struct svc_rdma_recv_ctxt {
struct ib_sge rc_recv_sge;
void *rc_recv_buf;
struct xdr_stream rc_stream;
- bool rc_temp;
u32 rc_byte_len;
- unsigned int rc_page_count;
u32 rc_inv_rkey;
__be32 rc_msgtype;
+ /* State for pulling a Read chunk */
+ unsigned int rc_pageoff;
+ unsigned int rc_curpage;
+ unsigned int rc_readbytes;
+ struct xdr_buf rc_saved_arg;
+ struct svc_rdma_chunk_ctxt rc_cc;
+
struct svc_rdma_pcl rc_call_pcl;
struct svc_rdma_pcl rc_read_pcl;
struct svc_rdma_chunk *rc_cur_result_payload;
struct svc_rdma_pcl rc_write_pcl;
struct svc_rdma_pcl rc_reply_pcl;
+
+ unsigned int rc_page_count;
+ struct page *rc_pages[RPCSVC_MAXPAGES];
};
struct svc_rdma_send_ctxt {
struct llist_node sc_node;
struct rpc_rdma_cid sc_cid;
+ struct work_struct sc_work;
+ struct svcxprt_rdma *sc_rdma;
struct ib_send_wr sc_send_wr;
struct ib_cqe sc_cqe;
- struct completion sc_done;
struct xdr_buf sc_hdrbuf;
struct xdr_stream sc_stream;
void *sc_xprt_buf;
+ int sc_page_count;
int sc_cur_sge_no;
-
+ struct page *sc_pages[RPCSVC_MAXPAGES];
struct ib_sge sc_sges[];
};
@@ -181,6 +237,11 @@ extern int svc_rdma_recvfrom(struct svc_rqst *);
/* svc_rdma_rw.c */
extern void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma);
+extern void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
+ struct svc_rdma_chunk_ctxt *cc);
+extern void svc_rdma_cc_release(struct svcxprt_rdma *rdma,
+ struct svc_rdma_chunk_ctxt *cc,
+ enum dma_data_direction dir);
extern int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma,
const struct svc_rdma_chunk *chunk,
const struct xdr_buf *xdr);
@@ -201,7 +262,8 @@ extern int svc_rdma_send(struct svcxprt_rdma *rdma,
struct svc_rdma_send_ctxt *ctxt);
extern int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
struct svc_rdma_send_ctxt *sctxt,
- const struct svc_rdma_recv_ctxt *rctxt,
+ const struct svc_rdma_pcl *write_pcl,
+ const struct svc_rdma_pcl *reply_pcl,
const struct xdr_buf *xdr);
extern void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
struct svc_rdma_send_ctxt *sctxt,
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index a6b12631db21..8e20cd60e2e7 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -54,25 +54,8 @@ struct svc_xprt {
const struct svc_xprt_ops *xpt_ops;
struct kref xpt_ref;
struct list_head xpt_list;
- struct list_head xpt_ready;
+ struct lwq_node xpt_ready;
unsigned long xpt_flags;
-#define XPT_BUSY 0 /* enqueued/receiving */
-#define XPT_CONN 1 /* conn pending */
-#define XPT_CLOSE 2 /* dead or dying */
-#define XPT_DATA 3 /* data pending */
-#define XPT_TEMP 4 /* connected transport */
-#define XPT_DEAD 6 /* transport closed */
-#define XPT_CHNGBUF 7 /* need to change snd/rcv buf sizes */
-#define XPT_DEFERRED 8 /* deferred request pending */
-#define XPT_OLD 9 /* used for xprt aging mark+sweep */
-#define XPT_LISTENER 10 /* listening endpoint */
-#define XPT_CACHE_AUTH 11 /* cache auth info */
-#define XPT_LOCAL 12 /* connection from loopback interface */
-#define XPT_KILL_TEMP 13 /* call xpo_kill_temp_xprt before closing */
-#define XPT_CONG_CTRL 14 /* has congestion control */
-#define XPT_HANDSHAKE 15 /* xprt requests a handshake */
-#define XPT_TLS_SESSION 16 /* transport-layer security established */
-#define XPT_PEER_AUTH 17 /* peer has been authenticated */
struct svc_serv *xpt_server; /* service for transport */
atomic_t xpt_reserved; /* space on outq that is rsvd */
@@ -97,6 +80,27 @@ struct svc_xprt {
struct rpc_xprt_switch *xpt_bc_xps; /* NFSv4.1 backchannel */
};
+/* flag bits for xpt_flags */
+enum {
+ XPT_BUSY, /* enqueued/receiving */
+ XPT_CONN, /* conn pending */
+ XPT_CLOSE, /* dead or dying */
+ XPT_DATA, /* data pending */
+ XPT_TEMP, /* connected transport */
+ XPT_DEAD, /* transport closed */
+ XPT_CHNGBUF, /* need to change snd/rcv buf sizes */
+ XPT_DEFERRED, /* deferred request pending */
+ XPT_OLD, /* used for xprt aging mark+sweep */
+ XPT_LISTENER, /* listening endpoint */
+ XPT_CACHE_AUTH, /* cache auth info */
+ XPT_LOCAL, /* connection from loopback interface */
+ XPT_KILL_TEMP, /* call xpo_kill_temp_xprt before closing */
+ XPT_CONG_CTRL, /* has congestion control */
+ XPT_HANDSHAKE, /* xprt requests a handshake */
+ XPT_TLS_SESSION, /* transport-layer security established */
+ XPT_PEER_AUTH, /* peer has been authenticated */
+};
+
static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u)
{
spin_lock(&xpt->xpt_lock);
diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
index 6d9cc9080aca..61c455f1e1f5 100644
--- a/include/linux/sunrpc/svcauth.h
+++ b/include/linux/sunrpc/svcauth.h
@@ -83,6 +83,19 @@ struct auth_domain {
struct rcu_head rcu_head;
};
+enum svc_auth_status {
+ SVC_GARBAGE = 1,
+ SVC_SYSERR,
+ SVC_VALID,
+ SVC_NEGATIVE,
+ SVC_OK,
+ SVC_DROP,
+ SVC_CLOSE,
+ SVC_DENIED,
+ SVC_PENDING,
+ SVC_COMPLETE,
+};
+
/*
* Each authentication flavour registers an auth_ops
* structure.
@@ -98,6 +111,8 @@ struct auth_domain {
* is (probably) already in place. Certainly space is
* reserved for it.
* DROP - simply drop the request. It may have been deferred
+ * CLOSE - like SVC_DROP, but request is definitely lost.
+ * If there is a tcp connection, it should be closed.
* GARBAGE - rpc garbage_args error
* SYSERR - rpc system_err error
* DENIED - authp holds reason for denial.
@@ -111,60 +126,45 @@ struct auth_domain {
*
* release() is given a request after the procedure has been run.
* It should sign/encrypt the results if needed
- * It should return:
- * OK - the resbuf is ready to be sent
- * DROP - the reply should be quitely dropped
- * DENIED - authp holds a reason for MSG_DENIED
- * SYSERR - rpc system_err
*
* domain_release()
* This call releases a domain.
+ *
* set_client()
- * Givens a pending request (struct svc_rqst), finds and assigns
+ * Given a pending request (struct svc_rqst), finds and assigns
* an appropriate 'auth_domain' as the client.
+ *
+ * pseudoflavor()
+ * Returns RPC_AUTH pseudoflavor in use by @rqstp.
*/
struct auth_ops {
char * name;
struct module *owner;
int flavour;
- int (*accept)(struct svc_rqst *rq);
- int (*release)(struct svc_rqst *rq);
- void (*domain_release)(struct auth_domain *);
- int (*set_client)(struct svc_rqst *rq);
-};
-#define SVC_GARBAGE 1
-#define SVC_SYSERR 2
-#define SVC_VALID 3
-#define SVC_NEGATIVE 4
-#define SVC_OK 5
-#define SVC_DROP 6
-#define SVC_CLOSE 7 /* Like SVC_DROP, but request is definitely
- * lost so if there is a tcp connection, it
- * should be closed
- */
-#define SVC_DENIED 8
-#define SVC_PENDING 9
-#define SVC_COMPLETE 10
+ enum svc_auth_status (*accept)(struct svc_rqst *rqstp);
+ int (*release)(struct svc_rqst *rqstp);
+ void (*domain_release)(struct auth_domain *dom);
+ enum svc_auth_status (*set_client)(struct svc_rqst *rqstp);
+ rpc_authflavor_t (*pseudoflavor)(struct svc_rqst *rqstp);
+};
struct svc_xprt;
-extern int svc_authenticate(struct svc_rqst *rqstp);
+extern enum svc_auth_status svc_authenticate(struct svc_rqst *rqstp);
+extern rpc_authflavor_t svc_auth_flavor(struct svc_rqst *rqstp);
extern int svc_authorise(struct svc_rqst *rqstp);
-extern int svc_set_client(struct svc_rqst *rqstp);
+extern enum svc_auth_status svc_set_client(struct svc_rqst *rqstp);
extern int svc_auth_register(rpc_authflavor_t flavor, struct auth_ops *aops);
extern void svc_auth_unregister(rpc_authflavor_t flavor);
extern struct auth_domain *unix_domain_find(char *name);
extern void auth_domain_put(struct auth_domain *item);
-extern int auth_unix_add_addr(struct net *net, struct in6_addr *addr, struct auth_domain *dom);
extern struct auth_domain *auth_domain_lookup(char *name, struct auth_domain *new);
extern struct auth_domain *auth_domain_find(char *name);
-extern struct auth_domain *auth_unix_lookup(struct net *net, struct in6_addr *addr);
-extern int auth_unix_forget_old(struct auth_domain *dom);
extern void svcauth_unix_purge(struct net *net);
extern void svcauth_unix_info_release(struct svc_xprt *xpt);
-extern int svcauth_unix_set_client(struct svc_rqst *rqstp);
+extern enum svc_auth_status svcauth_unix_set_client(struct svc_rqst *rqstp);
extern int unix_gid_cache_create(struct net *net);
extern void unix_gid_cache_destroy(struct net *net);
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index a7116048a4d4..7c78ec6356b9 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -35,8 +35,8 @@ struct svc_sock {
/* Total length of the data (not including fragment headers)
* received so far in the fragments making up this rpc: */
u32 sk_datalen;
- /* Number of queued send requests */
- atomic_t sk_sendqlen;
+
+ struct page_frag_cache sk_frag_cache;
struct completion sk_handshake_done;
@@ -56,8 +56,7 @@ static inline u32 svc_sock_final_rec(struct svc_sock *svsk)
/*
* Function prototypes.
*/
-void svc_close_net(struct svc_serv *, struct net *);
-int svc_recv(struct svc_rqst *, long);
+void svc_recv(struct svc_rqst *rqstp);
void svc_send(struct svc_rqst *rqstp);
void svc_drop(struct svc_rqst *);
void svc_sock_update_bufs(struct svc_serv *serv);
@@ -66,8 +65,6 @@ int svc_addsock(struct svc_serv *serv, struct net *net,
const struct cred *cred);
void svc_init_xprt_sock(void);
void svc_cleanup_xprt_sock(void);
-struct svc_xprt *svc_sock_create(struct svc_serv *serv, int prot);
-void svc_sock_destroy(struct svc_xprt *);
/*
* svc_makesock socket characteristics
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 72014c9216fc..2f8dc47f1eb0 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -139,6 +139,8 @@ void xdr_terminate_string(const struct xdr_buf *, const u32);
size_t xdr_buf_pagecount(const struct xdr_buf *buf);
int xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp);
void xdr_free_bvec(struct xdr_buf *buf);
+unsigned int xdr_buf_to_bvec(struct bio_vec *bvec, unsigned int bvec_size,
+ const struct xdr_buf *xdr);
static inline __be32 *xdr_encode_array(__be32 *p, const void *s, unsigned int len)
{
@@ -224,6 +226,7 @@ struct xdr_stream {
struct kvec *iov; /* pointer to the current kvec */
struct kvec scratch; /* Scratch buffer */
struct page **page_ptr; /* pointer to the current page */
+ void *page_kaddr; /* kmapped address of the current page */
unsigned int nwords; /* Remaining decode buffer length */
struct rpc_rqst *rqst; /* For debugging */
@@ -242,8 +245,7 @@ extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf,
extern void xdr_init_encode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
struct page **pages, struct rpc_rqst *rqst);
extern __be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes);
-extern int xdr_reserve_space_vec(struct xdr_stream *xdr, struct kvec *vec,
- size_t nbytes);
+extern int xdr_reserve_space_vec(struct xdr_stream *xdr, size_t nbytes);
extern void __xdr_commit_encode(struct xdr_stream *xdr);
extern void xdr_truncate_encode(struct xdr_stream *xdr, size_t len);
extern void xdr_truncate_decode(struct xdr_stream *xdr, size_t len);
@@ -256,6 +258,7 @@ extern void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf,
__be32 *p, struct rpc_rqst *rqst);
extern void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
struct page **pages, unsigned int len);
+extern void xdr_finish_decode(struct xdr_stream *xdr);
extern __be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes);
extern unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len);
extern void xdr_enter_page(struct xdr_stream *xdr, unsigned int len);
@@ -776,7 +779,7 @@ xdr_stream_decode_uint32_array(struct xdr_stream *xdr,
if (unlikely(xdr_stream_decode_u32(xdr, &len) < 0))
return -EBADMSG;
- if (len > SIZE_MAX / sizeof(*p))
+ if (U32_MAX >= SIZE_MAX / sizeof(*p) && len > SIZE_MAX / sizeof(*p))
return -EBADMSG;
p = xdr_inline_decode(xdr, len * sizeof(*p));
if (unlikely(!p))
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index b9f59aabee53..464f6a9492ab 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -30,17 +30,6 @@
#define RPC_MAXCWND(xprt) ((xprt)->max_reqs << RPC_CWNDSHIFT)
#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd)
-/*
- * This describes a timeout strategy
- */
-struct rpc_timeout {
- unsigned long to_initval, /* initial timeout */
- to_maxval, /* max timeout */
- to_increment; /* if !exponential */
- unsigned int to_retries; /* max # of retries */
- unsigned char to_exponential;
-};
-
enum rpc_display_format_t {
RPC_DISPLAY_ADDR = 0,
RPC_DISPLAY_PORT,
@@ -57,6 +46,7 @@ struct xprt_class;
struct seq_file;
struct svc_serv;
struct net;
+#include <linux/lwq.h>
/*
* This describes a complete RPC request
@@ -121,7 +111,7 @@ struct rpc_rqst {
int rq_ntrans;
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
- struct list_head rq_bc_list; /* Callback service list */
+ struct lwq_node rq_bc_list; /* Callback service list */
unsigned long rq_bc_pa_state; /* Backchannel prealloc state */
struct list_head rq_bc_pa_list; /* Backchannel prealloc list */
 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
@@ -129,6 +119,21 @@ struct rpc_rqst {
#define rq_svec rq_snd_buf.head
#define rq_slen rq_snd_buf.len
+/* RPC transport layer security policies */
+enum xprtsec_policies {
+ RPC_XPRTSEC_NONE = 0,
+ RPC_XPRTSEC_TLS_ANON,
+ RPC_XPRTSEC_TLS_X509,
+};
+
+struct xprtsec_parms {
+ enum xprtsec_policies policy;
+
+ /* authentication material */
+ key_serial_t cert_serial;
+ key_serial_t privkey_serial;
+};
+
struct rpc_xprt_ops {
void (*set_buffer_size)(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize);
int (*reserve_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
@@ -185,6 +190,7 @@ enum xprt_transports {
XPRT_TRANSPORT_RDMA = 256,
XPRT_TRANSPORT_BC_RDMA = XPRT_TRANSPORT_RDMA | XPRT_TRANSPORT_BC,
XPRT_TRANSPORT_LOCAL = 257,
+ XPRT_TRANSPORT_TCP_TLS = 258,
};
struct rpc_sysfs_xprt;
@@ -229,6 +235,7 @@ struct rpc_xprt {
*/
unsigned long bind_timeout,
reestablish_timeout;
+ struct xprtsec_parms xprtsec;
unsigned int connect_cookie; /* A cookie that gets bumped
every time the transport
is reconnected */
@@ -333,6 +340,9 @@ struct xprt_create {
struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */
struct rpc_xprt_switch *bc_xps;
unsigned int flags;
+ struct xprtsec_parms xprtsec;
+ unsigned long connect_timeout;
+ unsigned long reconnect_timeout;
};
struct xprt_class {
diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h
index 38284f25eddf..700a1e6c047c 100644
--- a/include/linux/sunrpc/xprtsock.h
+++ b/include/linux/sunrpc/xprtsock.h
@@ -57,9 +57,11 @@ struct sock_xprt {
struct work_struct error_worker;
struct work_struct recv_worker;
struct mutex recv_mutex;
+ struct completion handshake_done;
struct sockaddr_storage srcaddr;
unsigned short srcport;
int xprt_err;
+ struct rpc_clnt *clnt;
/*
* UDP socket buffer size parameters
@@ -90,5 +92,6 @@ struct sock_xprt {
#define XPRT_SOCK_WAKE_DISCONNECT (7)
#define XPRT_SOCK_CONNECT_SENT (8)
#define XPRT_SOCK_NOSPACE (9)
+#define XPRT_SOCK_IGNORE_RECV (10)
#endif /* _LINUX_SUNRPC_XPRTSOCK_H */
diff --git a/include/linux/superhyway.h b/include/linux/superhyway.h
deleted file mode 100644
index 8d3376775813..000000000000
--- a/include/linux/superhyway.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * include/linux/superhyway.h
- *
- * SuperHyway Bus definitions
- *
- * Copyright (C) 2004, 2005 Paul Mundt <lethal@linux-sh.org>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#ifndef __LINUX_SUPERHYWAY_H
-#define __LINUX_SUPERHYWAY_H
-
-#include <linux/device.h>
-
-/*
- * SuperHyway IDs
- */
-#define SUPERHYWAY_DEVICE_ID_SH5_DMAC 0x0183
-
-struct superhyway_vcr_info {
- u8 perr_flags; /* P-port Error flags */
- u8 merr_flags; /* Module Error flags */
- u16 mod_vers; /* Module Version */
- u16 mod_id; /* Module ID */
- u8 bot_mb; /* Bottom Memory block */
- u8 top_mb; /* Top Memory block */
-};
-
-struct superhyway_ops {
- int (*read_vcr)(unsigned long base, struct superhyway_vcr_info *vcr);
- int (*write_vcr)(unsigned long base, struct superhyway_vcr_info vcr);
-};
-
-struct superhyway_bus {
- struct superhyway_ops *ops;
-};
-
-extern struct superhyway_bus superhyway_channels[];
-
-struct superhyway_device_id {
- unsigned int id;
- unsigned long driver_data;
-};
-
-struct superhyway_device;
-extern struct bus_type superhyway_bus_type;
-
-struct superhyway_driver {
- char *name;
-
- const struct superhyway_device_id *id_table;
- struct device_driver drv;
-
- int (*probe)(struct superhyway_device *dev, const struct superhyway_device_id *id);
- void (*remove)(struct superhyway_device *dev);
-};
-
-#define to_superhyway_driver(d) container_of((d), struct superhyway_driver, drv)
-
-struct superhyway_device {
- char name[32];
-
- struct device dev;
-
- struct superhyway_device_id id;
- struct superhyway_driver *drv;
- struct superhyway_bus *bus;
-
- int num_resources;
- struct resource *resource;
- struct superhyway_vcr_info vcr;
-};
-
-#define to_superhyway_device(d) container_of((d), struct superhyway_device, dev)
-
-#define superhyway_get_drvdata(d) dev_get_drvdata(&(d)->dev)
-#define superhyway_set_drvdata(d,p) dev_set_drvdata(&(d)->dev, (p))
-
-static inline int
-superhyway_read_vcr(struct superhyway_device *dev, unsigned long base,
- struct superhyway_vcr_info *vcr)
-{
- return dev->bus->ops->read_vcr(base, vcr);
-}
-
-static inline int
-superhyway_write_vcr(struct superhyway_device *dev, unsigned long base,
- struct superhyway_vcr_info vcr)
-{
- return dev->bus->ops->write_vcr(base, vcr);
-}
-
-extern int superhyway_scan_bus(struct superhyway_bus *);
-
-/* drivers/sh/superhyway/superhyway.c */
-int superhyway_register_driver(struct superhyway_driver *);
-void superhyway_unregister_driver(struct superhyway_driver *);
-int superhyway_add_device(unsigned long base, struct superhyway_device *, struct superhyway_bus *);
-int superhyway_add_devices(struct superhyway_bus *bus, struct superhyway_device **devices, int nr_devices);
-
-/* drivers/sh/superhyway/superhyway-sysfs.c */
-extern const struct attribute_group *superhyway_dev_groups[];
-
-#endif /* __LINUX_SUPERHYWAY_H */
-
diff --git a/include/linux/surface_aggregator/controller.h b/include/linux/surface_aggregator/controller.h
index cb7980805920..5b67f0f47d80 100644
--- a/include/linux/surface_aggregator/controller.h
+++ b/include/linux/surface_aggregator/controller.h
@@ -44,7 +44,7 @@ struct ssam_event {
u8 command_id;
u8 instance_id;
u16 length;
- u8 data[];
+ u8 data[] __counted_by(length);
};
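/*
 * __counted_by(length) ties the flexible array to its length field, so
 * CONFIG_FORTIFY_SOURCE and CONFIG_UBSAN_BOUNDS can police accesses to
 * data[]. A minimal allocation sketch; the helper name is hypothetical:
 */
static struct ssam_event *ssam_event_dup(const u8 *src, u16 n)
{
	struct ssam_event *ev = kzalloc(struct_size(ev, data, n), GFP_KERNEL);

	if (ev) {
		ev->length = n;	/* set the counter before touching data[] */
		memcpy(ev->data, src, n);
	}
	return ev;
}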
/**
diff --git a/include/linux/surface_aggregator/device.h b/include/linux/surface_aggregator/device.h
index 42b249b4c24b..8cd8c38cf3f3 100644
--- a/include/linux/surface_aggregator/device.h
+++ b/include/linux/surface_aggregator/device.h
@@ -193,7 +193,6 @@ struct ssam_device_driver {
#ifdef CONFIG_SURFACE_AGGREGATOR_BUS
-extern struct bus_type ssam_bus_type;
extern const struct device_type ssam_device_type;
/**
diff --git a/include/linux/surface_aggregator/serial_hub.h b/include/linux/surface_aggregator/serial_hub.h
index 5c4ae1a26183..d8dbef6b7fc2 100644
--- a/include/linux/surface_aggregator/serial_hub.h
+++ b/include/linux/surface_aggregator/serial_hub.h
@@ -12,7 +12,7 @@
#ifndef _LINUX_SURFACE_AGGREGATOR_SERIAL_HUB_H
#define _LINUX_SURFACE_AGGREGATOR_SERIAL_HUB_H
-#include <linux/crc-ccitt.h>
+#include <linux/crc-itu-t.h>
#include <linux/kref.h>
#include <linux/ktime.h>
#include <linux/list.h>
@@ -188,7 +188,7 @@ static_assert(sizeof(struct ssh_command) == 8);
*/
static inline u16 ssh_crc(const u8 *buf, size_t len)
{
- return crc_ccitt_false(0xffff, buf, len);
+ return crc_itu_t(0xffff, buf, len);
}
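/*
 * crc_itu_t() computes the same CRC-16/CCITT-FALSE value (polynomial
 * 0x1021, init 0xffff, MSB first) that crc_ccitt_false() did, so only
 * the Kconfig dependency moves from CRC_CCITT to CRC_ITU_T. A sanity
 * check against the standard test vector (the function is illustrative):
 */
static void ssh_crc_selftest(void)
{
	static const u8 check[] = "123456789";

	WARN_ON(ssh_crc(check, 9) != 0x29b1);	/* CCITT-FALSE check value */
}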
/*
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index d0d4598a7b3f..ef503088942d 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -202,6 +202,7 @@ struct platform_s2idle_ops {
};
#ifdef CONFIG_SUSPEND
+extern suspend_state_t pm_suspend_target_state;
extern suspend_state_t mem_sleep_current;
extern suspend_state_t mem_sleep_default;
@@ -337,6 +338,8 @@ extern bool sync_on_suspend_enabled;
#else /* !CONFIG_SUSPEND */
#define suspend_valid_only_mem NULL
+#define pm_suspend_target_state (PM_SUSPEND_ON)
+
static inline void pm_suspend_clear_flags(void) {}
static inline void pm_set_suspend_via_firmware(void) {}
static inline void pm_set_resume_via_firmware(void) {}
@@ -364,9 +367,6 @@ struct pbe {
struct pbe *next;
};
-/* mm/page_alloc.c */
-extern void mark_free_pages(struct zone *zone);
-
/**
* struct platform_hibernation_ops - hibernation platform support
*
@@ -452,6 +452,10 @@ extern struct pbe *restore_pblist;
int pfn_is_nosave(unsigned long pfn);
int hibernate_quiet_exec(int (*func)(void *data), void *data);
+int hibernate_resume_nonboot_cpu_disable(void);
+int arch_hibernation_header_save(void *addr, unsigned int max_size);
+int arch_hibernation_header_restore(void *addr);
+
#else /* CONFIG_HIBERNATION */
static inline void register_nosave_region(unsigned long b, unsigned long e) {}
static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
@@ -468,6 +472,8 @@ static inline int hibernate_quiet_exec(int (*func)(void *data), void *data) {
}
#endif /* CONFIG_HIBERNATION */
+int arch_resume_nosmt(void);
+
#ifdef CONFIG_HIBERNATION_SNAPSHOT_DEV
int is_hibernate_resume_dev(dev_t dev);
#else
@@ -503,7 +509,11 @@ extern void pm_report_max_hw_sleep(u64 t);
/* drivers/base/power/wakeup.c */
extern bool events_check_enabled;
-extern suspend_state_t pm_suspend_target_state;
+
+static inline bool pm_suspended_storage(void)
+{
+ return !gfp_has_io_fs(gfp_allowed_mask);
+}
extern bool pm_wakeup_pending(void);
extern void pm_system_wakeup(void);
@@ -538,6 +548,7 @@ static inline void ksys_sync_helper(void) {}
#define pm_notifier(fn, pri) do { (void)(fn); } while (0)
+static inline bool pm_suspended_storage(void) { return false; }
static inline bool pm_wakeup_pending(void) { return false; }
static inline void pm_system_wakeup(void) {}
static inline void pm_wakeup_clear(bool reset) {}
@@ -551,6 +562,7 @@ static inline void unlock_system_sleep(unsigned int flags) {}
#ifdef CONFIG_PM_SLEEP_DEBUG
extern bool pm_print_times_enabled;
extern bool pm_debug_messages_on;
+extern bool pm_debug_messages_should_print(void);
static inline int pm_dyn_debug_messages_on(void)
{
#ifdef CONFIG_DYNAMIC_DEBUG
@@ -564,14 +576,14 @@ static inline int pm_dyn_debug_messages_on(void)
#endif
#define __pm_pr_dbg(fmt, ...) \
do { \
- if (pm_debug_messages_on) \
+ if (pm_debug_messages_should_print()) \
printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
else if (pm_dyn_debug_messages_on()) \
pr_debug(fmt, ##__VA_ARGS__); \
} while (0)
#define __pm_deferred_pr_dbg(fmt, ...) \
do { \
- if (pm_debug_messages_on) \
+ if (pm_debug_messages_should_print()) \
printk_deferred(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
} while (0)
#else
@@ -589,7 +601,8 @@ static inline int pm_dyn_debug_messages_on(void)
/**
* pm_pr_dbg - print pm sleep debug messages
*
- * If pm_debug_messages_on is enabled, print message.
+ * If pm_debug_messages_on is enabled and the system is entering/leaving
+ * suspend, print message.
* If pm_debug_messages_on is disabled and CONFIG_DYNAMIC_DEBUG is enabled,
* print message only from instances explicitly enabled on dynamic debug's
* control.
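/*
 * With the gating above, a pm_pr_dbg() call in a driver now prints only
 * while a suspend/resume transition is in progress, or when enabled via
 * dynamic debug. A usage sketch (the callback is hypothetical):
 */
static int my_driver_resume(struct device *dev)
{
	pm_pr_dbg("%s: wakeup configured: %d\n",
		  dev_name(dev), device_may_wakeup(dev));
	return 0;
}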
diff --git a/include/linux/swait.h b/include/linux/swait.h
index 6a8c22b8c2a5..d324419482a0 100644
--- a/include/linux/swait.h
+++ b/include/linux/swait.h
@@ -146,7 +146,7 @@ static inline bool swq_has_sleeper(struct swait_queue_head *wq)
extern void swake_up_one(struct swait_queue_head *q);
extern void swake_up_all(struct swait_queue_head *q);
-extern void swake_up_locked(struct swait_queue_head *q);
+extern void swake_up_locked(struct swait_queue_head *q, int wake_flags);
extern void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state);
extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state);
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 3c69cb653cb9..8d28f6091a32 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -298,14 +298,11 @@ struct swap_info_struct {
unsigned int __percpu *cluster_next_cpu; /*percpu index for next allocation */
struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
struct rb_root swap_extent_root;/* root of the swap extent rbtree */
+ struct bdev_handle *bdev_handle;/* open handle of the bdev */
struct block_device *bdev; /* swap device or bdev of swap file */
struct file *swap_file; /* seldom referenced */
unsigned int old_block_size; /* seldom referenced */
struct completion comp; /* seldom referenced */
-#ifdef CONFIG_FRONTSWAP
- unsigned long *frontswap_map; /* frontswap in-use, one bit per page */
- atomic_t frontswap_pages; /* frontswap pages in-use counter */
-#endif
spinlock_t lock; /*
* protect map scan related fields like
* swap_map, lowest_bit, highest_bit,
@@ -337,37 +334,17 @@ struct swap_info_struct {
*/
};
-#ifdef CONFIG_64BIT
-#define SWAP_RA_ORDER_CEILING 5
-#else
-/* Avoid stack overflow, because we need to save part of page table */
-#define SWAP_RA_ORDER_CEILING 3
-#define SWAP_RA_PTE_CACHE_SIZE (1 << SWAP_RA_ORDER_CEILING)
-#endif
-
-struct vma_swap_readahead {
- unsigned short win;
- unsigned short offset;
- unsigned short nr_pte;
-#ifdef CONFIG_64BIT
- pte_t *ptes;
-#else
- pte_t ptes[SWAP_RA_PTE_CACHE_SIZE];
-#endif
-};
-
-static inline swp_entry_t folio_swap_entry(struct folio *folio)
+static inline swp_entry_t page_swap_entry(struct page *page)
{
- swp_entry_t entry = { .val = page_private(&folio->page) };
- return entry;
-}
+ struct folio *folio = page_folio(page);
+ swp_entry_t entry = folio->swap;
-static inline void folio_set_swap_entry(struct folio *folio, swp_entry_t entry)
-{
- folio->private = (void *)entry.val;
+ entry.val += folio_page_idx(folio, page);
+ return entry;
}
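/*
 * folio->swap holds the entry of the folio's first page, and per-page
 * entries within a large folio are consecutive, which is the invariant
 * page_swap_entry() relies on. A sketch (the helper is hypothetical and
 * assumes a swapped-out folio of at least three pages):
 */
static void check_swap_entries(struct folio *folio)
{
	swp_entry_t first = page_swap_entry(folio_page(folio, 0));
	swp_entry_t third = page_swap_entry(folio_page(folio, 2));

	WARN_ON(third.val != first.val + 2);
}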
/* linux/mm/workingset.c */
+bool workingset_test_recent(void *shadow, bool file, bool *workingset);
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg);
void workingset_refault(struct folio *folio, void *shadow);
@@ -420,9 +397,6 @@ void folio_deactivate(struct folio *folio);
void folio_mark_lazyfree(struct folio *folio);
extern void swap_setup(void);
-extern void lru_cache_add_inactive_or_unevictable(struct page *page,
- struct vm_area_struct *vma);
-
/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
@@ -457,10 +431,9 @@ static inline bool node_reclaim_enabled(void)
}
void check_move_unevictable_folios(struct folio_batch *fbatch);
-void check_move_unevictable_pages(struct pagevec *pvec);
-extern void kswapd_run(int nid);
-extern void kswapd_stop(int nid);
+extern void __meminit kswapd_run(int nid);
+extern void __meminit kswapd_stop(int nid);
#ifdef CONFIG_SWAP
@@ -512,15 +485,14 @@ int find_first_swap(dev_t *device);
extern unsigned int count_swap_pages(int, int);
extern sector_t swapdev_block(int, pgoff_t);
extern int __swap_count(swp_entry_t entry);
-extern int __swp_swapcount(swp_entry_t entry);
+extern int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
-extern struct swap_info_struct *page_swap_info(struct page *);
-extern struct swap_info_struct *swp_swap_info(swp_entry_t entry);
+struct swap_info_struct *swp_swap_info(swp_entry_t entry);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);
extern struct swap_info_struct *get_swap_device(swp_entry_t entry);
-sector_t swap_page_sector(struct page *page);
+sector_t swap_folio_sector(struct folio *folio);
static inline void put_swap_device(struct swap_info_struct *si)
{
@@ -577,6 +549,11 @@ static inline int swap_duplicate(swp_entry_t swp)
return 0;
}
+static inline int swapcache_prepare(swp_entry_t swp)
+{
+ return 0;
+}
+
static inline void swap_free(swp_entry_t swp)
{
}
@@ -590,7 +567,7 @@ static inline int __swap_count(swp_entry_t entry)
return 0;
}
-static inline int __swp_swapcount(swp_entry_t entry)
+static inline int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry)
{
return 0;
}
@@ -649,11 +626,6 @@ static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
}
#endif
-#ifdef CONFIG_ZSWAP
-extern u64 zswap_pool_total_size;
-extern atomic_t zswap_stored_pages;
-#endif
-
#if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
void __folio_throttle_swaprate(struct folio *folio, gfp_t gfp);
static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h
index 7ed529a77c5b..99e3ed469e88 100644
--- a/include/linux/swapfile.h
+++ b/include/linux/swapfile.h
@@ -2,11 +2,6 @@
#ifndef _LINUX_SWAPFILE_H
#define _LINUX_SWAPFILE_H
-/*
- * these were static in swapfile.c but frontswap.c needs them and we don't
- * want to expose them to the dozens of source files that include swap.h
- */
-extern struct swap_info_struct *swap_info[];
extern unsigned long generic_max_swapfile_size(void);
unsigned long arch_max_swapfile_size(void);
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 3a451b7afcb3..bff1e8d97de0 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -332,15 +332,9 @@ static inline bool is_migration_entry_dirty(swp_entry_t entry)
return false;
}
-extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
- spinlock_t *ptl);
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
unsigned long address);
-#ifdef CONFIG_HUGETLB_PAGE
-extern void __migration_entry_wait_huge(struct vm_area_struct *vma,
- pte_t *ptep, spinlock_t *ptl);
extern void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte);
-#endif /* CONFIG_HUGETLB_PAGE */
#else /* CONFIG_MIGRATION */
static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
{
@@ -362,15 +356,10 @@ static inline int is_migration_entry(swp_entry_t swp)
return 0;
}
-static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
- spinlock_t *ptl) { }
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
- unsigned long address) { }
-#ifdef CONFIG_HUGETLB_PAGE
-static inline void __migration_entry_wait_huge(struct vm_area_struct *vma,
- pte_t *ptep, spinlock_t *ptl) { }
-static inline void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte) { }
-#endif /* CONFIG_HUGETLB_PAGE */
+ unsigned long address) { }
+static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
+ pte_t *pte) { }
static inline int is_writable_migration_entry(swp_entry_t entry)
{
return 0;
@@ -404,7 +393,12 @@ static inline bool is_migration_entry_dirty(swp_entry_t entry)
typedef unsigned long pte_marker;
#define PTE_MARKER_UFFD_WP BIT(0)
-#define PTE_MARKER_SWAPIN_ERROR BIT(1)
+/*
+ * "Poisoned" here is meant in the very general sense of "future accesses are
+ * invalid", instead of referring very specifically to hardware memory errors.
+ * This marker is meant to represent any of various different causes of this.
+ */
+#define PTE_MARKER_POISONED BIT(1)
#define PTE_MARKER_MASK (BIT(2) - 1)
static inline swp_entry_t make_pte_marker_entry(pte_marker marker)
@@ -432,15 +426,15 @@ static inline pte_t make_pte_marker(pte_marker marker)
return swp_entry_to_pte(make_pte_marker_entry(marker));
}
-static inline swp_entry_t make_swapin_error_entry(void)
+static inline swp_entry_t make_poisoned_swp_entry(void)
{
- return make_pte_marker_entry(PTE_MARKER_SWAPIN_ERROR);
+ return make_pte_marker_entry(PTE_MARKER_POISONED);
}
-static inline int is_swapin_error_entry(swp_entry_t entry)
+static inline int is_poisoned_swp_entry(swp_entry_t entry)
{
return is_pte_marker_entry(entry) &&
- (pte_marker_get(entry) & PTE_MARKER_SWAPIN_ERROR);
+ (pte_marker_get(entry) & PTE_MARKER_POISONED);
}
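/*
 * Round-trip sketch for the renamed marker: a poisoned entry now stands
 * for any "future accesses are invalid" condition, not only a swap-in
 * error, and faulting on one is expected to raise SIGBUS:
 */
static void pte_marker_example(void)
{
	swp_entry_t entry = make_poisoned_swp_entry();

	WARN_ON(!is_poisoned_swp_entry(entry));
}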
/*
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 7af2673b47ba..ecde0312dd52 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -8,6 +8,7 @@
#include <linux/types.h>
#include <linux/limits.h>
#include <linux/spinlock.h>
+#include <linux/workqueue.h>
struct device;
struct page;
@@ -62,8 +63,7 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
#ifdef CONFIG_SWIOTLB
/**
- * struct io_tlb_mem - IO TLB Memory Pool Descriptor
- *
+ * struct io_tlb_pool - IO TLB memory pool descriptor
* @start: The start address of the swiotlb memory pool. Used to do a quick
* range check to see if the memory was in fact allocated by this
* API.
@@ -73,20 +73,48 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
* @vaddr: The vaddr of the swiotlb memory pool. The swiotlb memory pool
* may be remapped in the memory encrypted case and store virtual
* address for bounce buffer operation.
- * @nslabs: The number of IO TLB blocks (in groups of 64) between @start and
- * @end. For default swiotlb, this is command line adjustable via
- * setup_io_tlb_npages.
- * @used: The number of used IO TLB block.
- * @list: The free list describing the number of free entries available
- * from each index.
- * @orig_addr: The original address corresponding to a mapped entry.
- * @alloc_size: Size of the allocated buffer.
+ * @nslabs: The number of IO TLB slots between @start and @end. For the
+ * default swiotlb, this can be adjusted with a boot parameter,
+ * see setup_io_tlb_npages().
+ * @late_alloc: %true if allocated using the page allocator.
+ * @nareas: Number of areas in the pool.
+ * @area_nslabs: Number of slots in each area.
+ * @areas: Array of memory area descriptors.
+ * @slots: Array of slot descriptors.
+ * @node: Member of the IO TLB memory pool list.
+ * @rcu: RCU head for swiotlb_dyn_free().
+ * @transient: %true if transient memory pool.
+ */
+struct io_tlb_pool {
+ phys_addr_t start;
+ phys_addr_t end;
+ void *vaddr;
+ unsigned long nslabs;
+ bool late_alloc;
+ unsigned int nareas;
+ unsigned int area_nslabs;
+ struct io_tlb_area *areas;
+ struct io_tlb_slot *slots;
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+ struct list_head node;
+ struct rcu_head rcu;
+ bool transient;
+#endif
+};
+
+/**
+ * struct io_tlb_mem - Software IO TLB allocator
+ * @defpool: Default (initial) IO TLB memory pool descriptor.
+ * @pool: IO TLB memory pool descriptor (if not dynamic).
+ * @nslabs: Total number of IO TLB slabs in all pools.
* @debugfs: The dentry to debugfs.
- * @late_alloc: %true if allocated using the page allocator
* @force_bounce: %true if swiotlb bouncing is forced
* @for_alloc: %true if the pool is used for memory allocation
- * @nareas: The area number in the pool.
- * @area_nslabs: The slot number in the area.
+ * @can_grow: %true if more pools can be allocated dynamically.
+ * @phys_limit: Maximum allowed physical address.
+ * @lock: Lock to synchronize changes to the list.
+ * @pools: List of IO TLB memory pool descriptors (if dynamic).
+ * @dyn_alloc: Dynamic IO TLB pool allocation work.
* @total_used: The total number of slots in the pool that are currently used
* across all areas. Used only for calculating used_hiwater in
* debugfs.
@@ -94,31 +122,73 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
* in debugfs.
*/
struct io_tlb_mem {
- phys_addr_t start;
- phys_addr_t end;
- void *vaddr;
+ struct io_tlb_pool defpool;
unsigned long nslabs;
- unsigned long used;
struct dentry *debugfs;
- bool late_alloc;
bool force_bounce;
bool for_alloc;
- unsigned int nareas;
- unsigned int area_nslabs;
- struct io_tlb_area *areas;
- struct io_tlb_slot *slots;
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+ bool can_grow;
+ u64 phys_limit;
+ spinlock_t lock;
+ struct list_head pools;
+ struct work_struct dyn_alloc;
+#endif
#ifdef CONFIG_DEBUG_FS
atomic_long_t total_used;
atomic_long_t used_hiwater;
#endif
};
-extern struct io_tlb_mem io_tlb_default_mem;
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+
+struct io_tlb_pool *swiotlb_find_pool(struct device *dev, phys_addr_t paddr);
+
+#else
+
+static inline struct io_tlb_pool *swiotlb_find_pool(struct device *dev,
+ phys_addr_t paddr)
+{
+ return &dev->dma_io_tlb_mem->defpool;
+}
+
+#endif
+
+/**
+ * is_swiotlb_buffer() - check if a physical address belongs to a swiotlb
+ * @dev: Device which has mapped the buffer.
+ * @paddr: Physical address within the DMA buffer.
+ *
+ * Check if @paddr points into a bounce buffer.
+ *
+ * Return:
+ * * %true if @paddr points into a bounce buffer
+ * * %false otherwise
+ */
static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
{
struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
- return mem && paddr >= mem->start && paddr < mem->end;
+ if (!mem)
+ return false;
+
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+ /*
+ * All SWIOTLB buffer addresses must have been returned by
+ * swiotlb_tbl_map_single() and passed to a device driver.
+ * If a SWIOTLB address is checked on another CPU, then it was
+ * presumably loaded by the device driver from an unspecified private
+ * data structure. Make sure that this load is ordered before reading
+ * dev->dma_uses_io_tlb here and mem->pools in swiotlb_find_pool().
+ *
+ * This barrier pairs with smp_mb() in swiotlb_find_slots().
+ */
+ smp_rmb();
+ return READ_ONCE(dev->dma_uses_io_tlb) &&
+ swiotlb_find_pool(dev, paddr);
+#else
+ return paddr >= mem->defpool.start && paddr < mem->defpool.end;
+#endif
}
static inline bool is_swiotlb_force_bounce(struct device *dev)
@@ -130,13 +200,22 @@ static inline bool is_swiotlb_force_bounce(struct device *dev)
void swiotlb_init(bool addressing_limited, unsigned int flags);
void __init swiotlb_exit(void);
+void swiotlb_dev_init(struct device *dev);
size_t swiotlb_max_mapping_size(struct device *dev);
+bool is_swiotlb_allocated(void);
bool is_swiotlb_active(struct device *dev);
void __init swiotlb_adjust_size(unsigned long size);
+phys_addr_t default_swiotlb_base(void);
+phys_addr_t default_swiotlb_limit(void);
#else
static inline void swiotlb_init(bool addressing_limited, unsigned int flags)
{
}
+
+static inline void swiotlb_dev_init(struct device *dev)
+{
+}
+
static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
{
return false;
@@ -153,6 +232,11 @@ static inline size_t swiotlb_max_mapping_size(struct device *dev)
return SIZE_MAX;
}
+static inline bool is_swiotlb_allocated(void)
+{
+ return false;
+}
+
static inline bool is_swiotlb_active(struct device *dev)
{
return false;
@@ -161,6 +245,16 @@ static inline bool is_swiotlb_active(struct device *dev)
static inline void swiotlb_adjust_size(unsigned long size)
{
}
+
+static inline phys_addr_t default_swiotlb_base(void)
+{
+ return 0;
+}
+
+static inline phys_addr_t default_swiotlb_limit(void)
+{
+ return 0;
+}
#endif /* CONFIG_SWIOTLB */
extern void swiotlb_print_info(void);
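/*
 * The barrier pairing used by is_swiotlb_buffer() above, as a
 * two-column sketch (a summary of the protocol, not code from this
 * patch):
 *
 *	swiotlb_find_slots()			is_swiotlb_buffer()
 *	--------------------			-------------------
 *	install pool in mem->pools		paddr = <driver private data>
 *	WRITE_ONCE(dev->dma_uses_io_tlb,	smp_rmb();
 *		   true);			if (READ_ONCE(dev->dma_uses_io_tlb))
 *	smp_mb();				        swiotlb_find_pool(dev, paddr);
 *	hand tlb_addr to the driver
 *
 * A checker that observes dma_uses_io_tlb == true is therefore also
 * guaranteed to observe the pool that backs the address it loaded.
 */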
diff --git a/include/linux/switchtec.h b/include/linux/switchtec.h
index 48fabe36509e..8d8fac1626bd 100644
--- a/include/linux/switchtec.h
+++ b/include/linux/switchtec.h
@@ -41,6 +41,7 @@ enum {
enum switchtec_gen {
SWITCHTEC_GEN3,
SWITCHTEC_GEN4,
+ SWITCHTEC_GEN5,
};
struct mrpc_regs {
diff --git a/include/linux/syscall_user_dispatch.h b/include/linux/syscall_user_dispatch.h
index 641ca8880995..3858a6ffdd5c 100644
--- a/include/linux/syscall_user_dispatch.h
+++ b/include/linux/syscall_user_dispatch.h
@@ -6,16 +6,10 @@
#define _SYSCALL_USER_DISPATCH_H
#include <linux/thread_info.h>
+#include <linux/syscall_user_dispatch_types.h>
#ifdef CONFIG_GENERIC_ENTRY
-struct syscall_user_dispatch {
- char __user *selector;
- unsigned long offset;
- unsigned long len;
- bool on_dispatch;
-};
-
int set_syscall_user_dispatch(unsigned long mode, unsigned long offset,
unsigned long len, char __user *selector);
@@ -29,7 +23,6 @@ int syscall_user_dispatch_set_config(struct task_struct *task, unsigned long siz
void __user *data);
#else
-struct syscall_user_dispatch {};
static inline int set_syscall_user_dispatch(unsigned long mode, unsigned long offset,
unsigned long len, char __user *selector)
diff --git a/include/linux/syscall_user_dispatch_types.h b/include/linux/syscall_user_dispatch_types.h
new file mode 100644
index 000000000000..3be36b06c7d7
--- /dev/null
+++ b/include/linux/syscall_user_dispatch_types.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _SYSCALL_USER_DISPATCH_TYPES_H
+#define _SYSCALL_USER_DISPATCH_TYPES_H
+
+#include <linux/types.h>
+
+#ifdef CONFIG_GENERIC_ENTRY
+
+struct syscall_user_dispatch {
+ char __user *selector;
+ unsigned long offset;
+ unsigned long len;
+ bool on_dispatch;
+};
+
+#else
+
+struct syscall_user_dispatch {};
+
+#endif
+
+#endif /* _SYSCALL_USER_DISPATCH_TYPES_H */
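/*
 * Userspace view of the feature whose types moved into this header (a
 * sketch; see Documentation/admin-guide/syscall-user-dispatch.rst).
 * While the selector byte reads SYSCALL_DISPATCH_FILTER_BLOCK, syscalls
 * issued outside [offset, offset + len) deliver SIGSYS:
 */
#include <sys/prctl.h>
#include <linux/prctl.h>

static volatile char sud_selector = SYSCALL_DISPATCH_FILTER_ALLOW;

static void sud_enable(unsigned long offset, unsigned long len)
{
	/* a SIGSYS handler must already be installed */
	prctl(PR_SET_SYSCALL_USER_DISPATCH, PR_SYS_DISPATCH_ON,
	      offset, len, &sud_selector);
	sud_selector = SYSCALL_DISPATCH_FILTER_BLOCK;	/* start trapping */
}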
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 33a0ee3bcb2e..77eb9b0e7685 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -71,7 +71,12 @@ struct clone_args;
struct open_how;
struct mount_attr;
struct landlock_ruleset_attr;
+struct lsm_ctx;
enum landlock_rule_type;
+struct cachestat_range;
+struct cachestat;
+struct statmount;
+struct mnt_id_req;
#include <linux/types.h>
#include <linux/aio_abi.h>
@@ -123,6 +128,7 @@ enum landlock_rule_type;
#define __TYPE_IS_LL(t) (__TYPE_AS(t, 0LL) || __TYPE_AS(t, 0ULL))
#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
#define __SC_CAST(t, a) (__force t) a
+#define __SC_TYPE(t, a) t
#define __SC_ARGS(t, a) a
#define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
@@ -282,22 +288,6 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event)
#endif
/*
- * Called before coming back to user-mode. Returning to user-mode with an
- * address limit different than USER_DS can allow to overwrite kernel memory.
- */
-static inline void addr_limit_user_check(void)
-{
-#ifdef TIF_FSCHECK
- if (!test_thread_flag(TIF_FSCHECK))
- return;
-#endif
-
-#ifdef TIF_FSCHECK
- clear_thread_flag(TIF_FSCHECK);
-#endif
-}
-
-/*
* These syscall function prototypes are kept in the same order as
* include/uapi/asm-generic/unistd.h. Architecture specific entries go below,
* followed by deprecated or obsolete system calls.
@@ -346,8 +336,6 @@ asmlinkage long sys_io_uring_enter(unsigned int fd, u32 to_submit,
const void __user *argp, size_t argsz);
asmlinkage long sys_io_uring_register(unsigned int fd, unsigned int op,
void __user *arg, unsigned int nr_args);
-
-/* fs/xattr.c */
asmlinkage long sys_setxattr(const char __user *path, const char __user *name,
const void __user *value, size_t size, int flags);
asmlinkage long sys_lsetxattr(const char __user *path, const char __user *name,
@@ -370,17 +358,8 @@ asmlinkage long sys_removexattr(const char __user *path,
asmlinkage long sys_lremovexattr(const char __user *path,
const char __user *name);
asmlinkage long sys_fremovexattr(int fd, const char __user *name);
-
-/* fs/dcache.c */
asmlinkage long sys_getcwd(char __user *buf, unsigned long size);
-
-/* fs/cookies.c */
-asmlinkage long sys_lookup_dcookie(u64 cookie64, char __user *buf, size_t len);
-
-/* fs/eventfd.c */
asmlinkage long sys_eventfd2(unsigned int count, int flags);
-
-/* fs/eventpoll.c */
asmlinkage long sys_epoll_create1(int flags);
asmlinkage long sys_epoll_ctl(int epfd, int op, int fd,
struct epoll_event __user *event);
@@ -393,8 +372,6 @@ asmlinkage long sys_epoll_pwait2(int epfd, struct epoll_event __user *events,
const struct __kernel_timespec __user *timeout,
const sigset_t __user *sigmask,
size_t sigsetsize);
-
-/* fs/fcntl.c */
asmlinkage long sys_dup(unsigned int fildes);
asmlinkage long sys_dup3(unsigned int oldfd, unsigned int newfd, int flags);
asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg);
@@ -402,25 +379,15 @@ asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg);
asmlinkage long sys_fcntl64(unsigned int fd,
unsigned int cmd, unsigned long arg);
#endif
-
-/* fs/inotify_user.c */
asmlinkage long sys_inotify_init1(int flags);
asmlinkage long sys_inotify_add_watch(int fd, const char __user *path,
u32 mask);
asmlinkage long sys_inotify_rm_watch(int fd, __s32 wd);
-
-/* fs/ioctl.c */
asmlinkage long sys_ioctl(unsigned int fd, unsigned int cmd,
unsigned long arg);
-
-/* fs/ioprio.c */
asmlinkage long sys_ioprio_set(int which, int who, int ioprio);
asmlinkage long sys_ioprio_get(int which, int who);
-
-/* fs/locks.c */
asmlinkage long sys_flock(unsigned int fd, unsigned int cmd);
-
-/* fs/namei.c */
asmlinkage long sys_mknodat(int dfd, const char __user * filename, umode_t mode,
unsigned dev);
asmlinkage long sys_mkdirat(int dfd, const char __user * pathname, umode_t mode);
@@ -431,18 +398,12 @@ asmlinkage long sys_linkat(int olddfd, const char __user *oldname,
int newdfd, const char __user *newname, int flags);
asmlinkage long sys_renameat(int olddfd, const char __user * oldname,
int newdfd, const char __user * newname);
-
-/* fs/namespace.c */
asmlinkage long sys_umount(char __user *name, int flags);
asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
char __user *type, unsigned long flags,
void __user *data);
asmlinkage long sys_pivot_root(const char __user *new_root,
const char __user *put_old);
-
-/* fs/nfsctl.c */
-
-/* fs/open.c */
asmlinkage long sys_statfs(const char __user * path,
struct statfs __user *buf);
asmlinkage long sys_statfs64(const char __user *path, size_t sz,
@@ -450,6 +411,12 @@ asmlinkage long sys_statfs64(const char __user *path, size_t sz,
asmlinkage long sys_fstatfs(unsigned int fd, struct statfs __user *buf);
asmlinkage long sys_fstatfs64(unsigned int fd, size_t sz,
struct statfs64 __user *buf);
+asmlinkage long sys_statmount(const struct mnt_id_req __user *req,
+ struct statmount __user *buf, size_t bufsize,
+ unsigned int flags);
+asmlinkage long sys_listmount(const struct mnt_id_req __user *req,
+ u64 __user *mnt_ids, size_t nr_mnt_ids,
+ unsigned int flags);
asmlinkage long sys_truncate(const char __user *path, long length);
asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
#if BITS_PER_LONG == 32
@@ -464,8 +431,10 @@ asmlinkage long sys_chdir(const char __user *filename);
asmlinkage long sys_fchdir(unsigned int fd);
asmlinkage long sys_chroot(const char __user *filename);
asmlinkage long sys_fchmod(unsigned int fd, umode_t mode);
-asmlinkage long sys_fchmodat(int dfd, const char __user * filename,
+asmlinkage long sys_fchmodat(int dfd, const char __user *filename,
umode_t mode);
+asmlinkage long sys_fchmodat2(int dfd, const char __user *filename,
+ umode_t mode, unsigned int flags);
asmlinkage long sys_fchownat(int dfd, const char __user *filename, uid_t user,
gid_t group, int flag);
asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group);
@@ -477,22 +446,14 @@ asmlinkage long sys_close(unsigned int fd);
asmlinkage long sys_close_range(unsigned int fd, unsigned int max_fd,
unsigned int flags);
asmlinkage long sys_vhangup(void);
-
-/* fs/pipe.c */
asmlinkage long sys_pipe2(int __user *fildes, int flags);
-
-/* fs/quota.c */
asmlinkage long sys_quotactl(unsigned int cmd, const char __user *special,
qid_t id, void __user *addr);
asmlinkage long sys_quotactl_fd(unsigned int fd, unsigned int cmd, qid_t id,
void __user *addr);
-
-/* fs/readdir.c */
asmlinkage long sys_getdents64(unsigned int fd,
struct linux_dirent64 __user *dirent,
unsigned int count);
-
-/* fs/read_write.c */
asmlinkage long sys_llseek(unsigned int fd, unsigned long offset_high,
unsigned long offset_low, loff_t __user *result,
unsigned int whence);
@@ -515,12 +476,8 @@ asmlinkage long sys_preadv(unsigned long fd, const struct iovec __user *vec,
unsigned long vlen, unsigned long pos_l, unsigned long pos_h);
asmlinkage long sys_pwritev(unsigned long fd, const struct iovec __user *vec,
unsigned long vlen, unsigned long pos_l, unsigned long pos_h);
-
-/* fs/sendfile.c */
asmlinkage long sys_sendfile64(int out_fd, int in_fd,
loff_t __user *offset, size_t count);
-
-/* fs/select.c */
asmlinkage long sys_pselect6(int, fd_set __user *, fd_set __user *,
fd_set __user *, struct __kernel_timespec __user *,
void __user *);
@@ -533,19 +490,13 @@ asmlinkage long sys_ppoll(struct pollfd __user *, unsigned int,
asmlinkage long sys_ppoll_time32(struct pollfd __user *, unsigned int,
struct old_timespec32 __user *, const sigset_t __user *,
size_t);
-
-/* fs/signalfd.c */
asmlinkage long sys_signalfd4(int ufd, sigset_t __user *user_mask, size_t sizemask, int flags);
-
-/* fs/splice.c */
asmlinkage long sys_vmsplice(int fd, const struct iovec __user *iov,
unsigned long nr_segs, unsigned int flags);
asmlinkage long sys_splice(int fd_in, loff_t __user *off_in,
int fd_out, loff_t __user *off_out,
size_t len, unsigned int flags);
asmlinkage long sys_tee(int fdin, int fdout, size_t len, unsigned int flags);
-
-/* fs/stat.c */
asmlinkage long sys_readlinkat(int dfd, const char __user *path, char __user *buf,
int bufsiz);
asmlinkage long sys_newfstatat(int dfd, const char __user *filename,
@@ -556,8 +507,6 @@ asmlinkage long sys_fstat64(unsigned long fd, struct stat64 __user *statbuf);
asmlinkage long sys_fstatat64(int dfd, const char __user *filename,
struct stat64 __user *statbuf, int flag);
#endif
-
-/* fs/sync.c */
asmlinkage long sys_sync(void);
asmlinkage long sys_fsync(unsigned int fd);
asmlinkage long sys_fdatasync(unsigned int fd);
@@ -565,8 +514,6 @@ asmlinkage long sys_sync_file_range2(int fd, unsigned int flags,
loff_t offset, loff_t nbytes);
asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes,
unsigned int flags);
-
-/* fs/timerfd.c */
asmlinkage long sys_timerfd_create(int clockid, int flags);
asmlinkage long sys_timerfd_settime(int ufd, int flags,
const struct __kernel_itimerspec __user *utmr,
@@ -577,39 +524,25 @@ asmlinkage long sys_timerfd_gettime32(int ufd,
asmlinkage long sys_timerfd_settime32(int ufd, int flags,
const struct old_itimerspec32 __user *utmr,
struct old_itimerspec32 __user *otmr);
-
-/* fs/utimes.c */
asmlinkage long sys_utimensat(int dfd, const char __user *filename,
struct __kernel_timespec __user *utimes,
int flags);
asmlinkage long sys_utimensat_time32(unsigned int dfd,
const char __user *filename,
struct old_timespec32 __user *t, int flags);
-
-/* kernel/acct.c */
asmlinkage long sys_acct(const char __user *name);
-
-/* kernel/capability.c */
asmlinkage long sys_capget(cap_user_header_t header,
cap_user_data_t dataptr);
asmlinkage long sys_capset(cap_user_header_t header,
const cap_user_data_t data);
-
-/* kernel/exec_domain.c */
asmlinkage long sys_personality(unsigned int personality);
-
-/* kernel/exit.c */
asmlinkage long sys_exit(int error_code);
asmlinkage long sys_exit_group(int error_code);
asmlinkage long sys_waitid(int which, pid_t pid,
struct siginfo __user *infop,
int options, struct rusage __user *ru);
-
-/* kernel/fork.c */
asmlinkage long sys_set_tid_address(int __user *tidptr);
asmlinkage long sys_unshare(unsigned long unshare_flags);
-
-/* kernel/futex/syscalls.c */
asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
const struct __kernel_timespec __user *utime,
u32 __user *uaddr2, u32 val3);
@@ -626,30 +559,30 @@ asmlinkage long sys_futex_waitv(struct futex_waitv *waiters,
unsigned int nr_futexes, unsigned int flags,
struct __kernel_timespec __user *timeout, clockid_t clockid);
-/* kernel/hrtimer.c */
+asmlinkage long sys_futex_wake(void __user *uaddr, unsigned long mask, int nr, unsigned int flags);
+
+asmlinkage long sys_futex_wait(void __user *uaddr, unsigned long val, unsigned long mask,
+ unsigned int flags, struct __kernel_timespec __user *timespec,
+ clockid_t clockid);
+
+asmlinkage long sys_futex_requeue(struct futex_waitv __user *waiters,
+ unsigned int flags, int nr_wake, int nr_requeue);
+
asmlinkage long sys_nanosleep(struct __kernel_timespec __user *rqtp,
struct __kernel_timespec __user *rmtp);
asmlinkage long sys_nanosleep_time32(struct old_timespec32 __user *rqtp,
struct old_timespec32 __user *rmtp);
-
-/* kernel/itimer.c */
asmlinkage long sys_getitimer(int which, struct __kernel_old_itimerval __user *value);
asmlinkage long sys_setitimer(int which,
struct __kernel_old_itimerval __user *value,
struct __kernel_old_itimerval __user *ovalue);
-
-/* kernel/kexec.c */
asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
struct kexec_segment __user *segments,
unsigned long flags);
-
-/* kernel/module.c */
asmlinkage long sys_init_module(void __user *umod, unsigned long len,
const char __user *uargs);
asmlinkage long sys_delete_module(const char __user *name_user,
unsigned int flags);
-
-/* kernel/posix-timers.c */
asmlinkage long sys_timer_create(clockid_t which_clock,
struct sigevent __user *timer_event_spec,
timer_t __user * created_timer_id);
@@ -683,15 +616,9 @@ asmlinkage long sys_clock_getres_time32(clockid_t which_clock,
asmlinkage long sys_clock_nanosleep_time32(clockid_t which_clock, int flags,
struct old_timespec32 __user *rqtp,
struct old_timespec32 __user *rmtp);
-
-/* kernel/printk.c */
asmlinkage long sys_syslog(int type, char __user *buf, int len);
-
-/* kernel/ptrace.c */
asmlinkage long sys_ptrace(long request, long pid, unsigned long addr,
unsigned long data);
-/* kernel/sched/core.c */
-
asmlinkage long sys_sched_setparam(pid_t pid,
struct sched_param __user *param);
asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
@@ -710,8 +637,6 @@ asmlinkage long sys_sched_rr_get_interval(pid_t pid,
struct __kernel_timespec __user *interval);
asmlinkage long sys_sched_rr_get_interval_time32(pid_t pid,
struct old_timespec32 __user *interval);
-
-/* kernel/signal.c */
asmlinkage long sys_restart_syscall(void);
asmlinkage long sys_kill(pid_t pid, int sig);
asmlinkage long sys_tkill(pid_t pid, int sig);
@@ -737,8 +662,6 @@ asmlinkage long sys_rt_sigtimedwait_time32(const sigset_t __user *uthese,
const struct old_timespec32 __user *uts,
size_t sigsetsize);
asmlinkage long sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo);
-
-/* kernel/sys.c */
asmlinkage long sys_setpriority(int which, int who, int niceval);
asmlinkage long sys_getpriority(int which, int who);
asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd,
@@ -772,16 +695,12 @@ asmlinkage long sys_umask(int mask);
asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5);
asmlinkage long sys_getcpu(unsigned __user *cpu, unsigned __user *node, struct getcpu_cache __user *cache);
-
-/* kernel/time.c */
asmlinkage long sys_gettimeofday(struct __kernel_old_timeval __user *tv,
struct timezone __user *tz);
asmlinkage long sys_settimeofday(struct __kernel_old_timeval __user *tv,
struct timezone __user *tz);
asmlinkage long sys_adjtimex(struct __kernel_timex __user *txc_p);
asmlinkage long sys_adjtimex_time32(struct old_timex32 __user *txc_p);
-
-/* kernel/sys.c */
asmlinkage long sys_getpid(void);
asmlinkage long sys_getppid(void);
asmlinkage long sys_getuid(void);
@@ -790,8 +709,6 @@ asmlinkage long sys_getgid(void);
asmlinkage long sys_getegid(void);
asmlinkage long sys_gettid(void);
asmlinkage long sys_sysinfo(struct sysinfo __user *info);
-
-/* ipc/mqueue.c */
asmlinkage long sys_mq_open(const char __user *name, int oflag, umode_t mode, struct mq_attr __user *attr);
asmlinkage long sys_mq_unlink(const char __user *name);
asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *msg_ptr, size_t msg_len, unsigned int msg_prio, const struct __kernel_timespec __user *abs_timeout);
@@ -806,8 +723,6 @@ asmlinkage long sys_mq_timedsend_time32(mqd_t mqdes,
const char __user *u_msg_ptr,
unsigned int msg_len, unsigned int msg_prio,
const struct old_timespec32 __user *u_abs_timeout);
-
-/* ipc/msg.c */
asmlinkage long sys_msgget(key_t key, int msgflg);
asmlinkage long sys_old_msgctl(int msqid, int cmd, struct msqid_ds __user *buf);
asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf);
@@ -815,8 +730,6 @@ asmlinkage long sys_msgrcv(int msqid, struct msgbuf __user *msgp,
size_t msgsz, long msgtyp, int msgflg);
asmlinkage long sys_msgsnd(int msqid, struct msgbuf __user *msgp,
size_t msgsz, int msgflg);
-
-/* ipc/sem.c */
asmlinkage long sys_semget(key_t key, int nsems, int semflg);
asmlinkage long sys_semctl(int semid, int semnum, int cmd, unsigned long arg);
asmlinkage long sys_old_semctl(int semid, int semnum, int cmd, unsigned long arg);
@@ -828,15 +741,11 @@ asmlinkage long sys_semtimedop_time32(int semid, struct sembuf __user *sops,
const struct old_timespec32 __user *timeout);
asmlinkage long sys_semop(int semid, struct sembuf __user *sops,
unsigned nsops);
-
-/* ipc/shm.c */
asmlinkage long sys_shmget(key_t key, size_t size, int flag);
asmlinkage long sys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf);
asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf);
asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg);
asmlinkage long sys_shmdt(char __user *shmaddr);
-
-/* net/socket.c */
asmlinkage long sys_socket(int, int, int);
asmlinkage long sys_socketpair(int, int, int, int __user *);
asmlinkage long sys_bind(int, struct sockaddr __user *, int);
@@ -856,18 +765,12 @@ asmlinkage long sys_getsockopt(int fd, int level, int optname,
asmlinkage long sys_shutdown(int, int);
asmlinkage long sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
asmlinkage long sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
-
-/* mm/filemap.c */
asmlinkage long sys_readahead(int fd, loff_t offset, size_t count);
-
-/* mm/nommu.c, also with MMU */
asmlinkage long sys_brk(unsigned long brk);
asmlinkage long sys_munmap(unsigned long addr, size_t len);
asmlinkage long sys_mremap(unsigned long addr,
unsigned long old_len, unsigned long new_len,
unsigned long flags, unsigned long new_addr);
-
-/* security/keys/keyctl.c */
asmlinkage long sys_add_key(const char __user *_type,
const char __user *_description,
const void __user *_payload,
@@ -879,8 +782,6 @@ asmlinkage long sys_request_key(const char __user *_type,
key_serial_t destringid);
asmlinkage long sys_keyctl(int cmd, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5);
-
-/* arch/example/kernel/sys_example.c */
#ifdef CONFIG_CLONE_BACKWARDS
asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, unsigned long,
int __user *);
@@ -899,11 +800,9 @@ asmlinkage long sys_clone3(struct clone_args __user *uargs, size_t size);
asmlinkage long sys_execve(const char __user *filename,
const char __user *const __user *argv,
const char __user *const __user *envp);
-
-/* mm/fadvise.c */
asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice);
-/* mm/, CONFIG_MMU only */
+/* CONFIG_MMU only */
asmlinkage long sys_swapon(const char __user *specialfile, int swap_flags);
asmlinkage long sys_swapoff(const char __user *specialfile);
asmlinkage long sys_mprotect(unsigned long start, size_t len,
@@ -941,7 +840,6 @@ asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
const int __user *nodes,
int __user *status,
int flags);
-
asmlinkage long sys_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig,
siginfo_t __user *uinfo);
asmlinkage long sys_perf_event_open(
@@ -954,7 +852,6 @@ asmlinkage long sys_recvmmsg(int fd, struct mmsghdr __user *msg,
asmlinkage long sys_recvmmsg_time32(int fd, struct mmsghdr __user *msg,
unsigned int vlen, unsigned flags,
struct old_timespec32 __user *timeout);
-
asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr,
int options, struct rusage __user *ru);
asmlinkage long sys_prlimit64(pid_t pid, unsigned int resource,
@@ -1058,12 +955,21 @@ asmlinkage long sys_memfd_secret(unsigned int flags);
asmlinkage long sys_set_mempolicy_home_node(unsigned long start, unsigned long len,
unsigned long home_node,
unsigned long flags);
+asmlinkage long sys_cachestat(unsigned int fd,
+ struct cachestat_range __user *cstat_range,
+ struct cachestat __user *cstat, unsigned int flags);
+asmlinkage long sys_map_shadow_stack(unsigned long addr, unsigned long size, unsigned int flags);
+asmlinkage long sys_lsm_get_self_attr(unsigned int attr, struct lsm_ctx *ctx,
+ size_t *size, __u32 flags);
+asmlinkage long sys_lsm_set_self_attr(unsigned int attr, struct lsm_ctx *ctx,
+ size_t size, __u32 flags);
+asmlinkage long sys_lsm_list_modules(u64 *ids, size_t *size, u32 flags);
/*
* Architecture-specific system calls
*/
-/* arch/x86/kernel/ioport.c */
+/* x86 */
asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int on);
/* pciconfig: alpha, arm, arm64, ia64, sparc */
@@ -1171,11 +1077,11 @@ asmlinkage long sys_sysfs(int option,
unsigned long arg1, unsigned long arg2);
asmlinkage long sys_fork(void);
-/* obsolete: kernel/time/time.c */
+/* obsolete */
asmlinkage long sys_stime(__kernel_old_time_t __user *tptr);
asmlinkage long sys_stime32(old_time32_t __user *tptr);
-/* obsolete: kernel/signal.c */
+/* obsolete */
asmlinkage long sys_sigpending(old_sigset_t __user *uset);
asmlinkage long sys_sigprocmask(int how, old_sigset_t __user *set,
old_sigset_t __user *oset);
@@ -1195,19 +1101,19 @@ asmlinkage long sys_sgetmask(void);
asmlinkage long sys_ssetmask(int newmask);
asmlinkage long sys_signal(int sig, __sighandler_t handler);
-/* obsolete: kernel/sched/core.c */
+/* obsolete */
asmlinkage long sys_nice(int increment);
-/* obsolete: kernel/kexec_file.c */
+/* obsolete */
asmlinkage long sys_kexec_file_load(int kernel_fd, int initrd_fd,
unsigned long cmdline_len,
const char __user *cmdline_ptr,
unsigned long flags);
-/* obsolete: kernel/exit.c */
+/* obsolete */
asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options);
-/* obsolete: kernel/uid16.c */
+/* obsolete */
#ifdef CONFIG_HAVE_UID16
asmlinkage long sys_chown16(const char __user *filename,
old_uid_t user, old_gid_t group);
@@ -1234,10 +1140,10 @@ asmlinkage long sys_getgid16(void);
asmlinkage long sys_getegid16(void);
#endif
-/* obsolete: net/socket.c */
+/* obsolete */
asmlinkage long sys_socketcall(int call, unsigned long __user *args);
-/* obsolete: fs/stat.c */
+/* obsolete */
asmlinkage long sys_stat(const char __user *filename,
struct __old_kernel_stat __user *statbuf);
asmlinkage long sys_lstat(const char __user *filename,
@@ -1247,13 +1153,13 @@ asmlinkage long sys_fstat(unsigned int fd,
asmlinkage long sys_readlink(const char __user *path,
char __user *buf, int bufsiz);
-/* obsolete: fs/select.c */
+/* obsolete */
asmlinkage long sys_old_select(struct sel_arg_struct __user *arg);
-/* obsolete: fs/readdir.c */
+/* obsolete */
asmlinkage long sys_old_readdir(unsigned int, struct old_linux_dirent __user *, unsigned int);
-/* obsolete: kernel/sys.c */
+/* obsolete */
asmlinkage long sys_gethostname(char __user *name, int len);
asmlinkage long sys_uname(struct old_utsname __user *);
asmlinkage long sys_olduname(struct oldold_utsname __user *);
@@ -1261,11 +1167,11 @@ asmlinkage long sys_olduname(struct oldold_utsname __user *);
asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim);
#endif
-/* obsolete: ipc */
+/* obsolete */
asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second,
unsigned long third, void __user *ptr, long fifth);
-/* obsolete: mm/ */
+/* obsolete */
asmlinkage long sys_mmap_pgoff(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff);
@@ -1280,6 +1186,7 @@ asmlinkage long sys_ni_syscall(void);
#endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */
+asmlinkage long sys_ni_posix_timers(void);
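/*
 * Several of the declarations above are new syscalls. As one
 * userspace-facing sketch, cachestat() reports page-cache residency for
 * a byte range of an open file; len == 0 means "to EOF", and glibc may
 * not wrap the call, hence raw syscall(2):
 */
#include <linux/mman.h>		/* struct cachestat{,_range} */
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

static void show_residency(int fd)
{
	struct cachestat_range range = { .off = 0, .len = 0 };
	struct cachestat cs;

	if (syscall(__NR_cachestat, fd, &range, &cs, 0) == 0)
		printf("cached %llu dirty %llu writeback %llu\n",
		       (unsigned long long)cs.nr_cache,
		       (unsigned long long)cs.nr_dirty,
		       (unsigned long long)cs.nr_writeback);
}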
/*
* Kernel code should not call syscalls (i.e., sys_xyzyyz()) directly.
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 3d08277959af..ee7d33b89e9e 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -89,7 +89,7 @@ int proc_do_static_key(struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos);
/*
- * Register a set of sysctl names by calling register_sysctl_table
+ * Register a set of sysctl names by calling register_sysctl
* with an initialised array of struct ctl_table's. An entry with
* NULL procname terminates the table. table->de will be
* set up by the registration and need not be initialised in advance.
@@ -137,7 +137,17 @@ struct ctl_table {
void *data;
int maxlen;
umode_t mode;
- struct ctl_table *child; /* Deprecated */
+ /**
+ * enum type - Enumeration to differentiate between ctl target types
+ * @SYSCTL_TABLE_TYPE_DEFAULT: ctl target with no special considerations
+ * @SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY: Used to identify a permanently
+ * empty directory target to serve
+ * as mount point.
+ */
+ enum {
+ SYSCTL_TABLE_TYPE_DEFAULT,
+ SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY
+ } type;
proc_handler *proc_handler; /* Callback for text formatting */
struct ctl_table_poll *poll;
void *extra1;
@@ -149,12 +159,22 @@ struct ctl_node {
struct ctl_table_header *header;
};
-/* struct ctl_table_header is used to maintain dynamic lists of
- struct ctl_table trees. */
+/**
+ * struct ctl_table_header - maintains dynamic lists of struct ctl_table trees
+ * @ctl_table: pointer to the first element in ctl_table array
+ * @ctl_table_size: number of elements pointed by @ctl_table
+ * @used: The entry will never be touched when equal to 0.
+ * @count: Upped every time something is added to @inodes and downed every time
+ * something is removed from inodes
+ * @nreg: When nreg drops to 0 the ctl_table_header will be unregistered.
+ * @rcu: Delays the freeing of the inode. Introduced with "unfuck proc_sysctl ->d_compare()"
+ *
+ */
struct ctl_table_header {
union {
struct {
struct ctl_table *ctl_table;
+ int ctl_table_size;
int used;
int count;
int nreg;
@@ -190,27 +210,11 @@ struct ctl_table_root {
int (*permissions)(struct ctl_table_header *head, struct ctl_table *table);
};
-/* struct ctl_path describes where in the hierarchy a table is added */
-struct ctl_path {
- const char *procname;
-};
+#define register_sysctl(path, table) \
+ register_sysctl_sz(path, table, ARRAY_SIZE(table))
#ifdef CONFIG_SYSCTL
-#define DECLARE_SYSCTL_BASE(_name, _table) \
-static struct ctl_table _name##_base_table[] = { \
- { \
- .procname = #_name, \
- .mode = 0555, \
- .child = _table, \
- }, \
- { }, \
-}
-
-extern int __register_sysctl_base(struct ctl_table *base_table);
-
-#define register_sysctl_base(_name) __register_sysctl_base(_name##_base_table)
-
void proc_sys_poll_notify(struct ctl_table_poll *poll);
extern void setup_sysctl_set(struct ctl_table_set *p,
@@ -220,18 +224,20 @@ extern void retire_sysctl_set(struct ctl_table_set *set);
struct ctl_table_header *__register_sysctl_table(
struct ctl_table_set *set,
- const char *path, struct ctl_table *table);
-struct ctl_table_header *register_sysctl(const char *path, struct ctl_table *table);
-struct ctl_table_header *register_sysctl_table(struct ctl_table * table);
+ const char *path, struct ctl_table *table, size_t table_size);
+struct ctl_table_header *register_sysctl_sz(const char *path, struct ctl_table *table,
+ size_t table_size);
void unregister_sysctl_table(struct ctl_table_header * table);
extern int sysctl_init_bases(void);
extern void __register_sysctl_init(const char *path, struct ctl_table *table,
- const char *table_name);
-#define register_sysctl_init(path, table) __register_sysctl_init(path, table, #table)
+ const char *table_name, size_t table_size);
+#define register_sysctl_init(path, table) \
+ __register_sysctl_init(path, table, #table, ARRAY_SIZE(table))
extern struct ctl_table_header *register_sysctl_mount_point(const char *path);
void do_sysctl_args(void);
+bool sysctl_is_alias(char *param);
int do_proc_douintvec(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos,
int (*conv)(unsigned long *lvalp,
@@ -244,24 +250,8 @@ extern int unaligned_enabled;
extern int unaligned_dump_stack;
extern int no_unaligned_warning;
-extern struct ctl_table sysctl_mount_point[];
-
#else /* CONFIG_SYSCTL */
-#define DECLARE_SYSCTL_BASE(_name, _table)
-
-static inline int __register_sysctl_base(struct ctl_table *base_table)
-{
- return 0;
-}
-
-#define register_sysctl_base(table) __register_sysctl_base(table)
-
-static inline struct ctl_table_header *register_sysctl_table(struct ctl_table * table)
-{
- return NULL;
-}
-
static inline void register_sysctl_init(const char *path, struct ctl_table *table)
{
}
@@ -271,7 +261,9 @@ static inline struct ctl_table_header *register_sysctl_mount_point(const char *p
return NULL;
}
-static inline struct ctl_table_header *register_sysctl(const char *path, struct ctl_table *table)
+static inline struct ctl_table_header *register_sysctl_sz(const char *path,
+ struct ctl_table *table,
+ size_t table_size)
{
return NULL;
}
@@ -289,6 +281,11 @@ static inline void setup_sysctl_set(struct ctl_table_set *p,
static inline void do_sysctl_args(void)
{
}
+
+static inline bool sysctl_is_alias(char *param)
+{
+ return false;
+}
#endif /* CONFIG_SYSCTL */
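/*
 * With register_sysctl() now a macro over register_sysctl_sz(), the
 * table size travels with every registration, paving the way to retire
 * the empty sentinel entry. A registration sketch (names hypothetical):
 */
static int my_knob;

static struct ctl_table my_table[] = {
	{
		.procname	= "my_knob",
		.data		= &my_knob,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }	/* sentinel, still required during the transition */
};

static int __init my_sysctl_init(void)
{
	/* expands to register_sysctl_sz("kernel", my_table, ARRAY_SIZE(my_table)) */
	return register_sysctl("kernel", my_table) ? 0 : -ENOMEM;
}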
int sysctl_max_threads(struct ctl_table *table, int write, void *buffer,
diff --git a/include/linux/sysfb.h b/include/linux/sysfb.h
index c1ef5fc60a3c..19cb803dd5ec 100644
--- a/include/linux/sysfb.h
+++ b/include/linux/sysfb.h
@@ -9,7 +9,8 @@
#include <linux/kernel.h>
#include <linux/platform_data/simplefb.h>
-#include <linux/screen_info.h>
+
+struct screen_info;
enum {
M_I17, /* 17-Inch iMac */
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index fd3fe5c8c17f..b717a70219f6 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -181,6 +181,8 @@ struct bin_attribute {
char *, loff_t, size_t);
ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *,
char *, loff_t, size_t);
+ loff_t (*llseek)(struct file *, struct kobject *, struct bin_attribute *,
+ loff_t, int);
int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
struct vm_area_struct *vma);
};
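/*
 * Sketch of a binary attribute using the new llseek hook; the names are
 * hypothetical, fw_log_read is assumed to exist elsewhere, and a NULL
 * .llseek presumably keeps the previous default seek behaviour:
 */
static loff_t fw_log_llseek(struct file *file, struct kobject *kobj,
			    struct bin_attribute *attr, loff_t off, int whence)
{
	return fixed_size_llseek(file, off, whence, attr->size);
}

static const struct bin_attribute fw_log_attr = {
	.attr	= { .name = "fw_log", .mode = 0444 },
	.size	= SZ_4K,
	.read	= fw_log_read,
	.llseek	= fw_log_llseek,
};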
diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
index 3a582ec7a2f1..bdca467ebb77 100644
--- a/include/linux/sysrq.h
+++ b/include/linux/sysrq.h
@@ -30,7 +30,7 @@
#define SYSRQ_ENABLE_RTNICE 0x0100
struct sysrq_key_op {
- void (* const handler)(int);
+ void (* const handler)(u8);
const char * const help_msg;
const char * const action_msg;
const int enable_mask;
@@ -43,10 +43,10 @@ struct sysrq_key_op {
* are available -- else NULL's).
*/
-void handle_sysrq(int key);
-void __handle_sysrq(int key, bool check_mask);
-int register_sysrq_key(int key, const struct sysrq_key_op *op);
-int unregister_sysrq_key(int key, const struct sysrq_key_op *op);
+void handle_sysrq(u8 key);
+void __handle_sysrq(u8 key, bool check_mask);
+int register_sysrq_key(u8 key, const struct sysrq_key_op *op);
+int unregister_sysrq_key(u8 key, const struct sysrq_key_op *op);
extern const struct sysrq_key_op *__sysrq_reboot_op;
int sysrq_toggle_support(int enable_mask);
@@ -54,20 +54,20 @@ int sysrq_mask(void);
#else
-static inline void handle_sysrq(int key)
+static inline void handle_sysrq(u8 key)
{
}
-static inline void __handle_sysrq(int key, bool check_mask)
+static inline void __handle_sysrq(u8 key, bool check_mask)
{
}
-static inline int register_sysrq_key(int key, const struct sysrq_key_op *op)
+static inline int register_sysrq_key(u8 key, const struct sysrq_key_op *op)
{
return -EINVAL;
}
-static inline int unregister_sysrq_key(int key, const struct sysrq_key_op *op)
+static inline int unregister_sysrq_key(u8 key, const struct sysrq_key_op *op)
{
return -EINVAL;
}
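/*
 * Handler sketch under the new u8-keyed prototypes; the names and the
 * chosen key are illustrative:
 */
static void sysrq_handle_demo(u8 key)
{
	pr_info("sysrq: got key '%c'\n", key);
}

static const struct sysrq_key_op demo_sysrq_op = {
	.handler	= sysrq_handle_demo,
	.help_msg	= "demo(x)",
	.action_msg	= "Demo handler",
	.enable_mask	= SYSRQ_ENABLE_RTNICE,
};

/* registered with: register_sysrq_key('x', &demo_sysrq_op); */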
diff --git a/include/linux/tca6416_keypad.h b/include/linux/tca6416_keypad.h
index b0d36a9934cc..5cf6f6f82aa7 100644
--- a/include/linux/tca6416_keypad.h
+++ b/include/linux/tca6416_keypad.h
@@ -25,7 +25,6 @@ struct tca6416_keys_platform_data {
unsigned int rep:1; /* enable input subsystem auto repeat */
uint16_t pinmask;
uint16_t invert;
- int irq_is_gpio;
int use_polling; /* use polling if Interrupt is not connected*/
};
#endif
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index b4c08ac86983..a1c47a6d69b0 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -152,6 +152,7 @@ struct tcp_request_sock {
u64 snt_synack; /* first SYNACK sent time */
bool tfo_listener;
bool is_mptcp;
+ bool req_usec_ts;
#if IS_ENABLED(CONFIG_MPTCP)
bool drop_req;
#endif
@@ -165,6 +166,11 @@ struct tcp_request_sock {
* after data-in-SYN.
*/
u8 syn_tos;
+#ifdef CONFIG_TCP_AO
+ u8 ao_keyid;
+ u8 ao_rcv_next;
+ bool used_tcp_ao;
+#endif
};
static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
@@ -172,24 +178,135 @@ static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
return (struct tcp_request_sock *)req;
}
+static inline bool tcp_rsk_used_ao(const struct request_sock *req)
+{
+#ifndef CONFIG_TCP_AO
+ return false;
+#else
+ return tcp_rsk(req)->used_tcp_ao;
+#endif
+}
+
+#define TCP_RMEM_TO_WIN_SCALE 8
+
struct tcp_sock {
+ /* Cacheline organization can be found documented in
+ * Documentation/networking/net_cachelines/tcp_sock.rst.
+ * Please update the document when adding new fields.
+ */
+
/* inet_connection_sock has to be the first member of tcp_sock */
struct inet_connection_sock inet_conn;
- u16 tcp_header_len; /* Bytes of tcp header to send */
+
+ /* TX read-mostly hotpath cache lines */
+ __cacheline_group_begin(tcp_sock_read_tx);
+ /* timestamp of last sent data packet (for restart window) */
+ u32 max_window; /* Maximal window ever seen from peer */
+ u32 rcv_ssthresh; /* Current window clamp */
+ u32 reordering; /* Packet reordering metric. */
+ u32 notsent_lowat; /* TCP_NOTSENT_LOWAT */
u16 gso_segs; /* Max number of segs per GSO packet */
+ /* from STCP, retrans queue hinting */
+ struct sk_buff *lost_skb_hint;
+ struct sk_buff *retransmit_skb_hint;
+ __cacheline_group_end(tcp_sock_read_tx);
+
+ /* TXRX read-mostly hotpath cache lines */
+ __cacheline_group_begin(tcp_sock_read_txrx);
+ u32 tsoffset; /* timestamp offset */
+ u32 snd_wnd; /* The window we expect to receive */
+ u32 mss_cache; /* Cached effective mss, not including SACKS */
+ u32 snd_cwnd; /* Sending congestion window */
+ u32 prr_out; /* Total number of pkts sent during Recovery. */
+ u32 lost_out; /* Lost packets */
+ u32 sacked_out; /* SACK'd packets */
+ u16 tcp_header_len; /* Bytes of tcp header to send */
+ u8 scaling_ratio; /* see tcp_win_from_space() */
+ u8 chrono_type : 2, /* current chronograph type */
+ repair : 1,
+ tcp_usec_ts : 1, /* TSval values in usec */
+ is_sack_reneg:1, /* in recovery from loss with SACK reneg? */
+ is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */
+ __cacheline_group_end(tcp_sock_read_txrx);
+
+ /* RX read-mostly hotpath cache lines */
+ __cacheline_group_begin(tcp_sock_read_rx);
+ u32 copied_seq; /* Head of yet unread data */
+ u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
+ u32 snd_wl1; /* Sequence for window update */
+ u32 tlp_high_seq; /* snd_nxt at the time of TLP */
+ u32 rttvar_us; /* smoothed mdev_max */
+ u32 retrans_out; /* Retransmitted packets out */
+ u16 advmss; /* Advertised MSS */
+ u16 urg_data; /* Saved octet of OOB data and control flags */
+ u32 lost; /* Total data packets lost incl. rexmits */
+ struct minmax rtt_min;
+ /* OOO segments go in this rbtree. Socket lock must be held. */
+ struct rb_root out_of_order_queue;
+ u32 snd_ssthresh; /* Slow start size threshold */
+ __cacheline_group_end(tcp_sock_read_rx);
+
+ /* TX read-write hotpath cache lines */
+ __cacheline_group_begin(tcp_sock_write_tx) ____cacheline_aligned;
+ u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut
+ * The total number of segments sent.
+ */
+ u32 data_segs_out; /* RFC4898 tcpEStatsPerfDataSegsOut
+ * total number of data segments sent.
+ */
+ u64 bytes_sent; /* RFC4898 tcpEStatsPerfHCDataOctetsOut
+ * total number of data bytes sent.
+ */
+ u32 snd_sml; /* Last byte of the most recently transmitted small packet */
+ u32 chrono_start; /* Start time in jiffies of a TCP chrono */
+ u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */
+ u32 write_seq; /* Tail(+1) of data held in tcp send buffer */
+ u32 pushed_seq; /* Last pushed seq, required to talk to windows */
+ u32 lsndtime;
+ u32 mdev_us; /* medium deviation */
+ u64 tcp_wstamp_ns; /* departure time for next sent data packet */
+ u64 tcp_clock_cache; /* cache last tcp_clock_ns() (see tcp_mstamp_refresh()) */
+ u64 tcp_mstamp; /* most recent packet received/sent */
+ u32 rtt_seq; /* sequence number to update rttvar */
+ struct list_head tsorted_sent_queue; /* time-sorted sent but un-SACKed skbs */
+ struct sk_buff *highest_sack; /* skb just after the highest
+ * skb with SACKed bit set
+ * (validity guaranteed only if
+ * sacked_out > 0)
+ */
+ u8 ecn_flags; /* ECN status bits. */
+ __cacheline_group_end(tcp_sock_write_tx);
+ /* TXRX read-write hotpath cache lines */
+ __cacheline_group_begin(tcp_sock_write_txrx);
/*
* Header prediction flags
* 0x5?10 << 16 + snd_wnd in net byte order
*/
__be32 pred_flags;
-
+ u32 rcv_nxt; /* What we want to receive next */
+ u32 snd_nxt; /* Next sequence we send */
+ u32 snd_una; /* First byte we want an ack for */
+ u32 window_clamp; /* Maximal window to advertise */
+ u32 srtt_us; /* smoothed round trip time << 3 in usecs */
+ u32 packets_out; /* Packets which are "in flight" */
+ u32 snd_up; /* Urgent pointer */
+ u32 delivered; /* Total data packets delivered incl. rexmits */
+ u32 delivered_ce; /* Like the above but only ECE marked packets */
+ u32 app_limited; /* limited until "delivered" reaches this val */
+ u32 rcv_wnd; /* Current receiver window */
/*
- * RFC793 variables by their proper names. This means you can
- * read the code and the spec side by side (and laugh ...)
- * See RFC793 and RFC1122. The RFC writes these in capitals.
+ * Options received (usually on last packet, some only on SYN packets).
*/
- u64 bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived
+ struct tcp_options_received rx_opt;
+ u8 nonagle : 4,/* Disable Nagle algorithm? */
+ rate_app_limited:1; /* rate_{delivered,interval_us} limited? */
+ __cacheline_group_end(tcp_sock_write_txrx);
+
+ /* RX read-write hotpath cache lines */
+ __cacheline_group_begin(tcp_sock_write_rx);
+ u64 bytes_received;
+ /* RFC4898 tcpEStatsAppHCThruOctetsReceived
* sum(delta(rcv_nxt)), or how many bytes
* were acked.
*/
@@ -199,45 +316,43 @@ struct tcp_sock {
u32 data_segs_in; /* RFC4898 tcpEStatsPerfDataSegsIn
* total number of data segments in.
*/
- u32 rcv_nxt; /* What we want to receive next */
- u32 copied_seq; /* Head of yet unread data */
u32 rcv_wup; /* rcv_nxt on last window update sent */
- u32 snd_nxt; /* Next sequence we send */
- u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut
- * The total number of segments sent.
- */
- u32 data_segs_out; /* RFC4898 tcpEStatsPerfDataSegsOut
- * total number of data segments sent.
- */
- u64 bytes_sent; /* RFC4898 tcpEStatsPerfHCDataOctetsOut
- * total number of data bytes sent.
- */
+ u32 max_packets_out; /* max packets_out in last window */
+ u32 cwnd_usage_seq; /* right edge of cwnd usage tracking flight */
+ u32 rate_delivered; /* saved rate sample: packets delivered */
+ u32 rate_interval_us; /* saved rate sample: time elapsed */
+ u32 rcv_rtt_last_tsecr;
+ u64 first_tx_mstamp; /* start of window send phase */
+ u64 delivered_mstamp; /* time we reached "delivered" */
u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked
* sum(delta(snd_una)), or how many bytes
* were acked.
*/
+ struct {
+ u32 rtt_us;
+ u32 seq;
+ u64 time;
+ } rcv_rtt_est;
+/* Receiver queue space */
+ struct {
+ u32 space;
+ u32 seq;
+ u64 time;
+ } rcvq_space;
+ __cacheline_group_end(tcp_sock_write_rx);
+ /* End of Hot Path */
+
+/*
+ * RFC793 variables by their proper names. This means you can
+ * read the code and the spec side by side (and laugh ...)
+ * See RFC793 and RFC1122. The RFC writes these in capitals.
+ */
u32 dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups
* total number of DSACK blocks received
*/
- u32 snd_una; /* First byte we want an ack for */
- u32 snd_sml; /* Last byte of the most recently transmitted small packet */
- u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
- u32 lsndtime; /* timestamp of last sent data packet (for restart window) */
u32 last_oow_ack_time; /* timestamp of last out-of-window ACK */
u32 compressed_ack_rcv_nxt;
-
- u32 tsoffset; /* timestamp offset */
-
struct list_head tsq_node; /* anchor in tsq_tasklet.head list */
- struct list_head tsorted_sent_queue; /* time-sorted sent but un-SACKed skbs */
-
- u32 snd_wl1; /* Sequence for window update */
- u32 snd_wnd; /* The window we expect to receive */
- u32 max_window; /* Maximal window ever seen from peer */
- u32 mss_cache; /* Cached effective mss, not including SACKS */
-
- u32 window_clamp; /* Maximal window to advertise */
- u32 rcv_ssthresh; /* Current window clamp */
/* Information of the most recently (s)acked skb */
struct tcp_rack {
@@ -251,23 +366,15 @@ struct tcp_sock {
dsack_seen:1, /* Whether DSACK seen after last adj */
advanced:1; /* mstamp advanced since last lost marking */
} rack;
- u16 advmss; /* Advertised MSS */
u8 compressed_ack;
u8 dup_ack_counter:2,
tlp_retrans:1, /* TLP is a retransmission */
unused:5;
- u32 chrono_start; /* Start time in jiffies of a TCP chrono */
- u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */
- u8 chrono_type:2, /* current chronograph type */
- rate_app_limited:1, /* rate_{delivered,interval_us} limited? */
+ u8 thin_lto : 1,/* Use linear timeouts for thin streams */
+ recvmsg_inq : 1,/* Indicate # of bytes in queue upon recvmsg */
fastopen_connect:1, /* FASTOPEN_CONNECT sockopt */
fastopen_no_cookie:1, /* Allow send/recv SYN+data without a cookie */
- is_sack_reneg:1, /* in recovery from loss with SACK reneg? */
- fastopen_client_fail:2; /* reason why fastopen failed */
- u8 nonagle : 4,/* Disable Nagle algorithm? */
- thin_lto : 1,/* Use linear timeouts for thin streams */
- recvmsg_inq : 1,/* Indicate # of bytes in queue upon recvmsg */
- repair : 1,
+ fastopen_client_fail:2, /* reason why fastopen failed */
frto : 1;/* F-RTO (RFC5682) activated in CA_Loss */
u8 repair_queue;
u8 save_syn:2, /* Save headers of SYN packet */
@@ -275,45 +382,19 @@ struct tcp_sock {
syn_fastopen:1, /* SYN includes Fast Open option */
syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */
syn_fastopen_ch:1, /* Active TFO re-enabling probe */
- syn_data_acked:1,/* data in SYN is acked by SYN-ACK */
- is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */
- u32 tlp_high_seq; /* snd_nxt at the time of TLP */
+ syn_data_acked:1;/* data in SYN is acked by SYN-ACK */
u32 tcp_tx_delay; /* delay (in usec) added to TX packets */
- u64 tcp_wstamp_ns; /* departure time for next sent data packet */
- u64 tcp_clock_cache; /* cache last tcp_clock_ns() (see tcp_mstamp_refresh()) */
/* RTT measurement */
- u64 tcp_mstamp; /* most recent packet received/sent */
- u32 srtt_us; /* smoothed round trip time << 3 in usecs */
- u32 mdev_us; /* medium deviation */
u32 mdev_max_us; /* maximal mdev for the last rtt period */
- u32 rttvar_us; /* smoothed mdev_max */
- u32 rtt_seq; /* sequence number to update rttvar */
- struct minmax rtt_min;
- u32 packets_out; /* Packets which are "in flight" */
- u32 retrans_out; /* Retransmitted packets out */
- u32 max_packets_out; /* max packets_out in last window */
- u32 cwnd_usage_seq; /* right edge of cwnd usage tracking flight */
-
- u16 urg_data; /* Saved octet of OOB data and control flags */
- u8 ecn_flags; /* ECN status bits. */
u8 keepalive_probes; /* num of allowed keep alive probes */
- u32 reordering; /* Packet reordering metric. */
u32 reord_seen; /* number of data packet reordering events */
- u32 snd_up; /* Urgent pointer */
-
-/*
- * Options received (usually on last packet, some only on SYN packets).
- */
- struct tcp_options_received rx_opt;
/*
* Slow start and congestion control (see also Nagle, and Karn & Partridge)
*/
- u32 snd_ssthresh; /* Slow start size threshold */
- u32 snd_cwnd; /* Sending congestion window */
u32 snd_cwnd_cnt; /* Linear increase counter */
u32 snd_cwnd_clamp; /* Do not allow snd_cwnd to grow above this */
u32 snd_cwnd_used;
@@ -321,32 +402,10 @@ struct tcp_sock {
u32 prior_cwnd; /* cwnd right before starting loss recovery */
u32 prr_delivered; /* Number of newly delivered packets to
* receiver in Recovery. */
- u32 prr_out; /* Total number of pkts sent during Recovery. */
- u32 delivered; /* Total data packets delivered incl. rexmits */
- u32 delivered_ce; /* Like the above but only ECE marked packets */
- u32 lost; /* Total data packets lost incl. rexmits */
- u32 app_limited; /* limited until "delivered" reaches this val */
- u64 first_tx_mstamp; /* start of window send phase */
- u64 delivered_mstamp; /* time we reached "delivered" */
- u32 rate_delivered; /* saved rate sample: packets delivered */
- u32 rate_interval_us; /* saved rate sample: time elapsed */
-
- u32 rcv_wnd; /* Current receiver window */
- u32 write_seq; /* Tail(+1) of data held in tcp send buffer */
- u32 notsent_lowat; /* TCP_NOTSENT_LOWAT */
- u32 pushed_seq; /* Last pushed seq, required to talk to windows */
- u32 lost_out; /* Lost packets */
- u32 sacked_out; /* SACK'd packets */
struct hrtimer pacing_timer;
struct hrtimer compressed_ack_timer;
- /* from STCP, retrans queue hinting */
- struct sk_buff* lost_skb_hint;
- struct sk_buff *retransmit_skb_hint;
-
- /* OOO segments go in this rbtree. Socket lock must be held. */
- struct rb_root out_of_order_queue;
struct sk_buff *ooo_last_skb; /* cache rb_last(out_of_order_queue) */
/* SACKs data, these 2 need to be together (see tcp_options_write) */
@@ -355,12 +414,6 @@ struct tcp_sock {
struct tcp_sack_block recv_sack_cache[4];
- struct sk_buff *highest_sack; /* skb just after the highest
- * skb with SACKed bit set
- * (validity guaranteed only if
- * sacked_out > 0)
- */
-
int lost_cnt_hint;
u32 prior_ssthresh; /* ssthresh saved at recovery start */
@@ -375,6 +428,14 @@ struct tcp_sock {
* Total data bytes retransmitted
*/
u32 total_retrans; /* Total retransmits for entire connection */
+ u32 rto_stamp; /* Start time (ms) of last CA_Loss recovery */
+ u16 total_rto; /* Total number of RTO timeouts, including
+ * SYN/SYN-ACK and recurring timeouts.
+ */
+ u16 total_rto_recoveries; /* Total number of RTO recoveries,
+ * including any unfinished recovery.
+ */
+ u32 total_rto_time; /* ms spent in (completed) RTO recoveries. */
u32 urg_seq; /* Seq of received urgent pointer */
unsigned int keepalive_time; /* time before keep alive takes place */
@@ -403,21 +464,6 @@ struct tcp_sock {
u32 rcv_ooopack; /* Received out-of-order packets, for tcpinfo */
-/* Receiver side RTT estimation */
- u32 rcv_rtt_last_tsecr;
- struct {
- u32 rtt_us;
- u32 seq;
- u64 time;
- } rcv_rtt_est;
-
-/* Receiver queue space */
- struct {
- u32 space;
- u32 seq;
- u64 time;
- } rcvq_space;
-
/* TCP-specific MTU probe information. */
struct {
u32 probe_seq_start;
@@ -435,13 +481,18 @@ struct tcp_sock {
bool syn_smc; /* SYN includes SMC */
#endif
-#ifdef CONFIG_TCP_MD5SIG
-/* TCP AF-Specific parts; only used by MD5 Signature support so far */
+#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
+/* TCP AF-Specific parts; only used by TCP-AO/MD5 Signature support so far */
const struct tcp_sock_af_ops *af_specific;
+#ifdef CONFIG_TCP_MD5SIG
/* TCP MD5 Signature Option information */
struct tcp_md5sig_info __rcu *md5sig_info;
#endif
+#ifdef CONFIG_TCP_AO
+ struct tcp_ao_info __rcu *ao_info;
+#endif
+#endif
/* TCP fastopen related information */
struct tcp_fastopen_request *fastopen_req;
@@ -461,15 +512,17 @@ enum tsq_enum {
TCP_MTU_REDUCED_DEFERRED, /* tcp_v{4|6}_err() could not call
* tcp_v{4|6}_mtu_reduced()
*/
+ TCP_ACK_DEFERRED, /* TX pure ack is deferred */
};
enum tsq_flags {
- TSQF_THROTTLED = (1UL << TSQ_THROTTLED),
- TSQF_QUEUED = (1UL << TSQ_QUEUED),
- TCPF_TSQ_DEFERRED = (1UL << TCP_TSQ_DEFERRED),
- TCPF_WRITE_TIMER_DEFERRED = (1UL << TCP_WRITE_TIMER_DEFERRED),
- TCPF_DELACK_TIMER_DEFERRED = (1UL << TCP_DELACK_TIMER_DEFERRED),
- TCPF_MTU_REDUCED_DEFERRED = (1UL << TCP_MTU_REDUCED_DEFERRED),
+ TSQF_THROTTLED = BIT(TSQ_THROTTLED),
+ TSQF_QUEUED = BIT(TSQ_QUEUED),
+ TCPF_TSQ_DEFERRED = BIT(TCP_TSQ_DEFERRED),
+ TCPF_WRITE_TIMER_DEFERRED = BIT(TCP_WRITE_TIMER_DEFERRED),
+ TCPF_DELACK_TIMER_DEFERRED = BIT(TCP_DELACK_TIMER_DEFERRED),
+ TCPF_MTU_REDUCED_DEFERRED = BIT(TCP_MTU_REDUCED_DEFERRED),
+ TCPF_ACK_DEFERRED = BIT(TCP_ACK_DEFERRED),
};
#define tcp_sk(ptr) container_of_const(ptr, struct tcp_sock, inet_conn.icsk_inet.sk)
@@ -495,6 +548,9 @@ struct tcp_timewait_sock {
#ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *tw_md5_key;
#endif
+#ifdef CONFIG_TCP_AO
+ struct tcp_ao_info __rcu *ao_info;
+#endif
};
static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk)
@@ -513,7 +569,7 @@ static inline void fastopen_queue_tune(struct sock *sk, int backlog)
struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
int somaxconn = READ_ONCE(sock_net(sk)->core.sysctl_somaxconn);
- queue->fastopenq.max_qlen = min_t(unsigned int, backlog, somaxconn);
+ WRITE_ONCE(queue->fastopenq.max_qlen, min_t(unsigned int, backlog, somaxconn));
}
static inline void tcp_move_syn(struct tcp_sock *tp,
@@ -562,6 +618,11 @@ void __tcp_sock_set_nodelay(struct sock *sk, bool on);
void tcp_sock_set_nodelay(struct sock *sk);
void tcp_sock_set_quickack(struct sock *sk, int val);
int tcp_sock_set_syncnt(struct sock *sk, int val);
-void tcp_sock_set_user_timeout(struct sock *sk, u32 val);
+int tcp_sock_set_user_timeout(struct sock *sk, int val);
+
+static inline bool dst_tcp_usec_ts(const struct dst_entry *dst)
+{
+ return dst_feature(dst, RTAX_FEATURE_TCP_USEC_TS);
+}
#endif /* _LINUX_TCP_H */
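
The tcp_sock regrouping above leans on zero-size marker arrays, emitted by
__cacheline_group_begin()/__cacheline_group_end() from <linux/cache.h>, to
delimit hot field groups. A rough standalone sketch of the technique follows,
assuming a GNU C compiler (zero-length arrays) and C11 static_assert; the
macro names and the 64-byte bound here are illustrative, not the kernel's:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Zero-size arrays mark where a group of hot fields begins and ends;
 * they occupy no storage but expose the group boundaries to offsetof().
 */
#define group_begin(GROUP)	uint8_t __begin_##GROUP[0]
#define group_end(GROUP)	uint8_t __end_##GROUP[0]

struct example_sock {
	group_begin(read_tx);
	uint32_t max_window;
	uint32_t notsent_lowat;
	group_end(read_tx);
};

/* Build-time bound on how far the group may span. */
static_assert(offsetof(struct example_sock, __end_read_tx) -
	      offsetof(struct example_sock, __begin_read_tx) <= 64,
	      "read_tx group must fit in one 64-byte cacheline");
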
diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h
index 17eb1c5205d3..911ddf92dcee 100644
--- a/include/linux/tee_drv.h
+++ b/include/linux/tee_drv.h
@@ -84,6 +84,7 @@ struct tee_param {
* @release: release this open file
* @open_session: open a new session
* @close_session: close a session
+ * @system_session: declare session as a system session
* @invoke_func: invoke a trusted function
* @cancel_req: request cancel of an ongoing invoke or open
* @supp_recv: called for supplicant to get a command
@@ -100,6 +101,7 @@ struct tee_driver_ops {
struct tee_ioctl_open_session_arg *arg,
struct tee_param *param);
int (*close_session)(struct tee_context *ctx, u32 session);
+ int (*system_session)(struct tee_context *ctx, u32 session);
int (*invoke_func)(struct tee_context *ctx,
struct tee_ioctl_invoke_arg *arg,
struct tee_param *param);
@@ -430,6 +432,20 @@ int tee_client_open_session(struct tee_context *ctx,
int tee_client_close_session(struct tee_context *ctx, u32 session);
/**
+ * tee_client_system_session() - Declare session as a system session
+ * @ctx: TEE Context
+ * @session: Session id
+ *
+ * This function requests TEE to provision an entry context ready to use for
+ * that session only. The provisioned entry context is used for command
+ * invocation and session closure, not for command cancellation requests.
+ * TEE releases the provisioned context upon session closure.
+ *
+ * Return: 0 if an entry context has been provisioned, or < 0 on error.
+ */
+int tee_client_system_session(struct tee_context *ctx, u32 session);
+
+/**
* tee_client_invoke_func() - Invoke a function in a Trusted Application
* @ctx: TEE Context
* @arg: Invoke arguments, see description of
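
Since the hunk is cut short here, a hedged sketch of how a client driver might
use the new tee_client_system_session() right after opening a session; the
open arguments are assumed to be prepared by the caller as for any ordinary
tee_client_open_session() call:

static int open_system_session(struct tee_context *ctx,
			       struct tee_ioctl_open_session_arg *arg,
			       struct tee_param *param, u32 *session)
{
	int rc;

	rc = tee_client_open_session(ctx, arg, param);
	if (rc < 0 || arg->ret != 0)
		return rc < 0 ? rc : -EINVAL;

	*session = arg->session;

	/* Ask the TEE to provision a dedicated entry context. */
	rc = tee_client_system_session(ctx, *session);
	if (rc)
		tee_client_close_session(ctx, *session);

	return rc;
}
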
diff --git a/include/linux/tegra-icc.h b/include/linux/tegra-icc.h
new file mode 100644
index 000000000000..4b4d4bee290c
--- /dev/null
+++ b/include/linux/tegra-icc.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2022-2023 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#ifndef LINUX_TEGRA_ICC_H
+#define LINUX_TEGRA_ICC_H
+
+enum tegra_icc_client_type {
+ TEGRA_ICC_NONE,
+ TEGRA_ICC_NISO,
+ TEGRA_ICC_ISO_DISPLAY,
+ TEGRA_ICC_ISO_VI,
+ TEGRA_ICC_ISO_AUDIO,
+ TEGRA_ICC_ISO_VIFAL,
+};
+
+/* ICC IDs for MC clients used in BPMP */
+#define TEGRA_ICC_BPMP_DEBUG 1
+#define TEGRA_ICC_BPMP_CPU_CLUSTER0 2
+#define TEGRA_ICC_BPMP_CPU_CLUSTER1 3
+#define TEGRA_ICC_BPMP_CPU_CLUSTER2 4
+#define TEGRA_ICC_BPMP_GPU 5
+#define TEGRA_ICC_BPMP_CACTMON 6
+#define TEGRA_ICC_BPMP_DISPLAY 7
+#define TEGRA_ICC_BPMP_VI 8
+#define TEGRA_ICC_BPMP_EQOS 9
+#define TEGRA_ICC_BPMP_PCIE_0 10
+#define TEGRA_ICC_BPMP_PCIE_1 11
+#define TEGRA_ICC_BPMP_PCIE_2 12
+#define TEGRA_ICC_BPMP_PCIE_3 13
+#define TEGRA_ICC_BPMP_PCIE_4 14
+#define TEGRA_ICC_BPMP_PCIE_5 15
+#define TEGRA_ICC_BPMP_PCIE_6 16
+#define TEGRA_ICC_BPMP_PCIE_7 17
+#define TEGRA_ICC_BPMP_PCIE_8 18
+#define TEGRA_ICC_BPMP_PCIE_9 19
+#define TEGRA_ICC_BPMP_PCIE_10 20
+#define TEGRA_ICC_BPMP_DLA_0 21
+#define TEGRA_ICC_BPMP_DLA_1 22
+#define TEGRA_ICC_BPMP_SDMMC_1 23
+#define TEGRA_ICC_BPMP_SDMMC_2 24
+#define TEGRA_ICC_BPMP_SDMMC_3 25
+#define TEGRA_ICC_BPMP_SDMMC_4 26
+#define TEGRA_ICC_BPMP_NVDEC 27
+#define TEGRA_ICC_BPMP_NVENC 28
+#define TEGRA_ICC_BPMP_NVJPG_0 29
+#define TEGRA_ICC_BPMP_NVJPG_1 30
+#define TEGRA_ICC_BPMP_OFAA 31
+#define TEGRA_ICC_BPMP_XUSB_HOST 32
+#define TEGRA_ICC_BPMP_XUSB_DEV 33
+#define TEGRA_ICC_BPMP_TSEC 34
+#define TEGRA_ICC_BPMP_VIC 35
+#define TEGRA_ICC_BPMP_APE 36
+#define TEGRA_ICC_BPMP_APEDMA 37
+#define TEGRA_ICC_BPMP_SE 38
+#define TEGRA_ICC_BPMP_ISP 39
+#define TEGRA_ICC_BPMP_HDA 40
+#define TEGRA_ICC_BPMP_VIFAL 41
+#define TEGRA_ICC_BPMP_VI2FAL 42
+#define TEGRA_ICC_BPMP_VI2 43
+#define TEGRA_ICC_BPMP_RCE 44
+#define TEGRA_ICC_BPMP_PVA 45
+
+#endif /* LINUX_TEGRA_ICC_H */
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 87837094d549..b7a3deb372fd 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -32,6 +32,7 @@
struct thermal_zone_device;
struct thermal_cooling_device;
struct thermal_instance;
+struct thermal_debugfs;
struct thermal_attr;
enum thermal_trend {
@@ -51,6 +52,25 @@ enum thermal_notify_event {
THERMAL_DEVICE_POWER_CAPABILITY_CHANGED, /* power capability changed */
THERMAL_TABLE_CHANGED, /* Thermal table(s) changed */
THERMAL_EVENT_KEEP_ALIVE, /* Request for user space handler to respond */
+	THERMAL_TZ_BIND_CDEV, /* Cooling dev is bound to the thermal zone */
+	THERMAL_TZ_UNBIND_CDEV, /* Cooling dev is unbound from the thermal zone */
+ THERMAL_INSTANCE_WEIGHT_CHANGED, /* Thermal instance weight changed */
+};
+
+/**
+ * struct thermal_trip - representation of a point in temperature domain
+ * @temperature: temperature value in milliCelsius
+ * @hysteresis: relative hysteresis in milliCelsius
+ * @threshold: trip crossing notification threshold in milliCelsius
+ * @type: trip point type
+ * @priv: pointer to driver data associated with this trip
+ */
+struct thermal_trip {
+ int temperature;
+ int hysteresis;
+ int threshold;
+ enum thermal_trip_type type;
+ void *priv;
};
struct thermal_zone_device_ops {
@@ -62,32 +82,16 @@ struct thermal_zone_device_ops {
int (*set_trips) (struct thermal_zone_device *, int, int);
int (*change_mode) (struct thermal_zone_device *,
enum thermal_device_mode);
- int (*get_trip_type) (struct thermal_zone_device *, int,
- enum thermal_trip_type *);
- int (*get_trip_temp) (struct thermal_zone_device *, int, int *);
int (*set_trip_temp) (struct thermal_zone_device *, int, int);
- int (*get_trip_hyst) (struct thermal_zone_device *, int, int *);
int (*set_trip_hyst) (struct thermal_zone_device *, int, int);
int (*get_crit_temp) (struct thermal_zone_device *, int *);
int (*set_emul_temp) (struct thermal_zone_device *, int);
- int (*get_trend) (struct thermal_zone_device *, int,
- enum thermal_trend *);
+ int (*get_trend) (struct thermal_zone_device *,
+ const struct thermal_trip *, enum thermal_trend *);
void (*hot)(struct thermal_zone_device *);
void (*critical)(struct thermal_zone_device *);
};
-/**
- * struct thermal_trip - representation of a point in temperature domain
- * @temperature: temperature value in miliCelsius
- * @hysteresis: relative hysteresis in miliCelsius
- * @type: trip point type
- */
-struct thermal_trip {
- int temperature;
- int hysteresis;
- enum thermal_trip_type type;
-};
-
struct thermal_cooling_device_ops {
int (*get_max_state) (struct thermal_cooling_device *, unsigned long *);
int (*get_cur_state) (struct thermal_cooling_device *, unsigned long *);
@@ -99,7 +103,7 @@ struct thermal_cooling_device_ops {
struct thermal_cooling_device {
int id;
- char *type;
+ const char *type;
unsigned long max_state;
struct device device;
struct device_node *np;
@@ -110,6 +114,9 @@ struct thermal_cooling_device {
struct mutex lock; /* protect thermal_instances list */
struct list_head thermal_instances;
struct list_head node;
+#ifdef CONFIG_THERMAL_DEBUGFS
+ struct thermal_debugfs *debugfs;
+#endif
};
/**
@@ -117,6 +124,7 @@ struct thermal_cooling_device {
* @id: unique id number for each thermal zone
* @type: the thermal zone device type
* @device: &struct device for this thermal zone
+ * @removal: removal completion
* @trip_temp_attrs: attributes for trip points for sysfs: trip temperature
* @trip_type_attrs: attributes for trip points for sysfs: trip type
* @trip_hyst_attrs: attributes for trip points for sysfs: trip hysteresis
@@ -124,7 +132,6 @@ struct thermal_cooling_device {
* @devdata: private pointer for device private data
* @trips: an array of struct thermal_trip
* @num_trips: number of trip points the thermal zone supports
- * @trips_disabled; bitmap for disabled trips
* @passive_delay_jiffies: number of jiffies to wait between polls when
* performing passive cooling.
* @polling_delay_jiffies: number of jiffies to wait between polls when
@@ -152,11 +159,13 @@ struct thermal_cooling_device {
* @node: node in thermal_tz_list (in thermal_core.c)
* @poll_queue: delayed work for polling
* @notify_event: Last notification event
+ * @suspended: thermal zone suspend indicator
*/
struct thermal_zone_device {
int id;
char type[THERMAL_NAME_LENGTH];
struct device device;
+ struct completion removal;
struct attribute_group trips_attribute_group;
struct thermal_attr *trip_temp_attrs;
struct thermal_attr *trip_type_attrs;
@@ -165,7 +174,6 @@ struct thermal_zone_device {
void *devdata;
struct thermal_trip *trips;
int num_trips;
- unsigned long trips_disabled; /* bitmap for disabled trips */
unsigned long passive_delay_jiffies;
unsigned long polling_delay_jiffies;
int temperature;
@@ -185,6 +193,10 @@ struct thermal_zone_device {
struct list_head node;
struct delayed_work poll_queue;
enum thermal_notify_event notify_event;
+#ifdef CONFIG_THERMAL_DEBUGFS
+ struct thermal_debugfs *debugfs;
+#endif
+ bool suspended;
};
/**
@@ -197,13 +209,18 @@ struct thermal_zone_device {
* thermal zone.
* @throttle: callback called for every trip point even if temperature is
* below the trip point temperature
+ * @update_tz: callback called when thermal zone internals have changed, e.g.
+ * a thermal cooling instance was added or removed
* @governor_list: node in thermal_governor_list (in thermal_core.c)
*/
struct thermal_governor {
char name[THERMAL_NAME_LENGTH];
int (*bind_to_tz)(struct thermal_zone_device *tz);
void (*unbind_from_tz)(struct thermal_zone_device *tz);
- int (*throttle)(struct thermal_zone_device *tz, int trip);
+ int (*throttle)(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip);
+ void (*update_tz)(struct thermal_zone_device *tz,
+ enum thermal_notify_event reason);
struct list_head governor_list;
};
@@ -283,42 +300,53 @@ int __thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id,
struct thermal_trip *trip);
int thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id,
struct thermal_trip *trip);
-
-int thermal_zone_set_trip(struct thermal_zone_device *tz, int trip_id,
- const struct thermal_trip *trip);
-
+int for_each_thermal_trip(struct thermal_zone_device *tz,
+ int (*cb)(struct thermal_trip *, void *),
+ void *data);
+int thermal_zone_for_each_trip(struct thermal_zone_device *tz,
+ int (*cb)(struct thermal_trip *, void *),
+ void *data);
int thermal_zone_get_num_trips(struct thermal_zone_device *tz);
+void thermal_zone_set_trip_temp(struct thermal_zone_device *tz,
+ struct thermal_trip *trip, int temp);
int thermal_zone_get_crit_temp(struct thermal_zone_device *tz, int *temp);
-#ifdef CONFIG_THERMAL_ACPI
-int thermal_acpi_active_trip_temp(struct acpi_device *adev, int id, int *ret_temp);
-int thermal_acpi_passive_trip_temp(struct acpi_device *adev, int *ret_temp);
-int thermal_acpi_hot_trip_temp(struct acpi_device *adev, int *ret_temp);
-int thermal_acpi_critical_trip_temp(struct acpi_device *adev, int *ret_temp);
-#endif
-
#ifdef CONFIG_THERMAL
-struct thermal_zone_device *thermal_zone_device_register(const char *, int, int,
- void *, struct thermal_zone_device_ops *,
- struct thermal_zone_params *, int, int);
-
-void thermal_zone_device_unregister(struct thermal_zone_device *);
-
-struct thermal_zone_device *
-thermal_zone_device_register_with_trips(const char *, struct thermal_trip *, int, int,
- void *, struct thermal_zone_device_ops *,
- struct thermal_zone_params *, int, int);
+struct thermal_zone_device *thermal_zone_device_register_with_trips(
+ const char *type,
+ struct thermal_trip *trips,
+ int num_trips, int mask,
+ void *devdata,
+ struct thermal_zone_device_ops *ops,
+ const struct thermal_zone_params *tzp,
+ int passive_delay, int polling_delay);
+
+struct thermal_zone_device *thermal_tripless_zone_device_register(
+ const char *type,
+ void *devdata,
+ struct thermal_zone_device_ops *ops,
+ const struct thermal_zone_params *tzp);
+
+void thermal_zone_device_unregister(struct thermal_zone_device *tz);
void *thermal_zone_device_priv(struct thermal_zone_device *tzd);
const char *thermal_zone_device_type(struct thermal_zone_device *tzd);
int thermal_zone_device_id(struct thermal_zone_device *tzd);
struct device *thermal_zone_device(struct thermal_zone_device *tzd);
+int thermal_bind_cdev_to_trip(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip,
+ struct thermal_cooling_device *cdev,
+ unsigned long upper, unsigned long lower,
+ unsigned int weight);
int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int,
struct thermal_cooling_device *,
unsigned long, unsigned long,
unsigned int);
+int thermal_unbind_cdev_from_trip(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip,
+ struct thermal_cooling_device *cdev);
int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int,
struct thermal_cooling_device *);
void thermal_zone_device_update(struct thermal_zone_device *,
@@ -345,15 +373,26 @@ int thermal_zone_device_enable(struct thermal_zone_device *tz);
int thermal_zone_device_disable(struct thermal_zone_device *tz);
void thermal_zone_device_critical(struct thermal_zone_device *tz);
#else
-static inline struct thermal_zone_device *thermal_zone_device_register(
- const char *type, int trips, int mask, void *devdata,
- struct thermal_zone_device_ops *ops,
- struct thermal_zone_params *tzp,
- int passive_delay, int polling_delay)
+static inline struct thermal_zone_device *thermal_zone_device_register_with_trips(
+ const char *type,
+ struct thermal_trip *trips,
+ int num_trips, int mask,
+ void *devdata,
+ struct thermal_zone_device_ops *ops,
+ const struct thermal_zone_params *tzp,
+ int passive_delay, int polling_delay)
{ return ERR_PTR(-ENODEV); }
-static inline void thermal_zone_device_unregister(
- struct thermal_zone_device *tz)
+
+static inline struct thermal_zone_device *thermal_tripless_zone_device_register(
+ const char *type,
+ void *devdata,
+ struct thermal_zone_device_ops *ops,
+ const struct thermal_zone_params *tzp)
+{ return ERR_PTR(-ENODEV); }
+
+static inline void thermal_zone_device_unregister(struct thermal_zone_device *tz)
{ }
+
static inline struct thermal_cooling_device *
thermal_cooling_device_register(const char *type, void *devdata,
const struct thermal_cooling_device_ops *ops)
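
To make the reshuffled registration API concrete, a hedged sketch of a driver
registering a zone through thermal_zone_device_register_with_trips(); the
.get_temp signature is assumed from the unchanged part of
thermal_zone_device_ops, and the trip values are arbitrary:

static int demo_get_temp(struct thermal_zone_device *tz, int *temp)
{
	*temp = 45000;	/* milliCelsius; a real driver would read hardware */
	return 0;
}

static struct thermal_zone_device_ops demo_ops = {
	.get_temp = demo_get_temp,
};

static struct thermal_trip demo_trips[] = {
	{ .temperature = 75000, .hysteresis = 2000, .type = THERMAL_TRIP_PASSIVE },
	{ .temperature = 95000, .type = THERMAL_TRIP_CRITICAL },
};

static struct thermal_zone_device *demo_register(void *devdata)
{
	/* zero trip mask, no tzp, no passive delay, poll every 1000 ms */
	return thermal_zone_device_register_with_trips("demo", demo_trips,
						       ARRAY_SIZE(demo_trips),
						       0, devdata, &demo_ops,
						       NULL, 0, 1000);
}
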
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index c02646884fa8..9ea0b28068f4 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -256,6 +256,11 @@ check_copy_size(const void *addr, size_t bytes, bool is_source)
static inline void arch_setup_new_exec(void) { }
#endif
+void arch_task_cache_init(void); /* for CONFIG_SH */
+void arch_release_task_struct(struct task_struct *tsk);
+int arch_dup_task_struct(struct task_struct *dst,
+ struct task_struct *src);
+
#endif /* __KERNEL__ */
#endif /* _LINUX_THREAD_INFO_H */
diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h
index 90cd08ab2f5d..2c835e5c41f6 100644
--- a/include/linux/thunderbolt.h
+++ b/include/linux/thunderbolt.h
@@ -86,7 +86,7 @@ struct tb {
unsigned long privdata[];
};
-extern struct bus_type tb_bus_type;
+extern const struct bus_type tb_bus_type;
extern struct device_type tb_service_type;
extern struct device_type tb_xdomain_type;
@@ -172,6 +172,20 @@ int tb_register_property_dir(const char *key, struct tb_property_dir *dir);
void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);
/**
+ * enum tb_link_width - Thunderbolt/USB4 link width
+ * @TB_LINK_WIDTH_SINGLE: Single lane link
+ * @TB_LINK_WIDTH_DUAL: Dual lane symmetric link
+ * @TB_LINK_WIDTH_ASYM_TX: Dual lane asymmetric Gen 4 link with 3 transmitters
+ * @TB_LINK_WIDTH_ASYM_RX: Dual lane asymmetric Gen 4 link with 3 receivers
+ */
+enum tb_link_width {
+ TB_LINK_WIDTH_SINGLE = BIT(0),
+ TB_LINK_WIDTH_DUAL = BIT(1),
+ TB_LINK_WIDTH_ASYM_TX = BIT(2),
+ TB_LINK_WIDTH_ASYM_RX = BIT(3),
+};
+
+/**
* struct tb_xdomain - Cross-domain (XDomain) connection
* @dev: XDomain device
* @tb: Pointer to the domain
@@ -186,7 +200,7 @@ void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);
* @vendor_name: Name of the vendor (or %NULL if not known)
* @device_name: Name of the device (or %NULL if not known)
* @link_speed: Speed of the link in Gb/s
- * @link_width: Width of the link (1 or 2)
+ * @link_width: Width of the downstream facing link
* @link_usb4: Downstream link is USB4
* @is_unplugged: The XDomain is unplugged
* @needs_uuid: If the XDomain does not have @remote_uuid it will be
@@ -234,7 +248,7 @@ struct tb_xdomain {
const char *vendor_name;
const char *device_name;
unsigned int link_speed;
- unsigned int link_width;
+ enum tb_link_width link_width;
bool link_usb4;
bool is_unplugged;
bool needs_uuid;
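
Because the tb_link_width values are distinct bits rather than a plain
enumeration of widths, asymmetric links are naturally tested with a mask; a
small sketch:

static bool xd_link_is_asymmetric(const struct tb_xdomain *xd)
{
	return xd->link_width & (TB_LINK_WIDTH_ASYM_TX | TB_LINK_WIDTH_ASYM_RX);
}
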
diff --git a/include/linux/ti_wilink_st.h b/include/linux/ti_wilink_st.h
index 44a7f9169ac6..10642d4844f0 100644
--- a/include/linux/ti_wilink_st.h
+++ b/include/linux/ti_wilink_st.h
@@ -271,7 +271,7 @@ long st_kim_stop(void *);
void st_kim_complete(void *);
void kim_st_list_protocols(struct st_data_s *, void *);
-void st_kim_recv(void *, const unsigned char *, long);
+void st_kim_recv(void *disc_data, const u8 *data, size_t count);
/*
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 9459fef5b857..716d17f31c45 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -140,14 +140,6 @@ extern unsigned long tick_nohz_get_idle_calls(void);
extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu);
extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
-
-static inline void tick_nohz_idle_stop_tick_protected(void)
-{
- local_irq_disable();
- tick_nohz_idle_stop_tick();
- local_irq_enable();
-}
-
#else /* !CONFIG_NO_HZ_COMMON */
#define tick_nohz_enabled (0)
static inline int tick_nohz_tick_stopped(void) { return 0; }
@@ -170,8 +162,6 @@ static inline ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
}
static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
-
-static inline void tick_nohz_idle_stop_tick_protected(void) { }
#endif /* !CONFIG_NO_HZ_COMMON */
#ifdef CONFIG_NO_HZ_FULL
diff --git a/include/linux/time_namespace.h b/include/linux/time_namespace.h
index bb9d3f5542f8..876e31b4461d 100644
--- a/include/linux/time_namespace.h
+++ b/include/linux/time_namespace.h
@@ -7,10 +7,13 @@
#include <linux/nsproxy.h>
#include <linux/ns_common.h>
#include <linux/err.h>
+#include <linux/time64.h>
struct user_namespace;
extern struct user_namespace init_user_ns;
+struct vm_area_struct;
+
struct timens_offsets {
struct timespec64 monotonic;
struct timespec64 boottime;
@@ -44,7 +47,6 @@ struct time_namespace *copy_time_ns(unsigned long flags,
struct time_namespace *old_ns);
void free_time_ns(struct time_namespace *ns);
void timens_on_fork(struct nsproxy *nsproxy, struct task_struct *tsk);
-struct vdso_data *arch_get_vdso_data(void *vvar_page);
struct page *find_timens_vvar_page(struct vm_area_struct *vma);
static inline void put_time_ns(struct time_namespace *ns)
@@ -163,4 +165,6 @@ static inline ktime_t timens_ktime_to_host(clockid_t clockid, ktime_t tim)
}
#endif
+struct vdso_data *arch_get_vdso_data(void *vvar_page);
+
#endif /* _LINUX_TIMENS_H */
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index fe1e467ba046..7c43e98cf211 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -4,6 +4,7 @@
#include <linux/errno.h>
#include <linux/clocksource_ids.h>
+#include <linux/ktime.h>
/* Included from linux/ktime.h */
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 9162f275819a..f18a2f1eb79e 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -7,21 +7,7 @@
#include <linux/stddef.h>
#include <linux/debugobjects.h>
#include <linux/stringify.h>
-
-struct timer_list {
- /*
- * All fields that change during normal runtime grouped to the
- * same cacheline
- */
- struct hlist_node entry;
- unsigned long expires;
- void (*function)(struct timer_list *);
- u32 flags;
-
-#ifdef CONFIG_LOCKDEP
- struct lockdep_map lockdep_map;
-#endif
-};
+#include <linux/timer_types.h>
#ifdef CONFIG_LOCKDEP
/*
@@ -77,8 +63,7 @@ struct timer_list {
.entry = { .next = TIMER_ENTRY_STATIC }, \
.function = (_function), \
.flags = (_flags), \
- __TIMER_LOCKDEP_MAP_INITIALIZER( \
- __FILE__ ":" __stringify(__LINE__)) \
+ __TIMER_LOCKDEP_MAP_INITIALIZER(FILE_LINE) \
}
#define DEFINE_TIMER(_name, _function) \
diff --git a/include/linux/timer_types.h b/include/linux/timer_types.h
new file mode 100644
index 000000000000..fae5a388f914
--- /dev/null
+++ b/include/linux/timer_types.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_TIMER_TYPES_H
+#define _LINUX_TIMER_TYPES_H
+
+#include <linux/lockdep_types.h>
+#include <linux/types.h>
+
+struct timer_list {
+ /*
+ * All fields that change during normal runtime grouped to the
+ * same cacheline
+ */
+ struct hlist_node entry;
+ unsigned long expires;
+ void (*function)(struct timer_list *);
+ u32 flags;
+
+#ifdef CONFIG_LOCKDEP
+ struct lockdep_map lockdep_map;
+#endif
+};
+
+#endif /* _LINUX_TIMER_TYPES_H */
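
The timer_list move follows the recurring <foo>_types.h split: the structure
definition lands in a minimal header with few dependencies, while the API
header includes it. A sketch of the shape, with hypothetical foo names:

/* foo_types.h: the bare type, minimal includes, cheap to pull in */
#ifndef _LINUX_FOO_TYPES_H
#define _LINUX_FOO_TYPES_H

#include <linux/types.h>

struct foo {
	u32 state;
};

#endif /* _LINUX_FOO_TYPES_H */

/* foo.h: the full API; includes the types header instead of redefining */
#ifndef _LINUX_FOO_H
#define _LINUX_FOO_H

#include <linux/foo_types.h>

void foo_init(struct foo *f);

#endif /* _LINUX_FOO_H */
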
diff --git a/include/linux/timerqueue.h b/include/linux/timerqueue.h
index adc80e29168e..62973f7d4610 100644
--- a/include/linux/timerqueue.h
+++ b/include/linux/timerqueue.h
@@ -3,18 +3,7 @@
#define _LINUX_TIMERQUEUE_H
#include <linux/rbtree.h>
-#include <linux/ktime.h>
-
-
-struct timerqueue_node {
- struct rb_node node;
- ktime_t expires;
-};
-
-struct timerqueue_head {
- struct rb_root_cached rb_root;
-};
-
+#include <linux/timerqueue_types.h>
extern bool timerqueue_add(struct timerqueue_head *head,
struct timerqueue_node *node);
diff --git a/include/linux/timerqueue_types.h b/include/linux/timerqueue_types.h
new file mode 100644
index 000000000000..dc298d0923e3
--- /dev/null
+++ b/include/linux/timerqueue_types.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_TIMERQUEUE_TYPES_H
+#define _LINUX_TIMERQUEUE_TYPES_H
+
+#include <linux/rbtree_types.h>
+#include <linux/types.h>
+
+struct timerqueue_node {
+ struct rb_node node;
+ ktime_t expires;
+};
+
+struct timerqueue_head {
+ struct rb_root_cached rb_root;
+};
+
+#endif /* _LINUX_TIMERQUEUE_TYPES_H */
diff --git a/include/linux/tnum.h b/include/linux/tnum.h
index 1c3948a1d6ad..3c13240077b8 100644
--- a/include/linux/tnum.h
+++ b/include/linux/tnum.h
@@ -106,6 +106,10 @@ int tnum_sbin(char *str, size_t size, struct tnum a);
struct tnum tnum_subreg(struct tnum a);
/* Returns the tnum with the lower 32-bit subreg cleared */
struct tnum tnum_clear_subreg(struct tnum a);
+/* Returns the tnum with the lower 32-bit subreg in *reg* set to the lower
+ * 32-bit subreg in *subreg*
+ */
+struct tnum tnum_with_subreg(struct tnum reg, struct tnum subreg);
/* Returns the tnum with the lower 32-bit subreg set to value */
struct tnum tnum_const_subreg(struct tnum a, u32 value);
/* Returns true if 32-bit subreg @a is a known constant*/
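
For context on tnum_with_subreg(): a tnum tracks a partially known value as a
(value, mask) pair, where set mask bits are unknown. A standalone sketch of
the documented semantics; this is not the kernel's implementation, which
composes existing tnum helpers:

#include <stdint.h>

struct tnum {
	uint64_t value;	/* known bits */
	uint64_t mask;	/* set bits are unknown */
};

/* Upper 32 bits come from @reg, the lower 32-bit subregister from @subreg. */
static struct tnum tnum_with_subreg_sketch(struct tnum reg, struct tnum subreg)
{
	struct tnum res;

	res.value = (reg.value & ~0xffffffffULL) | (uint32_t)subreg.value;
	res.mask  = (reg.mask  & ~0xffffffffULL) | (uint32_t)subreg.mask;
	return res;
}
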
diff --git a/include/linux/topology.h b/include/linux/topology.h
index fea32377f7c7..52f5850730b3 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -251,7 +251,7 @@ extern const struct cpumask *sched_numa_hop_mask(unsigned int node, unsigned int
#else
static __always_inline int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
{
- return cpumask_nth(cpu, cpus);
+ return cpumask_nth_and(cpu, cpus, cpu_online_mask);
}
static inline const struct cpumask *
diff --git a/include/linux/torture.h b/include/linux/torture.h
index 7038104463e4..1541454da03e 100644
--- a/include/linux/torture.h
+++ b/include/linux/torture.h
@@ -21,6 +21,7 @@
#include <linux/debugobjects.h>
#include <linux/bug.h>
#include <linux/compiler.h>
+#include <linux/hrtimer.h>
/* Definitions for a non-string torture-test module parameter. */
#define torture_param(type, name, init, msg) \
@@ -81,7 +82,8 @@ static inline void torture_random_init(struct torture_random_state *trsp)
}
/* Definitions for high-resolution-timer sleeps. */
-int torture_hrtimeout_ns(ktime_t baset_ns, u32 fuzzt_ns, struct torture_random_state *trsp);
+int torture_hrtimeout_ns(ktime_t baset_ns, u32 fuzzt_ns, const enum hrtimer_mode mode,
+ struct torture_random_state *trsp);
int torture_hrtimeout_us(u32 baset_us, u32 fuzzt_ns, struct torture_random_state *trsp);
int torture_hrtimeout_ms(u32 baset_ms, u32 fuzzt_us, struct torture_random_state *trsp);
int torture_hrtimeout_jiffies(u32 baset_j, struct torture_random_state *trsp);
@@ -108,19 +110,27 @@ bool torture_must_stop(void);
bool torture_must_stop_irq(void);
void torture_kthread_stopping(char *title);
int _torture_create_kthread(int (*fn)(void *arg), void *arg, char *s, char *m,
- char *f, struct task_struct **tp);
+ char *f, struct task_struct **tp, void (*cbf)(struct task_struct *tp));
void _torture_stop_kthread(char *m, struct task_struct **tp);
#define torture_create_kthread(n, arg, tp) \
_torture_create_kthread(n, (arg), #n, "Creating " #n " task", \
- "Failed to create " #n, &(tp))
+ "Failed to create " #n, &(tp), NULL)
+#define torture_create_kthread_cb(n, arg, tp, cbf) \
+ _torture_create_kthread(n, (arg), #n, "Creating " #n " task", \
+ "Failed to create " #n, &(tp), cbf)
#define torture_stop_kthread(n, tp) \
_torture_stop_kthread("Stopping " #n " task", &(tp))
+/* Scheduler-related definitions. */
#ifdef CONFIG_PREEMPTION
#define torture_preempt_schedule() __preempt_schedule()
#else
#define torture_preempt_schedule() do { } while (0)
#endif
+#if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST) || IS_ENABLED(CONFIG_LOCK_TORTURE_TEST) || IS_MODULE(CONFIG_LOCK_TORTURE_TEST)
+long torture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask);
+#endif
+
#endif /* __LINUX_TORTURE_H */
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index 6a1e8f157255..4ee9d13749ad 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -283,6 +283,7 @@ enum tpm_chip_flags {
TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED = BIT(6),
TPM_CHIP_FLAG_FIRMWARE_UPGRADE = BIT(7),
TPM_CHIP_FLAG_SUSPENDED = BIT(8),
+ TPM_CHIP_FLAG_HWRNG_DISABLED = BIT(9),
};
#define to_tpm_chip(d) container_of(d, struct tpm_chip, dev)
diff --git a/include/linux/trace.h b/include/linux/trace.h
index 2a70a447184c..fdcd76b7be83 100644
--- a/include/linux/trace.h
+++ b/include/linux/trace.h
@@ -51,7 +51,7 @@ int trace_array_printk(struct trace_array *tr, unsigned long ip,
const char *fmt, ...);
int trace_array_init_printk(struct trace_array *tr);
void trace_array_put(struct trace_array *tr);
-struct trace_array *trace_array_get_by_name(const char *name);
+struct trace_array *trace_array_get_by_name(const char *name, const char *systems);
int trace_array_destroy(struct trace_array *tr);
/* For osnoise tracer */
@@ -84,7 +84,7 @@ static inline int trace_array_init_printk(struct trace_array *tr)
static inline void trace_array_put(struct trace_array *tr)
{
}
-static inline struct trace_array *trace_array_get_by_name(const char *name)
+static inline struct trace_array *trace_array_get_by_name(const char *name, const char *systems)
{
return NULL;
}
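
A hedged usage sketch for the widened trace_array_get_by_name(); the @systems
argument is assumed to narrow which event systems the new instance is created
with, and NULL is assumed to keep the old behaviour:

static int demo_use_instance(void)
{
	/* NULL @systems: create the instance with all systems available. */
	struct trace_array *tr = trace_array_get_by_name("demo", NULL);

	if (!tr)
		return -ENOMEM;

	trace_array_init_printk(tr);
	trace_array_printk(tr, _THIS_IP_, "instance ready\n");
	trace_array_put(tr);
	return 0;
}
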
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 7c4a0b72334e..d68ff9b1247f 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -59,6 +59,17 @@ int trace_raw_output_prep(struct trace_iterator *iter,
extern __printf(2, 3)
void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...);
+/* Used to find the offset and length of dynamic fields in trace events */
+struct trace_dynamic_info {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ u16 len;
+ u16 offset;
+#else
+ u16 offset;
+ u16 len;
+#endif
+} __packed;
+
/*
* The trace entry - the most basic unit of tracing. This is what
* is printed in the end as a single line in the trace output, such as:
@@ -318,6 +329,7 @@ enum {
TRACE_EVENT_FL_KPROBE_BIT,
TRACE_EVENT_FL_UPROBE_BIT,
TRACE_EVENT_FL_EPROBE_BIT,
+ TRACE_EVENT_FL_FPROBE_BIT,
TRACE_EVENT_FL_CUSTOM_BIT,
};
@@ -332,6 +344,7 @@ enum {
* KPROBE - Event is a kprobe
* UPROBE - Event is a uprobe
* EPROBE - Event is an event probe
+ * FPROBE - Event is a function probe
* CUSTOM - Event is a custom event (to be attached to an existing tracepoint)
* This is set when the custom event has not been attached
* to a tracepoint yet, then it is cleared when it is.
@@ -346,6 +359,7 @@ enum {
TRACE_EVENT_FL_KPROBE = (1 << TRACE_EVENT_FL_KPROBE_BIT),
TRACE_EVENT_FL_UPROBE = (1 << TRACE_EVENT_FL_UPROBE_BIT),
TRACE_EVENT_FL_EPROBE = (1 << TRACE_EVENT_FL_EPROBE_BIT),
+ TRACE_EVENT_FL_FPROBE = (1 << TRACE_EVENT_FL_FPROBE_BIT),
TRACE_EVENT_FL_CUSTOM = (1 << TRACE_EVENT_FL_CUSTOM_BIT),
};
@@ -478,6 +492,7 @@ enum {
EVENT_FILE_FL_TRIGGER_COND_BIT,
EVENT_FILE_FL_PID_FILTER_BIT,
EVENT_FILE_FL_WAS_ENABLED_BIT,
+ EVENT_FILE_FL_FREED_BIT,
};
extern struct trace_event_file *trace_get_event_file(const char *instance,
@@ -616,6 +631,7 @@ extern int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...);
* TRIGGER_COND - When set, one or more triggers has an associated filter
* PID_FILTER - When set, the event is filtered based on pid
* WAS_ENABLED - Set when enabled to know to clear trace on module removal
+ * FREED - File descriptor is freed, all fields should be considered invalid
*/
enum {
EVENT_FILE_FL_ENABLED = (1 << EVENT_FILE_FL_ENABLED_BIT),
@@ -629,13 +645,14 @@ enum {
EVENT_FILE_FL_TRIGGER_COND = (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
EVENT_FILE_FL_PID_FILTER = (1 << EVENT_FILE_FL_PID_FILTER_BIT),
EVENT_FILE_FL_WAS_ENABLED = (1 << EVENT_FILE_FL_WAS_ENABLED_BIT),
+ EVENT_FILE_FL_FREED = (1 << EVENT_FILE_FL_FREED_BIT),
};
struct trace_event_file {
struct list_head list;
struct trace_event_call *event_call;
struct event_filter __rcu *filter;
- struct dentry *dir;
+ struct eventfs_inode *ei;
struct trace_array *tr;
struct trace_subsystem_dir *system;
struct list_head triggers;
@@ -657,6 +674,7 @@ struct trace_event_file {
* caching and such. Which is mostly OK ;-)
*/
unsigned long flags;
+ atomic_t ref; /* ref count for opened files */
atomic_t sm_ref; /* soft-mode reference counter */
atomic_t tm_ref; /* trigger-mode reference counter */
};
@@ -747,8 +765,10 @@ struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name);
void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp);
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
u32 *fd_type, const char **buf,
- u64 *probe_offset, u64 *probe_addr);
+ u64 *probe_offset, u64 *probe_addr,
+ unsigned long *missed);
int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
+int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
#else
static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
@@ -786,7 +806,7 @@ static inline void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
static inline int bpf_get_perf_event_info(const struct perf_event *event,
u32 *prog_id, u32 *fd_type,
const char **buf, u64 *probe_offset,
- u64 *probe_addr)
+ u64 *probe_addr, unsigned long *missed)
{
return -EOPNOTSUPP;
}
@@ -795,6 +815,11 @@ bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
return -EOPNOTSUPP;
}
+static inline int
+bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
+{
+ return -EOPNOTSUPP;
+}
#endif
enum {
@@ -804,6 +829,7 @@ enum {
FILTER_RDYN_STRING,
FILTER_PTR_STRING,
FILTER_TRACE_FN,
+ FILTER_CPUMASK,
FILTER_COMM,
FILTER_CPU,
FILTER_STACKTRACE,
@@ -856,6 +882,7 @@ extern void perf_kprobe_destroy(struct perf_event *event);
extern int bpf_get_kprobe_info(const struct perf_event *event,
u32 *fd_type, const char **symbol,
u64 *probe_offset, u64 *probe_addr,
+ unsigned long *missed,
bool perf_type_tracepoint);
#endif
#ifdef CONFIG_UPROBE_EVENTS
@@ -864,7 +891,8 @@ extern int perf_uprobe_init(struct perf_event *event,
extern void perf_uprobe_destroy(struct perf_event *event);
extern int bpf_get_uprobe_info(const struct perf_event *event,
u32 *fd_type, const char **filename,
- u64 *probe_offset, bool perf_type_tracepoint);
+ u64 *probe_offset, u64 *probe_addr,
+ bool perf_type_tracepoint);
#endif
extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
char *filter_str);
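
The trace_dynamic_info struct fixes the half-word order so the same 32-bit
word can be read either through the struct or via shifts and masks. A
standalone sketch, assuming the offset-in-low-half packing implied by the
little-endian branch above:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the little-endian layout of struct trace_dynamic_info. */
struct dyn_info {
	uint16_t offset;
	uint16_t len;
};

int main(void)
{
	/* A dynamic field as found in a raw event record: */
	uint32_t raw = (64u << 16) | 24u;	/* 64 bytes at offset 24 */
	struct dyn_info d = { .offset = raw & 0xffff, .len = raw >> 16 };

	printf("data at offset %u, %u bytes\n", d.offset, d.len);
	return 0;
}
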
diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h
index 6be92bf559fe..1ef95c0287f0 100644
--- a/include/linux/trace_seq.h
+++ b/include/linux/trace_seq.h
@@ -8,20 +8,31 @@
/*
* Trace sequences are used to allow a function to call several other functions
- * to create a string of data to use (up to a max of PAGE_SIZE).
+ * to create a string of data to use.
+ *
+ * Make the trace seq 8K, which is typically PAGE_SIZE * 2 on most
+ * architectures. TRACE_SEQ_BUFFER_SIZE (TRACE_SEQ_SIZE minus the
+ * other fields of trace_seq) is the maximum size the output of a
+ * trace event may be.
*/
+#define TRACE_SEQ_SIZE 8192
+#define TRACE_SEQ_BUFFER_SIZE (TRACE_SEQ_SIZE - \
+ (sizeof(struct seq_buf) + sizeof(size_t) + sizeof(int)))
+
struct trace_seq {
- char buffer[PAGE_SIZE];
+ char buffer[TRACE_SEQ_BUFFER_SIZE];
struct seq_buf seq;
+ size_t readpos;
int full;
};
static inline void
trace_seq_init(struct trace_seq *s)
{
- seq_buf_init(&s->seq, s->buffer, PAGE_SIZE);
+ seq_buf_init(&s->seq, s->buffer, TRACE_SEQ_BUFFER_SIZE);
s->full = 0;
+ s->readpos = 0;
}
/**
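
The arithmetic behind TRACE_SEQ_BUFFER_SIZE is meant to keep the whole
structure within TRACE_SEQ_SIZE; a build-time check in the same spirit, as a
sketch (modulo any trailing padding the compiler might add):

static_assert(sizeof(struct trace_seq) <= TRACE_SEQ_SIZE,
	      "struct trace_seq must fit in TRACE_SEQ_SIZE bytes");
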
diff --git a/include/linux/tracefs.h b/include/linux/tracefs.h
index 99912445974c..7a5fe17b6bf9 100644
--- a/include/linux/tracefs.h
+++ b/include/linux/tracefs.h
@@ -21,6 +21,72 @@ struct file_operations;
#ifdef CONFIG_TRACING
+struct eventfs_file;
+
+/**
+ * eventfs_callback - A callback function to create dynamic files in eventfs
+ * @name: The name of the file that is to be created
+ * @mode: return the file mode for the file (RW access, etc)
+ * @data: data to pass to the created file ops
+ * @fops: the file operations of the created file
+ *
+ * The eventfs files are dynamically created. The struct eventfs_entry array
+ * is passed to eventfs_create_dir() or eventfs_create_events_dir() that will
+ * be used to create the files within those directories. When a lookup
+ * or access to a file within the directory is made, the struct eventfs_entry
+ * array is used to find a callback() with the matching name that is being
+ * referenced (for lookups, the entire array is iterated and each callback
+ * will be called).
+ *
+ * The callback will be called with @name for the name of the file to create.
+ * The callback can return less than 1 to indicate that no file should be
+ * created.
+ *
+ * If a file is to be created, then @mode should be populated with the file
+ * mode (permissions) with which the file is created. This is
+ * used to set the created inode's i_mode field.
+ *
+ * The @data should be set to the data passed to the other file operations
+ * (read, write, etc). Note, @data will also point to the data passed in
+ * to eventfs_create_dir() or eventfs_create_events_dir(), but the callback
+ * can replace the data if it chooses to. Otherwise, the original data
+ * will be used for the file operation functions.
+ *
+ * The @fops should be set to the file operations that will be used to create
+ * the inode.
+ *
+ * NB. This callback is called while holding internal locks of the eventfs
+ * system. The callback must not call any code that might also call into
+ * the tracefs or eventfs system or it will risk creating a deadlock.
+ */
+typedef int (*eventfs_callback)(const char *name, umode_t *mode, void **data,
+ const struct file_operations **fops);
+
+/**
+ * struct eventfs_entry - dynamically created eventfs file call back handler
+ * @name: The name of the dynamic file in an eventfs directory
+ * @callback: The callback to get the fops of the file when it is created
+ *
+ * See the eventfs_callback() typedef for how to set up @callback.
+ */
+struct eventfs_entry {
+ const char *name;
+ eventfs_callback callback;
+};
+
+struct eventfs_inode;
+
+struct eventfs_inode *eventfs_create_events_dir(const char *name, struct dentry *parent,
+ const struct eventfs_entry *entries,
+ int size, void *data);
+
+struct eventfs_inode *eventfs_create_dir(const char *name, struct eventfs_inode *parent,
+ const struct eventfs_entry *entries,
+ int size, void *data);
+
+void eventfs_remove_events_dir(struct eventfs_inode *ei);
+void eventfs_remove_dir(struct eventfs_inode *ei);
+
struct dentry *tracefs_create_file(const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops);
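
A hedged sketch of the dynamic-file machinery documented above: a callback
that materializes a single "enable" file, wired into an eventfs_entry table;
demo_enable_fops is assumed to be defined elsewhere:

static int demo_events_callback(const char *name, umode_t *mode, void **data,
				const struct file_operations **fops)
{
	/* Only create the "enable" file; return 0 to skip anything else. */
	if (strcmp(name, "enable") != 0)
		return 0;

	*mode = 0644;
	*fops = &demo_enable_fops;	/* assumed defined elsewhere */
	return 1;
}

static const struct eventfs_entry demo_entries[] = {
	{ .name = "enable", .callback = demo_events_callback },
};

/* Create a directory whose files materialize on lookup: */
static struct eventfs_inode *demo_create(struct eventfs_inode *parent,
					 void *data)
{
	return eventfs_create_dir("demo", parent, demo_entries,
				  ARRAY_SIZE(demo_entries), data);
}
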
diff --git a/include/linux/tracepoint-defs.h b/include/linux/tracepoint-defs.h
index e7c2276be33e..4dc4955f0fbf 100644
--- a/include/linux/tracepoint-defs.h
+++ b/include/linux/tracepoint-defs.h
@@ -35,6 +35,7 @@ struct tracepoint {
struct static_call_key *static_call_key;
void *static_call_tramp;
void *iterator;
+ void *probestub;
int (*regfunc)(void);
void (*unregfunc)(void);
struct tracepoint_func __rcu *funcs;
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 6811e43c1b5c..88c0ba623ee6 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -303,6 +303,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
__section("__tracepoints_strings") = #_name; \
extern struct static_call_key STATIC_CALL_KEY(tp_func_##_name); \
int __traceiter_##_name(void *__data, proto); \
+ void __probestub_##_name(void *__data, proto); \
struct tracepoint __tracepoint_##_name __used \
__section("__tracepoints") = { \
.name = __tpstrtab_##_name, \
@@ -310,6 +311,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
.static_call_key = &STATIC_CALL_KEY(tp_func_##_name), \
.static_call_tramp = STATIC_CALL_TRAMP_ADDR(tp_func_##_name), \
.iterator = &__traceiter_##_name, \
+ .probestub = &__probestub_##_name, \
.regfunc = _reg, \
.unregfunc = _unreg, \
.funcs = NULL }; \
@@ -330,6 +332,9 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
} \
return 0; \
} \
+ void __probestub_##_name(void *__data, proto) \
+ { \
+ } \
DEFINE_STATIC_CALL(tp_func_##_name, __traceiter_##_name);
#define DEFINE_TRACE(name, proto, args) \
diff --git a/include/linux/tsm.h b/include/linux/tsm.h
new file mode 100644
index 000000000000..de8324a2223c
--- /dev/null
+++ b/include/linux/tsm.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __TSM_H
+#define __TSM_H
+
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+#define TSM_INBLOB_MAX 64
+#define TSM_OUTBLOB_MAX SZ_32K
+
+/*
+ * Privilege level is a nested permission concept to allow confidential
+ * guests to partition address space, 4-levels are supported.
+ */
+#define TSM_PRIVLEVEL_MAX 3
+
+/**
+ * struct tsm_desc - option descriptor for generating tsm report blobs
+ * @privlevel: optional privilege level to associate with @outblob
+ * @inblob_len: sizeof @inblob
+ * @inblob: arbitrary input data
+ */
+struct tsm_desc {
+ unsigned int privlevel;
+ size_t inblob_len;
+ u8 inblob[TSM_INBLOB_MAX];
+};
+
+/**
+ * struct tsm_report - track state of report generation relative to options
+ * @desc: input parameters to @report_new()
+ * @outblob_len: sizeof(@outblob)
+ * @outblob: generated evidence to provide to the attestation agent
+ * @auxblob_len: sizeof(@auxblob)
+ * @auxblob: (optional) auxiliary data to the report (e.g. certificate data)
+ */
+struct tsm_report {
+ struct tsm_desc desc;
+ size_t outblob_len;
+ u8 *outblob;
+ size_t auxblob_len;
+ u8 *auxblob;
+};
+
+/**
+ * struct tsm_ops - attributes and operations for tsm instances
+ * @name: tsm id reflected in /sys/kernel/config/tsm/report/$report/provider
+ * @privlevel_floor: convey base privlevel for nested scenarios
+ * @report_new: Populate @report with the report blob and auxblob
+ * (optional), return 0 on successful population, or -errno otherwise
+ *
+ * Implementation-specific ops; only one is expected to be registered at
+ * a time, i.e. only one of "sev-guest", "tdx-guest", etc.
+ */
+struct tsm_ops {
+ const char *name;
+ const unsigned int privlevel_floor;
+ int (*report_new)(struct tsm_report *report, void *data);
+};
+
+extern const struct config_item_type tsm_report_default_type;
+
+/* publish @privlevel, @privlevel_floor, and @auxblob attributes */
+extern const struct config_item_type tsm_report_extra_type;
+
+int tsm_register(const struct tsm_ops *ops, void *priv,
+ const struct config_item_type *type);
+int tsm_unregister(const struct tsm_ops *ops);
+#endif /* __TSM_H */
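
A hedged sketch of a provider hooking into the tsm core through the ops table
above; report_new is stubbed here, whereas a real implementation would fill
the report blobs from report->desc:

static int demo_report_new(struct tsm_report *report, void *data)
{
	/* A real provider fills report->outblob / report->outblob_len
	 * (and optionally report->auxblob) from report->desc; this stub
	 * only signals that no backend is present.
	 */
	return -ENXIO;
}

static const struct tsm_ops demo_tsm_ops = {
	.name = "demo-guest",
	.report_new = demo_report_new,
};

static int __init demo_tsm_init(void)
{
	return tsm_register(&demo_tsm_ops, NULL, &tsm_report_default_type);
}
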
diff --git a/include/linux/tty.h b/include/linux/tty.h
index e8d5d9997aca..8c76fd97d4ad 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -192,13 +192,14 @@ struct tty_operations;
*/
struct tty_struct {
struct kref kref;
+ int index;
struct device *dev;
struct tty_driver *driver;
+ struct tty_port *port;
const struct tty_operations *ops;
- int index;
- struct ld_semaphore ldisc_sem;
struct tty_ldisc *ldisc;
+ struct ld_semaphore ldisc_sem;
struct mutex atomic_write_lock;
struct mutex legacy_mutex;
@@ -209,6 +210,7 @@ struct tty_struct {
char name[64];
unsigned long flags;
int count;
+ unsigned int receive_room;
struct winsize winsize;
struct {
@@ -219,16 +221,16 @@ struct tty_struct {
} __aligned(sizeof(unsigned long)) flow;
struct {
- spinlock_t lock;
struct pid *pgrp;
struct pid *session;
+ spinlock_t lock;
unsigned char pktstatus;
bool packet;
unsigned long unused[0];
} __aligned(sizeof(unsigned long)) ctrl;
bool hw_stopped;
- unsigned int receive_room;
+ bool closing;
int flow_change;
struct tty_struct *link;
@@ -239,15 +241,13 @@ struct tty_struct {
void *disc_data;
void *driver_data;
spinlock_t files_lock;
+ int write_cnt;
+ u8 *write_buf;
+
struct list_head tty_files;
#define N_TTY_BUF_SIZE 4096
-
- int closing;
- unsigned char *write_buf;
- int write_cnt;
struct work_struct SAK_work;
- struct tty_port *port;
} __randomize_layout;
/* Each of a tty's open files has private_data pointing to tty_file_private */
@@ -390,14 +390,14 @@ int vcs_init(void);
extern const struct class tty_class;
/**
- * tty_kref_get - get a tty reference
- * @tty: tty device
+ * tty_kref_get - get a tty reference
+ * @tty: tty device
*
- * Return a new reference to a tty object. The caller must hold
- * sufficient locks/counts to ensure that their existing reference cannot
- * go away
+ * Returns: a new reference to a tty object
+ *
+ * Locking: The caller must hold sufficient locks/counts to ensure that their
+ * existing reference cannot go away.
*/
-
static inline struct tty_struct *tty_kref_get(struct tty_struct *tty)
{
if (tty)
@@ -410,17 +410,18 @@ void tty_wait_until_sent(struct tty_struct *tty, long timeout);
void stop_tty(struct tty_struct *tty);
void start_tty(struct tty_struct *tty);
void tty_write_message(struct tty_struct *tty, char *msg);
-int tty_send_xchar(struct tty_struct *tty, char ch);
-int tty_put_char(struct tty_struct *tty, unsigned char c);
+int tty_send_xchar(struct tty_struct *tty, u8 ch);
+int tty_put_char(struct tty_struct *tty, u8 c);
unsigned int tty_chars_in_buffer(struct tty_struct *tty);
unsigned int tty_write_room(struct tty_struct *tty);
void tty_driver_flush_buffer(struct tty_struct *tty);
void tty_unthrottle(struct tty_struct *tty);
-int tty_throttle_safe(struct tty_struct *tty);
-int tty_unthrottle_safe(struct tty_struct *tty);
+bool tty_throttle_safe(struct tty_struct *tty);
+bool tty_unthrottle_safe(struct tty_struct *tty);
int tty_do_resize(struct tty_struct *tty, struct winsize *ws);
int tty_get_icount(struct tty_struct *tty,
struct serial_icounter_struct *icount);
+int tty_get_tiocm(struct tty_struct *tty);
int is_current_pgrp_orphaned(void);
void tty_hangup(struct tty_struct *tty);
void tty_vhangup(struct tty_struct *tty);
@@ -435,16 +436,14 @@ void tty_encode_baud_rate(struct tty_struct *tty, speed_t ibaud,
speed_t obaud);
/**
- * tty_get_baud_rate - get tty bit rates
- * @tty: tty to query
+ * tty_get_baud_rate - get tty bit rates
+ * @tty: tty to query
*
- * Returns the baud rate as an integer for this terminal. The
- * termios lock must be held by the caller and the terminal bit
- * flags may be updated.
+ * Returns: the baud rate as an integer for this terminal
*
- * Locking: none
+ * Locking: The termios lock must be held by the caller.
*/
-static inline speed_t tty_get_baud_rate(struct tty_struct *tty)
+static inline speed_t tty_get_baud_rate(const struct tty_struct *tty)
{
return tty_termios_baud_rate(&tty->termios);
}
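The reworked kernel-doc above spells out the reference-counting contract; a sketch of the intended pattern, assuming the caller already holds a reference as required (the function name is illustrative):

#include <linux/tty.h>

/* Pin a tty across a region where the caller's own reference may be dropped. */
static void sample_use_tty(struct tty_struct *tty)
{
	struct tty_struct *held = tty_kref_get(tty);

	if (!held)
		return;
	/* ... use @held ... */
	tty_kref_put(held);
}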
diff --git a/include/linux/tty_buffer.h b/include/linux/tty_buffer.h
index 6ceb2789e6c8..31125e3be3c5 100644
--- a/include/linux/tty_buffer.h
+++ b/include/linux/tty_buffer.h
@@ -12,24 +12,24 @@ struct tty_buffer {
struct tty_buffer *next;
struct llist_node free;
};
- int used;
- int size;
- int commit;
- int lookahead; /* Lazy update on recv, can become less than "read" */
- int read;
+ unsigned int used;
+ unsigned int size;
+ unsigned int commit;
+ unsigned int lookahead; /* Lazy update on recv, can become less than "read" */
+ unsigned int read;
bool flags;
/* Data points here */
- unsigned long data[];
+ u8 data[] __aligned(sizeof(unsigned long));
};
-static inline unsigned char *char_buf_ptr(struct tty_buffer *b, int ofs)
+static inline u8 *char_buf_ptr(struct tty_buffer *b, unsigned int ofs)
{
- return ((unsigned char *)b->data) + ofs;
+ return b->data + ofs;
}
-static inline char *flag_buf_ptr(struct tty_buffer *b, int ofs)
+static inline u8 *flag_buf_ptr(struct tty_buffer *b, unsigned int ofs)
{
- return (char *)char_buf_ptr(b, ofs) + b->size;
+ return char_buf_ptr(b, ofs) + b->size;
}
struct tty_bufhead {
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index e00034118c7b..7372124fbf90 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -72,8 +72,7 @@ struct serial_struct;
* is closed for the last time freeing up the resources. This is
* actually the second part of shutdown for routines that might sleep.
*
- * @write: ``int ()(struct tty_struct *tty, const unsigned char *buf,
- * int count)``
+ * @write: ``ssize_t ()(struct tty_struct *tty, const u8 *buf, size_t count)``
*
* This routine is called by the kernel to write a series (@count) of
* characters (@buf) to the @tty device. The characters may come from
@@ -85,7 +84,7 @@ struct serial_struct;
*
* Optional: Required for writable devices. May not sleep.
*
- * @put_char: ``int ()(struct tty_struct *tty, unsigned char ch)``
+ * @put_char: ``int ()(struct tty_struct *tty, u8 ch)``
*
* This routine is called by the kernel to write a single character @ch to
* the @tty device. If the kernel uses this routine, it must call the
@@ -243,7 +242,7 @@ struct serial_struct;
* Optional: If not provided, the device is assumed to have no FIFO.
* Usually correct to invoke via tty_wait_until_sent(). May sleep.
*
- * @send_xchar: ``void ()(struct tty_struct *tty, char ch)``
+ * @send_xchar: ``void ()(struct tty_struct *tty, u8 ch)``
*
* This routine is used to send a high-priority XON/XOFF character (@ch)
* to the @tty device.
@@ -356,9 +355,8 @@ struct tty_operations {
void (*close)(struct tty_struct * tty, struct file * filp);
void (*shutdown)(struct tty_struct *tty);
void (*cleanup)(struct tty_struct *tty);
- int (*write)(struct tty_struct * tty,
- const unsigned char *buf, int count);
- int (*put_char)(struct tty_struct *tty, unsigned char ch);
+ ssize_t (*write)(struct tty_struct *tty, const u8 *buf, size_t count);
+ int (*put_char)(struct tty_struct *tty, u8 ch);
void (*flush_chars)(struct tty_struct *tty);
unsigned int (*write_room)(struct tty_struct *tty);
unsigned int (*chars_in_buffer)(struct tty_struct *tty);
@@ -376,7 +374,7 @@ struct tty_operations {
void (*flush_buffer)(struct tty_struct *tty);
void (*set_ldisc)(struct tty_struct *tty);
void (*wait_until_sent)(struct tty_struct *tty, int timeout);
- void (*send_xchar)(struct tty_struct *tty, char ch);
+ void (*send_xchar)(struct tty_struct *tty, u8 ch);
int (*tiocmget)(struct tty_struct *tty);
int (*tiocmset)(struct tty_struct *tty,
unsigned int set, unsigned int clear);
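With write() now taking const u8 */size_t and returning ssize_t, a driver's ops table would look roughly like this sketch (sample_* names are illustrative):

#include <linux/tty.h>
#include <linux/tty_driver.h>

static ssize_t sample_write(struct tty_struct *tty, const u8 *buf, size_t count)
{
	/* push @count bytes to the hardware; return how many were accepted */
	return count;
}

static void sample_send_xchar(struct tty_struct *tty, u8 ch)
{
	/* transmit the high-priority flow-control character @ch */
}

static const struct tty_operations sample_tty_ops = {
	.write      = sample_write,
	.send_xchar = sample_send_xchar,
};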
diff --git a/include/linux/tty_flip.h b/include/linux/tty_flip.h
index bfaaeee61a05..af4fce98f64e 100644
--- a/include/linux/tty_flip.h
+++ b/include/linux/tty_flip.h
@@ -10,17 +10,59 @@ struct tty_ldisc;
int tty_buffer_set_limit(struct tty_port *port, int limit);
unsigned int tty_buffer_space_avail(struct tty_port *port);
int tty_buffer_request_room(struct tty_port *port, size_t size);
-int tty_insert_flip_string_flags(struct tty_port *port,
- const unsigned char *chars, const char *flags, size_t size);
-int tty_insert_flip_string_fixed_flag(struct tty_port *port,
- const unsigned char *chars, char flag, size_t size);
-int tty_prepare_flip_string(struct tty_port *port, unsigned char **chars,
- size_t size);
+size_t __tty_insert_flip_string_flags(struct tty_port *port, const u8 *chars,
+ const u8 *flags, bool mutable_flags,
+ size_t size);
+size_t tty_prepare_flip_string(struct tty_port *port, u8 **chars, size_t size);
void tty_flip_buffer_push(struct tty_port *port);
-int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag);
-static inline int tty_insert_flip_char(struct tty_port *port,
- unsigned char ch, char flag)
+/**
+ * tty_insert_flip_string_fixed_flag - add characters to the tty buffer
+ * @port: tty port
+ * @chars: characters
+ * @flag: flag value for each character
+ * @size: size
+ *
+ * Queue a series of bytes to the tty buffering. All the characters passed are
+ * marked with the supplied flag.
+ *
+ * Returns: the number added.
+ */
+static inline size_t tty_insert_flip_string_fixed_flag(struct tty_port *port,
+ const u8 *chars, u8 flag,
+ size_t size)
+{
+ return __tty_insert_flip_string_flags(port, chars, &flag, false, size);
+}
+
+/**
+ * tty_insert_flip_string_flags - add characters to the tty buffer
+ * @port: tty port
+ * @chars: characters
+ * @flags: flag bytes
+ * @size: size
+ *
+ * Queue a series of bytes to the tty buffering. For each character the flags
+ * array indicates the status of the character.
+ *
+ * Returns: the number added.
+ */
+static inline size_t tty_insert_flip_string_flags(struct tty_port *port,
+ const u8 *chars,
+ const u8 *flags, size_t size)
+{
+ return __tty_insert_flip_string_flags(port, chars, flags, true, size);
+}
+
+/**
+ * tty_insert_flip_char - add one character to the tty buffer
+ * @port: tty port
+ * @ch: character
+ * @flag: flag byte
+ *
+ * Queue a single byte @ch to the tty buffering, with an optional flag.
+ */
+static inline size_t tty_insert_flip_char(struct tty_port *port, u8 ch, u8 flag)
{
struct tty_buffer *tb = port->buf.tail;
int change;
@@ -32,17 +74,17 @@ static inline int tty_insert_flip_char(struct tty_port *port,
*char_buf_ptr(tb, tb->used++) = ch;
return 1;
}
- return __tty_insert_flip_char(port, ch, flag);
+ return __tty_insert_flip_string_flags(port, &ch, &flag, false, 1);
}
-static inline int tty_insert_flip_string(struct tty_port *port,
- const unsigned char *chars, size_t size)
+static inline size_t tty_insert_flip_string(struct tty_port *port,
+ const u8 *chars, size_t size)
{
return tty_insert_flip_string_fixed_flag(port, chars, TTY_NORMAL, size);
}
-int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p,
- const char *f, int count);
+size_t tty_ldisc_receive_buf(struct tty_ldisc *ld, const u8 *p, const u8 *f,
+ size_t count);
void tty_buffer_lock_exclusive(struct tty_port *port);
void tty_buffer_unlock_exclusive(struct tty_port *port);
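A typical receive path built on the helpers above, as a sketch (the function name is illustrative):

#include <linux/printk.h>
#include <linux/tty_flip.h>

static void sample_rx(struct tty_port *port, const u8 *data, size_t len)
{
	/* every byte is queued with the implicit TTY_NORMAL flag */
	size_t copied = tty_insert_flip_string(port, data, len);

	if (copied < len)
		pr_warn_ratelimited("tty buffer full, dropped %zu bytes\n",
				    len - copied);
	/* hand the queued data over to the line discipline */
	tty_flip_buffer_push(port);
}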
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
index 49dc172dedc7..af01e89074b2 100644
--- a/include/linux/tty_ldisc.h
+++ b/include/linux/tty_ldisc.h
@@ -71,7 +71,7 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
* call to @receive_buf(). Returning an error will prevent the ldisc from
* being attached.
*
- * Can sleep.
+ * Optional. Can sleep.
*
* @close: [TTY] ``void ()(struct tty_struct *tty)``
*
@@ -80,7 +80,7 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
* changed to use a new line discipline. At the point of execution no
* further users will enter the ldisc code for this tty.
*
- * Can sleep.
+ * Optional. Can sleep.
*
* @flush_buffer: [TTY] ``void ()(struct tty_struct *tty)``
*
@@ -88,8 +88,10 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
* input characters it may have queued to be delivered to the user mode
* process. It may be called at any point between open and close.
*
- * @read: [TTY] ``ssize_t ()(struct tty_struct *tty, struct file *file,
- * unsigned char *buf, size_t nr)``
+ * Optional.
+ *
+ * @read: [TTY] ``ssize_t ()(struct tty_struct *tty, struct file *file, u8 *buf,
+ * size_t nr)``
*
* This function is called when the user requests to read from the @tty.
* The line discipline will return whatever characters it has buffered up
@@ -97,10 +99,10 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
* an %EIO error. Multiple read calls may occur in parallel and the ldisc
* must deal with serialization issues.
*
- * Can sleep.
+ * Optional: %EIO unless provided. Can sleep.
*
* @write: [TTY] ``ssize_t ()(struct tty_struct *tty, struct file *file,
- * const unsigned char *buf, size_t nr)``
+ * const u8 *buf, size_t nr)``
*
* This function is called when the user requests to write to the @tty.
* The line discipline will deliver the characters to the low-level tty
@@ -108,7 +110,7 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
* characters first. If this function is not defined, the user will
* receive an %EIO error.
*
- * Can sleep.
+ * Optional: %EIO unless provided. Can sleep.
*
* @ioctl: [TTY] ``int ()(struct tty_struct *tty, unsigned int cmd,
* unsigned long arg)``
@@ -120,6 +122,8 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
 * discipline. So a low-level driver can "grab" an ioctl request before
 * the line discipline has a chance to see it.
*
+ * Optional.
+ *
* @compat_ioctl: [TTY] ``int ()(struct tty_struct *tty, unsigned int cmd,
* unsigned long arg)``
*
@@ -130,11 +134,15 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
* a pointer to wordsize-sensitive structure belongs here, but most of
* ldiscs will happily leave it %NULL.
*
+ * Optional.
+ *
* @set_termios: [TTY] ``void ()(struct tty_struct *tty, const struct ktermios *old)``
*
 * This function notifies the line discipline that a change has been made
* to the termios structure.
*
+ * Optional.
+ *
* @poll: [TTY] ``int ()(struct tty_struct *tty, struct file *file,
* struct poll_table_struct *wait)``
*
@@ -142,6 +150,8 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
* device. It is solely the responsibility of the line discipline to
* handle poll requests.
*
+ * Optional.
+ *
* @hangup: [TTY] ``void ()(struct tty_struct *tty)``
*
* Called on a hangup. Tells the discipline that it should cease I/O to
@@ -149,10 +159,10 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
* but should wait until any pending driver I/O is completed. No further
* calls into the ldisc code will occur.
*
- * Can sleep.
+ * Optional. Can sleep.
*
- * @receive_buf: [DRV] ``void ()(struct tty_struct *tty,
- * const unsigned char *cp, const char *fp, int count)``
+ * @receive_buf: [DRV] ``void ()(struct tty_struct *tty, const u8 *cp,
+ * const u8 *fp, size_t count)``
*
* This function is called by the low-level tty driver to send characters
 * received by the hardware to the line discipline for processing. @cp is
@@ -161,6 +171,8 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
* character was received with a parity error, etc. @fp may be %NULL to
* indicate all data received is %TTY_NORMAL.
*
+ * Optional.
+ *
* @write_wakeup: [DRV] ``void ()(struct tty_struct *tty)``
*
* This function is called by the low-level tty driver to signal that line
@@ -170,13 +182,17 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
 * send, please schedule a tasklet or workqueue to do the real data transfer.
 * Do not send data in this hook; it may lead to a deadlock.
*
+ * Optional.
+ *
* @dcd_change: [DRV] ``void ()(struct tty_struct *tty, bool active)``
*
* Tells the discipline that the DCD pin has changed its status. Used
* exclusively by the %N_PPS (Pulse-Per-Second) line discipline.
*
- * @receive_buf2: [DRV] ``int ()(struct tty_struct *tty,
- * const unsigned char *cp, const char *fp, int count)``
+ * Optional.
+ *
+ * @receive_buf2: [DRV] ``ssize_t ()(struct tty_struct *tty, const u8 *cp,
+ * const u8 *fp, size_t count)``
*
* This function is called by the low-level tty driver to send characters
 * received by the hardware to the line discipline for processing. @cp is a
@@ -186,8 +202,10 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
* indicate all data received is %TTY_NORMAL. If assigned, prefer this
* function for automatic flow control.
*
- * @lookahead_buf: [DRV] ``void ()(struct tty_struct *tty,
- * const unsigned char *cp, const char *fp, int count)``
+ * Optional.
+ *
+ * @lookahead_buf: [DRV] ``void ()(struct tty_struct *tty, const u8 *cp,
+ * const u8 *fp, size_t count)``
*
* This function is called by the low-level tty driver for characters
* not eaten by ->receive_buf() or ->receive_buf2(). It is useful for
@@ -198,6 +216,8 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
* same characters (e.g. by skipping the actions for high-priority
* characters already handled by ->lookahead_buf()).
*
+ * Optional.
+ *
 * @owner: module containing this ldisc (for reference counting)
*
* This structure defines the interface between the tty line discipline
@@ -218,11 +238,10 @@ struct tty_ldisc_ops {
int (*open)(struct tty_struct *tty);
void (*close)(struct tty_struct *tty);
void (*flush_buffer)(struct tty_struct *tty);
- ssize_t (*read)(struct tty_struct *tty, struct file *file,
- unsigned char *buf, size_t nr,
- void **cookie, unsigned long offset);
+ ssize_t (*read)(struct tty_struct *tty, struct file *file, u8 *buf,
+ size_t nr, void **cookie, unsigned long offset);
ssize_t (*write)(struct tty_struct *tty, struct file *file,
- const unsigned char *buf, size_t nr);
+ const u8 *buf, size_t nr);
int (*ioctl)(struct tty_struct *tty, unsigned int cmd,
unsigned long arg);
int (*compat_ioctl)(struct tty_struct *tty, unsigned int cmd,
@@ -235,14 +254,14 @@ struct tty_ldisc_ops {
/*
* The following routines are called from below.
*/
- void (*receive_buf)(struct tty_struct *tty, const unsigned char *cp,
- const char *fp, int count);
+ void (*receive_buf)(struct tty_struct *tty, const u8 *cp,
+ const u8 *fp, size_t count);
void (*write_wakeup)(struct tty_struct *tty);
void (*dcd_change)(struct tty_struct *tty, bool active);
- int (*receive_buf2)(struct tty_struct *tty, const unsigned char *cp,
- const char *fp, int count);
- void (*lookahead_buf)(struct tty_struct *tty, const unsigned char *cp,
- const unsigned char *fp, unsigned int count);
+ size_t (*receive_buf2)(struct tty_struct *tty, const u8 *cp,
+ const u8 *fp, size_t count);
+ void (*lookahead_buf)(struct tty_struct *tty, const u8 *cp,
+ const u8 *fp, size_t count);
struct module *owner;
};
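Against the updated prototypes, a minimal line discipline would be declared along these lines (sketch only; the .num assignment and registration via tty_register_ldisc() are omitted):

#include <linux/module.h>
#include <linux/tty_ldisc.h>

static size_t sample_receive_buf2(struct tty_struct *tty, const u8 *cp,
				  const u8 *fp, size_t count)
{
	/* consume input; returning less than @count throttles the sender */
	return count;
}

static struct tty_ldisc_ops sample_ldisc_ops = {
	.owner        = THIS_MODULE,
	.name         = "sample",
	.receive_buf2 = sample_receive_buf2,
};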
diff --git a/include/linux/tty_port.h b/include/linux/tty_port.h
index edf685a24f7c..1b861f2100b6 100644
--- a/include/linux/tty_port.h
+++ b/include/linux/tty_port.h
@@ -39,9 +39,10 @@ struct tty_port_operations {
};
struct tty_port_client_operations {
- int (*receive_buf)(struct tty_port *port, const unsigned char *, const unsigned char *, size_t);
- void (*lookahead_buf)(struct tty_port *port, const unsigned char *cp,
- const unsigned char *fp, unsigned int count);
+ size_t (*receive_buf)(struct tty_port *port, const u8 *cp, const u8 *fp,
+ size_t count);
+ void (*lookahead_buf)(struct tty_port *port, const u8 *cp,
+ const u8 *fp, size_t count);
void (*write_wakeup)(struct tty_port *port);
};
@@ -113,8 +114,8 @@ struct tty_port {
unsigned char console:1;
struct mutex mutex;
struct mutex buf_mutex;
- unsigned char *xmit_buf;
- DECLARE_KFIFO_PTR(xmit_fifo, unsigned char);
+ u8 *xmit_buf;
+ DECLARE_KFIFO_PTR(xmit_fifo, u8);
unsigned int close_delay;
unsigned int closing_wait;
int drain_delay;
@@ -148,10 +149,10 @@ struct device *tty_port_register_device_attr(struct tty_port *port,
const struct attribute_group **attr_grp);
struct device *tty_port_register_device_serdev(struct tty_port *port,
struct tty_driver *driver, unsigned index,
- struct device *device);
+ struct device *host, struct device *parent);
struct device *tty_port_register_device_attr_serdev(struct tty_port *port,
struct tty_driver *driver, unsigned index,
- struct device *device, void *drvdata,
+ struct device *host, struct device *parent, void *drvdata,
const struct attribute_group **attr_grp);
void tty_port_unregister_device(struct tty_port *port,
struct tty_driver *driver, unsigned index);
diff --git a/include/linux/types.h b/include/linux/types.h
index 688fb943556a..2bc8766ba20c 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -10,6 +10,11 @@
#define DECLARE_BITMAP(name,bits) \
unsigned long name[BITS_TO_LONGS(bits)]
+#ifdef __SIZEOF_INT128__
+typedef __s128 s128;
+typedef __u128 u128;
+#endif
+
typedef u32 __kernel_dev_t;
typedef __kernel_fd_set fd_set;
@@ -35,6 +40,7 @@ typedef __kernel_uid16_t uid16_t;
typedef __kernel_gid16_t gid16_t;
typedef unsigned long uintptr_t;
+typedef long intptr_t;
#ifdef CONFIG_HAVE_UID16
/* This is defined by include/asm-{arch}/posix_types.h */
@@ -114,6 +120,9 @@ typedef s64 int64_t;
#define aligned_be64 __aligned_be64
#define aligned_le64 __aligned_le64
+/* Nanosecond scalar representation for kernel time values */
+typedef s64 ktime_t;
+
/**
* The type used for indexing onto a disc or disc partition.
*
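The new 128-bit typedefs are only available when the compiler advertises __int128; a guarded sketch of the classic use case:

#ifdef __SIZEOF_INT128__
/* high 64 bits of a full-width 64x64 multiply */
static inline u64 sample_mul_hi64(u64 a, u64 b)
{
	return (u64)(((u128)a * b) >> 64);
}
#endif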
diff --git a/include/linux/uacce.h b/include/linux/uacce.h
index 0a81c3dfd26c..e290c0269944 100644
--- a/include/linux/uacce.h
+++ b/include/linux/uacce.h
@@ -86,6 +86,7 @@ enum uacce_q_state {
* @state: queue state machine
* @pasid: pasid associated to the mm
* @handle: iommu_sva handle returned by iommu_sva_bind_device()
+ * @mapping: user space mapping of the queue
*/
struct uacce_queue {
struct uacce_device *uacce;
@@ -97,6 +98,7 @@ struct uacce_queue {
enum uacce_q_state state;
u32 pasid;
struct iommu_sva *handle;
+ struct address_space *mapping;
};
/**
@@ -114,7 +116,6 @@ struct uacce_queue {
* @mutex: protects uacce operation
* @priv: private pointer of the uacce
* @queues: list of queues
- * @inode: core vfs
*/
struct uacce_device {
const char *algs;
@@ -130,7 +131,6 @@ struct uacce_device {
struct mutex mutex;
void *priv;
struct list_head queues;
- struct inode *inode;
};
#if IS_ENABLED(CONFIG_UACCE)
diff --git a/include/linux/ucs2_string.h b/include/linux/ucs2_string.h
index cf3ada3e820e..c499ae809c7d 100644
--- a/include/linux/ucs2_string.h
+++ b/include/linux/ucs2_string.h
@@ -10,6 +10,7 @@ typedef u16 ucs2_char_t;
unsigned long ucs2_strnlen(const ucs2_char_t *s, size_t maxlength);
unsigned long ucs2_strlen(const ucs2_char_t *s);
unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength);
+ssize_t ucs2_strscpy(ucs2_char_t *dst, const ucs2_char_t *src, size_t count);
int ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len);
unsigned long ucs2_utf8size(const ucs2_char_t *src);
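Assuming ucs2_strscpy() follows the strscpy() contract (returns the copied length, or -E2BIG on truncation), callers can check for truncation directly; a sketch:

#include <linux/printk.h>
#include <linux/ucs2_string.h>

/* @dst must hold at least @n ucs2 characters */
static void sample_copy_label(ucs2_char_t *dst, const ucs2_char_t *src, size_t n)
{
	ssize_t len = ucs2_strscpy(dst, src, n);

	if (len == -E2BIG)
		pr_warn("label truncated\n");
}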
diff --git a/include/linux/udp.h b/include/linux/udp.h
index 43c1fb2d2c21..d04188714dca 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -32,25 +32,30 @@ static inline u32 udp_hashfn(const struct net *net, u32 num, u32 mask)
return (num + net_hash_mix(net)) & mask;
}
+enum {
+ UDP_FLAGS_CORK, /* Cork is required */
+ UDP_FLAGS_NO_CHECK6_TX, /* Send zero UDP6 checksums on TX? */
+ UDP_FLAGS_NO_CHECK6_RX, /* Allow zero UDP6 checksums on RX? */
+ UDP_FLAGS_GRO_ENABLED, /* Request GRO aggregation */
+ UDP_FLAGS_ACCEPT_FRAGLIST,
+ UDP_FLAGS_ACCEPT_L4,
+ UDP_FLAGS_ENCAP_ENABLED, /* This socket enabled encap */
+ UDP_FLAGS_UDPLITE_SEND_CC, /* set via udplite setsockopt */
+ UDP_FLAGS_UDPLITE_RECV_CC, /* set via udplite setsockopt */
+};
+
struct udp_sock {
/* inet_sock has to be the first member */
struct inet_sock inet;
#define udp_port_hash inet.sk.__sk_common.skc_u16hashes[0]
#define udp_portaddr_hash inet.sk.__sk_common.skc_u16hashes[1]
#define udp_portaddr_node inet.sk.__sk_common.skc_portaddr_node
+
+ unsigned long udp_flags;
+
int pending; /* Any pending frames ? */
- unsigned int corkflag; /* Cork is required */
__u8 encap_type; /* Is this an Encapsulation socket? */
- unsigned char no_check6_tx:1,/* Send zero UDP6 checksums on TX? */
- no_check6_rx:1,/* Allow zero UDP6 checksums on RX? */
- encap_enabled:1, /* This socket enabled encap
- * processing; UDP tunnels and
- * different encapsulation layer set
- * this
- */
- gro_enabled:1, /* Request GRO aggregation */
- accept_udp_l4:1,
- accept_udp_fraglist:1;
+
/*
* Following member retains the information to create a UDP header
* when the socket is uncorked.
@@ -62,12 +67,6 @@ struct udp_sock {
*/
__u16 pcslen;
__u16 pcrlen;
-/* indicator bits used by pcflag: */
-#define UDPLITE_BIT 0x1 /* set by udplite proto init function */
-#define UDPLITE_SEND_CC 0x2 /* set via udplite setsockopt */
-#define UDPLITE_RECV_CC 0x4 /* set via udplite setsocktopt */
- __u8 pcflag; /* marks socket as UDP-Lite if > 0 */
- __u8 unused[3];
/*
* For encapsulation sockets.
*/
@@ -95,28 +94,39 @@ struct udp_sock {
int forward_threshold;
};
+#define udp_test_bit(nr, sk) \
+ test_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
+#define udp_set_bit(nr, sk) \
+ set_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
+#define udp_test_and_set_bit(nr, sk) \
+ test_and_set_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
+#define udp_clear_bit(nr, sk) \
+ clear_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
+#define udp_assign_bit(nr, sk, val) \
+ assign_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags, val)
+
#define UDP_MAX_SEGMENTS (1 << 6UL)
#define udp_sk(ptr) container_of_const(ptr, struct udp_sock, inet.sk)
static inline void udp_set_no_check6_tx(struct sock *sk, bool val)
{
- udp_sk(sk)->no_check6_tx = val;
+ udp_assign_bit(NO_CHECK6_TX, sk, val);
}
static inline void udp_set_no_check6_rx(struct sock *sk, bool val)
{
- udp_sk(sk)->no_check6_rx = val;
+ udp_assign_bit(NO_CHECK6_RX, sk, val);
}
-static inline bool udp_get_no_check6_tx(struct sock *sk)
+static inline bool udp_get_no_check6_tx(const struct sock *sk)
{
- return udp_sk(sk)->no_check6_tx;
+ return udp_test_bit(NO_CHECK6_TX, sk);
}
-static inline bool udp_get_no_check6_rx(struct sock *sk)
+static inline bool udp_get_no_check6_rx(const struct sock *sk)
{
- return udp_sk(sk)->no_check6_rx;
+ return udp_test_bit(NO_CHECK6_RX, sk);
}
static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk,
@@ -135,10 +145,12 @@ static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
if (!skb_is_gso(skb))
return false;
- if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 && !udp_sk(sk)->accept_udp_l4)
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 &&
+ !udp_test_bit(ACCEPT_L4, sk))
return true;
- if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST && !udp_sk(sk)->accept_udp_fraglist)
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST &&
+ !udp_test_bit(ACCEPT_FRAGLIST, sk))
return true;
return false;
@@ -146,8 +158,8 @@ static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
static inline void udp_allow_gso(struct sock *sk)
{
- udp_sk(sk)->accept_udp_l4 = 1;
- udp_sk(sk)->accept_udp_fraglist = 1;
+ udp_set_bit(ACCEPT_L4, sk);
+ udp_set_bit(ACCEPT_FRAGLIST, sk);
}
#define udp_portaddr_for_each_entry(__sk, list) \
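The accessor macros above turn the old bitfields into atomic bitmap operations; usage mirrors the converted helpers in this hunk (sample_* names are illustrative):

static void sample_enable_gro(struct sock *sk)
{
	udp_set_bit(GRO_ENABLED, sk);	/* was: udp_sk(sk)->gro_enabled = 1 */
}

static bool sample_gro_enabled(const struct sock *sk)
{
	return udp_test_bit(GRO_ENABLED, sk);
}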
diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
index b0542cd11aeb..f85ec5613721 100644
--- a/include/linux/uidgid.h
+++ b/include/linux/uidgid.h
@@ -12,20 +12,12 @@
* to detect when we overlook these differences.
*
*/
-#include <linux/types.h>
+#include <linux/uidgid_types.h>
#include <linux/highuid.h>
struct user_namespace;
extern struct user_namespace init_user_ns;
-
-typedef struct {
- uid_t val;
-} kuid_t;
-
-
-typedef struct {
- gid_t val;
-} kgid_t;
+struct uid_gid_map;
#define KUIDT_INIT(value) (kuid_t){ value }
#define KGIDT_INIT(value) (kgid_t){ value }
@@ -138,6 +130,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
return from_kgid(ns, gid) != (gid_t) -1;
}
+u32 map_id_down(struct uid_gid_map *map, u32 id);
+u32 map_id_up(struct uid_gid_map *map, u32 id);
+
#else
static inline kuid_t make_kuid(struct user_namespace *from, uid_t uid)
@@ -186,6 +181,15 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
return gid_valid(gid);
}
+static inline u32 map_id_down(struct uid_gid_map *map, u32 id)
+{
+ return id;
+}
+
+static inline u32 map_id_up(struct uid_gid_map *map, u32 id)
+{
+ return id;
+}
#endif /* CONFIG_USER_NS */
#endif /* _LINUX_UIDGID_H */
diff --git a/include/linux/uidgid_types.h b/include/linux/uidgid_types.h
new file mode 100644
index 000000000000..b35ac4955a33
--- /dev/null
+++ b/include/linux/uidgid_types.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_UIDGID_TYPES_H
+#define _LINUX_UIDGID_TYPES_H
+
+#include <linux/types.h>
+
+typedef struct {
+ uid_t val;
+} kuid_t;
+
+typedef struct {
+ gid_t val;
+} kgid_t;
+
+#endif /* _LINUX_UIDGID_TYPES_H */
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 044c1d8c230c..00cebe2b70de 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -11,7 +11,6 @@
#include <uapi/linux/uio.h>
struct page;
-struct pipe_inode_info;
typedef unsigned int __bitwise iov_iter_extraction_t;
@@ -22,13 +21,12 @@ struct kvec {
enum iter_type {
/* iter types */
+ ITER_UBUF,
ITER_IOVEC,
- ITER_KVEC,
ITER_BVEC,
- ITER_PIPE,
+ ITER_KVEC,
ITER_XARRAY,
ITER_DISCARD,
- ITER_UBUF,
};
#define ITER_SOURCE 1 // == WRITE
@@ -42,14 +40,9 @@ struct iov_iter_state {
struct iov_iter {
u8 iter_type;
- bool copy_mc;
bool nofault;
bool data_source;
- bool user_backed;
- union {
- size_t iov_offset;
- int last_offset;
- };
+ size_t iov_offset;
/*
* Hack alert: overlay ubuf_iovec with iovec + count, so
* that the members resolve correctly regardless of the type
@@ -74,7 +67,6 @@ struct iov_iter {
const struct kvec *kvec;
const struct bio_vec *bvec;
struct xarray *xarray;
- struct pipe_inode_info *pipe;
void __user *ubuf;
};
size_t count;
@@ -82,10 +74,6 @@ struct iov_iter {
};
union {
unsigned long nr_segs;
- struct {
- unsigned int head;
- unsigned int start_head;
- };
loff_t xarray_start;
};
};
@@ -133,11 +121,6 @@ static inline bool iov_iter_is_bvec(const struct iov_iter *i)
return iov_iter_type(i) == ITER_BVEC;
}
-static inline bool iov_iter_is_pipe(const struct iov_iter *i)
-{
- return iov_iter_type(i) == ITER_PIPE;
-}
-
static inline bool iov_iter_is_discard(const struct iov_iter *i)
{
return iov_iter_type(i) == ITER_DISCARD;
@@ -155,7 +138,7 @@ static inline unsigned char iov_iter_rw(const struct iov_iter *i)
static inline bool user_backed_iter(const struct iov_iter *i)
{
- return i->user_backed;
+ return iter_is_ubuf(i) || iter_is_iovec(i);
}
/*
@@ -175,7 +158,7 @@ static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
return ret;
}
-size_t copy_page_from_iter_atomic(struct page *page, unsigned offset,
+size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
size_t bytes, struct iov_iter *i);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
@@ -196,6 +179,13 @@ static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset,
{
return copy_page_to_iter(&folio->page, offset, bytes, i);
}
+
+static inline size_t copy_folio_from_iter_atomic(struct folio *folio,
+ size_t offset, size_t bytes, struct iov_iter *i)
+{
+ return copy_page_from_iter_atomic(&folio->page, offset, bytes, i);
+}
+
size_t copy_page_to_iter_nofault(struct page *page, unsigned offset,
size_t bytes, struct iov_iter *i);
@@ -257,22 +247,8 @@ size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#ifdef CONFIG_ARCH_HAS_COPY_MC
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
-static inline void iov_iter_set_copy_mc(struct iov_iter *i)
-{
- i->copy_mc = true;
-}
-
-static inline bool iov_iter_is_copy_mc(const struct iov_iter *i)
-{
- return i->copy_mc;
-}
#else
#define _copy_mc_to_iter _copy_to_iter
-static inline void iov_iter_set_copy_mc(struct iov_iter *i) { }
-static inline bool iov_iter_is_copy_mc(const struct iov_iter *i)
-{
- return false;
-}
#endif
size_t iov_iter_zero(size_t bytes, struct iov_iter *);
@@ -286,19 +262,11 @@ void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec
unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
unsigned long nr_segs, size_t count);
-void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe,
- size_t count);
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray,
loff_t start, size_t count);
-ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
- size_t maxsize, unsigned maxpages, size_t *start,
- iov_iter_extraction_t extraction_flags);
ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
size_t maxsize, unsigned maxpages, size_t *start);
-ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
- struct page ***pages, size_t maxsize, size_t *start,
- iov_iter_extraction_t extraction_flags);
ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, struct page ***pages,
size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);
@@ -355,27 +323,6 @@ iov_iter_npages_cap(struct iov_iter *i, int maxpages, size_t max_bytes)
return npages;
}
-struct csum_state {
- __wsum csum;
- size_t off;
-};
-
-size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i);
-size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
-
-static __always_inline __must_check
-bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
- __wsum *csum, struct iov_iter *i)
-{
- size_t copied = csum_and_copy_from_iter(addr, bytes, csum, i);
- if (likely(copied == bytes))
- return true;
- iov_iter_revert(i, copied);
- return false;
-}
-size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
- struct iov_iter *i);
-
struct iovec *iovec_from_user(const struct iovec __user *uvector,
unsigned long nr_segs, unsigned long fast_segs,
struct iovec *fast_iov, bool compat);
@@ -385,8 +332,6 @@ ssize_t import_iovec(int type, const struct iovec __user *uvec,
ssize_t __import_iovec(int type, const struct iovec __user *uvec,
unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
struct iov_iter *i, bool compat);
-int import_single_range(int type, void __user *buf, size_t len,
- struct iovec *iov, struct iov_iter *i);
int import_ubuf(int type, void __user *buf, size_t len, struct iov_iter *i);
static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
@@ -395,8 +340,6 @@ static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
WARN_ON(direction & ~(READ | WRITE));
*i = (struct iov_iter) {
.iter_type = ITER_UBUF,
- .copy_mc = false,
- .user_backed = true,
.data_source = direction,
.ubuf = buf,
.count = count,
@@ -433,4 +376,9 @@ static inline bool iov_iter_extract_will_pin(const struct iov_iter *iter)
return user_backed_iter(iter);
}
+struct sg_table;
+ssize_t extract_iter_to_sg(struct iov_iter *iter, size_t len,
+ struct sg_table *sgtable, unsigned int sg_max,
+ iov_iter_extraction_t extraction_flags);
+
#endif
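With import_single_range() gone, single user buffers are imported with import_ubuf(); a sketch of the replacement pattern (the function name is illustrative):

#include <linux/uio.h>

static int sample_read_user(void __user *ubuf, size_t len, void *dst)
{
	struct iov_iter iter;
	int ret = import_ubuf(ITER_SOURCE, ubuf, len, &iter);

	if (ret)
		return ret;
	if (copy_from_iter(dst, len, &iter) != len)
		return -EFAULT;
	return 0;
}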
diff --git a/include/linux/ulpi/driver.h b/include/linux/ulpi/driver.h
index c7a1810373e3..a8cb617a3028 100644
--- a/include/linux/ulpi/driver.h
+++ b/include/linux/ulpi/driver.h
@@ -15,9 +15,9 @@ struct ulpi_ops;
* @dev: device interface
*/
struct ulpi {
+ struct device dev;
struct ulpi_device_id id;
const struct ulpi_ops *ops;
- struct device dev;
};
#define to_ulpi_dev(d) container_of(d, struct ulpi, dev)
diff --git a/include/linux/umh.h b/include/linux/umh.h
index 5d1f6129b847..daa6a7048c11 100644
--- a/include/linux/umh.h
+++ b/include/linux/umh.h
@@ -42,8 +42,6 @@ call_usermodehelper_setup(const char *path, char **argv, char **envp,
extern int
call_usermodehelper_exec(struct subprocess_info *info, int wait);
-extern struct ctl_table usermodehelper_table[];
-
enum umh_disable_depth {
UMH_ENABLED = 0,
UMH_FREEZING,
diff --git a/include/linux/units.h b/include/linux/units.h
index 2793a41e73a2..45110daaf8d3 100644
--- a/include/linux/units.h
+++ b/include/linux/units.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_UNITS_H
#define _LINUX_UNITS_H
+#include <linux/bits.h>
#include <linux/math.h>
/* Metric prefixes in accordance with Système international (d'unités) */
@@ -31,6 +32,10 @@
#define MICROWATT_PER_MILLIWATT 1000UL
#define MICROWATT_PER_WATT 1000000UL
+#define BYTES_PER_KBIT (KILO / BITS_PER_BYTE)
+#define BYTES_PER_MBIT (MEGA / BITS_PER_BYTE)
+#define BYTES_PER_GBIT (GIGA / BITS_PER_BYTE)
+
#define ABSOLUTE_ZERO_MILLICELSIUS -273150
static inline long milli_kelvin_to_millicelsius(long t)
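The new byte-rate constants evaluate to 125, 125000, and 125000000 respectively (KILO/MEGA/GIGA divided by the 8 bits in a byte); a worked example (the helper name is illustrative):

/* convert a link rate given in kbit/s to bytes per second */
static inline unsigned long sample_kbps_to_Bps(unsigned long kbps)
{
	return kbps * BYTES_PER_KBIT;	/* e.g. 100 kbit/s -> 12500 B/s */
}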
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 25f8e62a30ec..9e52179872a5 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -25,7 +25,6 @@
struct usb_device;
struct usb_driver;
-struct wusb_dev;
/*-------------------------------------------------------------------------*/
@@ -425,7 +424,6 @@ struct usb_host_config {
struct usb_host_bos {
struct usb_bos_descriptor *desc;
- /* wireless cap descriptor is handled by wusb */
struct usb_ext_cap_descriptor *ext_cap;
struct usb_ss_cap_descriptor *ss_cap;
struct usb_ssp_cap_descriptor *ssp_cap;
@@ -612,7 +610,6 @@ struct usb3_lpm_parameters {
* WUSB devices are not, until we authorize them from user space.
* FIXME -- complete doc
* @authenticated: Crypto authentication passed
- * @wusb: device is Wireless USB
* @lpm_capable: device supports LPM
* @lpm_devinit_allow: Allow USB3 device initiated LPM, exit latency is in range
* @usb2_hw_lpm_capable: device can perform USB2 hardware LPM
@@ -634,10 +631,7 @@ struct usb3_lpm_parameters {
* @do_remote_wakeup: remote wakeup should be enabled
* @reset_resume: needs reset instead of resume
* @port_is_suspended: the upstream port is suspended (L2 or U3)
- * @wusb_dev: if this is a Wireless USB device, link to the WUSB
- * specific data for the device.
* @slot_id: Slot ID assigned by xHCI
- * @removable: Device can be physically removed from this port
 * @l1_params: best effort service latency for USB2 L1 LPM state, and L1 timeout.
* @u1_params: exit latencies for USB3 U1 LPM state, and hub-initiated timeout.
* @u2_params: exit latencies for USB3 U2 LPM state, and hub-initiated timeout.
@@ -696,7 +690,6 @@ struct usb_device {
unsigned have_langid:1;
unsigned authorized:1;
unsigned authenticated:1;
- unsigned wusb:1;
unsigned lpm_capable:1;
unsigned lpm_devinit_allow:1;
unsigned usb2_hw_lpm_capable:1;
@@ -727,7 +720,6 @@ struct usb_device {
unsigned reset_resume:1;
unsigned port_is_suspended:1;
- struct wusb_dev *wusb_dev;
int slot_id;
struct usb2_lpm_parameters l1_params;
struct usb3_lpm_parameters u1_params;
@@ -1152,16 +1144,6 @@ extern ssize_t usb_store_new_id(struct usb_dynids *dynids,
extern ssize_t usb_show_dynids(struct usb_dynids *dynids, char *buf);
/**
- * struct usbdrv_wrap - wrapper for driver-model structure
- * @driver: The driver-model core driver structure.
- * @for_devices: Non-zero for device drivers, 0 for interface drivers.
- */
-struct usbdrv_wrap {
- struct device_driver driver;
- int for_devices;
-};
-
-/**
* struct usb_driver - identifies USB interface driver to usbcore
* @name: The driver name should be unique among USB drivers,
* and should normally be the same as the module name.
@@ -1201,7 +1183,7 @@ struct usbdrv_wrap {
* is bound to the driver.
* @dynids: used internally to hold the list of dynamically added device
* ids for this driver.
- * @drvwrap: Driver-model core structure wrapper.
+ * @driver: The driver-model core driver structure.
* @no_dynamic_id: if set to 1, the USB core will not allow dynamic ids to be
* added to this driver by preventing the sysfs file from being created.
* @supports_autosuspend: if set to 0, the USB core will not allow autosuspend
@@ -1249,13 +1231,13 @@ struct usb_driver {
const struct attribute_group **dev_groups;
struct usb_dynids dynids;
- struct usbdrv_wrap drvwrap;
+ struct device_driver driver;
unsigned int no_dynamic_id:1;
unsigned int supports_autosuspend:1;
unsigned int disable_hub_initiated_lpm:1;
unsigned int soft_unbind:1;
};
-#define to_usb_driver(d) container_of(d, struct usb_driver, drvwrap.driver)
+#define to_usb_driver(d) container_of(d, struct usb_driver, driver)
/**
* struct usb_device_driver - identifies USB device driver to usbcore
@@ -1271,9 +1253,12 @@ struct usb_driver {
* module is being unloaded.
* @suspend: Called when the device is going to be suspended by the system.
* @resume: Called when the device is being resumed by the system.
+ * @choose_configuration: If non-NULL, called instead of the default
+ * usb_choose_configuration(). If this returns an error then we'll go
+ * on to call the normal usb_choose_configuration().
* @dev_groups: Attributes attached to the device that will be created once it
* is bound to the driver.
- * @drvwrap: Driver-model core structure wrapper.
+ * @driver: The driver-model core driver structure.
* @id_table: used with @match() to select better matching driver at
* probe() time.
* @supports_autosuspend: if set to 0, the USB core will not allow autosuspend
@@ -1282,7 +1267,7 @@ struct usb_driver {
* resume and suspend functions will be called in addition to the driver's
* own, so this part of the setup does not need to be replicated.
*
- * USB drivers must provide all the fields listed above except drvwrap,
+ * USB drivers must provide all the fields listed above except driver,
* match, and id_table.
*/
struct usb_device_driver {
@@ -1294,14 +1279,17 @@ struct usb_device_driver {
int (*suspend) (struct usb_device *udev, pm_message_t message);
int (*resume) (struct usb_device *udev, pm_message_t message);
+
+ int (*choose_configuration) (struct usb_device *udev);
+
const struct attribute_group **dev_groups;
- struct usbdrv_wrap drvwrap;
+ struct device_driver driver;
const struct usb_device_id *id_table;
unsigned int supports_autosuspend:1;
unsigned int generic_subclass:1;
};
#define to_usb_device_driver(d) container_of(d, struct usb_device_driver, \
- drvwrap.driver)
+ driver)
/**
* struct usb_class_driver - identifies a USB driver that wants to use the USB major number
@@ -1742,11 +1730,6 @@ static inline void usb_fill_bulk_urb(struct urb *urb,
* encoding of the endpoint interval, and express polling intervals in
* microframes (eight per millisecond) rather than in frames (one per
* millisecond).
- *
- * Wireless USB also uses the logarithmic encoding, but specifies it in units of
- * 128us instead of 125us. For Wireless USB devices, the interval is passed
- * through to the host controller, rather than being translated into microframe
- * units.
*/
static inline void usb_fill_int_urb(struct urb *urb,
struct usb_device *dev,
@@ -1835,22 +1818,6 @@ void *usb_alloc_coherent(struct usb_device *dev, size_t size,
void usb_free_coherent(struct usb_device *dev, size_t size,
void *addr, dma_addr_t dma);
-#if 0
-struct urb *usb_buffer_map(struct urb *urb);
-void usb_buffer_dmasync(struct urb *urb);
-void usb_buffer_unmap(struct urb *urb);
-#endif
-
-struct scatterlist;
-int usb_buffer_map_sg(const struct usb_device *dev, int is_in,
- struct scatterlist *sg, int nents);
-#if 0
-void usb_buffer_dmasync_sg(const struct usb_device *dev, int is_in,
- struct scatterlist *sg, int n_hw_ents);
-#endif
-void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
- struct scatterlist *sg, int n_hw_ents);
-
/*-------------------------------------------------------------------*
* SYNCHRONOUS CALL SUPPORT *
*-------------------------------------------------------------------*/
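With struct usbdrv_wrap removed, to_usb_driver() is now a plain container_of() on the embedded struct device_driver; a sketch, valid only when @drv is known to belong to a USB interface driver:

static const char *sample_usb_driver_name(struct device_driver *drv)
{
	return to_usb_driver(drv)->name;
}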
diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h
index 969e7dba6358..c93b410b314a 100644
--- a/include/linux/usb/ch9.h
+++ b/include/linux/usb/ch9.h
@@ -3,7 +3,7 @@
* This file holds USB constants and structures that are needed for
* USB device APIs. These are used by the USB device model, which is
* defined in chapter 9 of the USB 2.0 specification and in the
- * Wireless USB 1.0 (spread around). Linux has several APIs in C that
+ * Wireless USB 1.0 spec (now defunct). Linux has several APIs in C that
* need these:
*
* - the host side Linux-USB kernel driver API;
@@ -14,9 +14,6 @@
* act either as a USB host or as a USB device. That means the host and
* device side APIs benefit from working well together.
*
- * There's also "Wireless USB", using low power short range radios for
- * peripheral interconnection but otherwise building on the USB framework.
- *
* Note all descriptors are declared '__attribute__((packed))' so that:
*
* [a] they never get padded, either internally (USB spec writers
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
index ee38835ed77c..5a7f96684ea2 100644
--- a/include/linux/usb/chipidea.h
+++ b/include/linux/usb/chipidea.h
@@ -63,6 +63,8 @@ struct ci_hdrc_platform_data {
#define CI_HDRC_IMX_IS_HSIC BIT(14)
#define CI_HDRC_PMQOS BIT(15)
#define CI_HDRC_PHY_VBUS_CONTROL BIT(16)
+#define CI_HDRC_HAS_PORTSC_PEC_MISSED BIT(17)
+#define CI_HDRC_FORCE_VBUS_ACTIVE_ALWAYS BIT(18)
enum usb_dr_mode dr_mode;
#define CI_HDRC_CONTROLLER_RESET_EVENT 0
#define CI_HDRC_CONTROLLER_STOPPED_EVENT 1
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index 07531c4f4350..af3cd2aae4bc 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -35,6 +35,14 @@
 * are ready. The control transfer will then be kept from completing until
 * all the function drivers that requested USB_GADGET_DELAYED_STATUS
* invoke usb_composite_setup_continue().
+ *
+ * NOTE: USB_GADGET_DELAYED_STATUS must not be used in UDC drivers: they
+ * must delay completing the status stage for 0-length control transfers
+ * regardless of whether USB_GADGET_DELAYED_STATUS is returned from
+ * the gadget driver's setup() callback.
+ * Currently, a number of UDC drivers rely on USB_GADGET_DELAYED_STATUS,
+ * which is a bug. These drivers must be fixed and USB_GADGET_DELAYED_STATUS
+ * must be contained within the composite framework.
*/
#define USB_GADGET_DELAYED_STATUS 0x7fff /* Impossibly large value */
@@ -450,29 +458,6 @@ static inline struct usb_composite_driver *to_cdriver(
*
* One of these devices is allocated and initialized before the
* associated device driver's bind() is called.
- *
- * OPEN ISSUE: it appears that some WUSB devices will need to be
- * built by combining a normal (wired) gadget with a wireless one.
- * This revision of the gadget framework should probably try to make
- * sure doing that won't hurt too much.
- *
- * One notion for how to handle Wireless USB devices involves:
- *
- * (a) a second gadget here, discovery mechanism TBD, but likely
- * needing separate "register/unregister WUSB gadget" calls;
- * (b) updates to usb_gadget to include flags "is it wireless",
- * "is it wired", plus (presumably in a wrapper structure)
- * bandgroup and PHY info;
- * (c) presumably a wireless_ep wrapping a usb_ep, and reporting
- * wireless-specific parameters like maxburst and maxsequence;
- * (d) configurations that are specific to wireless links;
- * (e) function drivers that understand wireless configs and will
- * support wireless for (additional) function instances;
- * (f) a function to support association setup (like CBAF), not
- * necessarily requiring a wireless adapter;
- * (g) composite device setup that can create one or more wireless
- * configs, including appropriate association setup support;
- * (h) more, TBD.
*/
struct usb_composite_dev {
struct usb_gadget *gadget;
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 75bda0783395..6532beb587b1 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -711,6 +711,15 @@ static inline int usb_gadget_check_config(struct usb_gadget *gadget)
* get_interface. Setting a configuration (or interface) is where
* endpoints should be activated or (config 0) shut down.
*
+ * The gadget driver's setup() callback does not have to queue a response to
+ * ep0 within the setup() call; the driver can do it after setup() returns.
+ * The UDC driver must wait until such a response is queued before proceeding
+ * with the data/status stages of the control transfer.
+ *
+ * NOTE: Currently, a number of UDC drivers rely on USB_GADGET_DELAYED_STATUS
+ * being returned from the setup() callback, which is a bug. See the comment
+ * next to USB_GADGET_DELAYED_STATUS for details.
+ *
* (Note that only the default control endpoint is supported. Neither
* hosts nor devices generally support control traffic except to ep0.)
*
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 0c7eff91adf4..cd77fc6095a1 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -154,7 +154,6 @@ struct usb_hcd {
/* The next flag is a stopgap, to be removed when all the HCDs
* support the new root-hub polling mechanism. */
unsigned uses_new_polling:1;
- unsigned wireless:1; /* Wireless USB HCD */
unsigned has_tt:1; /* Integrated TT in root hub */
unsigned amd_resume_bug:1; /* AMD remote wakeup quirk */
unsigned can_do_streams:1; /* HC supports streams */
@@ -249,7 +248,6 @@ struct hc_driver {
#define HCD_SHARED 0x0004 /* Two (or more) usb_hcds share HW */
#define HCD_USB11 0x0010 /* USB 1.1 */
#define HCD_USB2 0x0020 /* USB 2.0 */
-#define HCD_USB25 0x0030 /* Wireless USB 1.0 (USB 2.5)*/
#define HCD_USB3 0x0040 /* USB 3.0 */
#define HCD_USB31 0x0050 /* USB 3.1 */
#define HCD_USB32 0x0060 /* USB 3.2 */
@@ -267,7 +265,7 @@ struct hc_driver {
int (*pci_suspend)(struct usb_hcd *hcd, bool do_wakeup);
/* called after entering D0 (etc), before resuming the hub */
- int (*pci_resume)(struct usb_hcd *hcd, bool hibernated);
+ int (*pci_resume)(struct usb_hcd *hcd, pm_message_t state);
/* called just before hibernate final D3 state, allows host to poweroff parts */
int (*pci_poweroff_late)(struct usb_hcd *hcd, bool do_wakeup);
@@ -374,8 +372,9 @@ struct hc_driver {
* or bandwidth constraints.
*/
void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *);
- /* Returns the hardware-chosen device address */
- int (*address_device)(struct usb_hcd *, struct usb_device *udev);
+ /* Set the hardware-chosen device address */
+ int (*address_device)(struct usb_hcd *, struct usb_device *udev,
+ unsigned int timeout_ms);
/* prepares the hardware to send commands to the device */
int (*enable_device)(struct usb_hcd *, struct usb_device *udev);
/* Notifies the HCD after a hub descriptor is fetched.
@@ -486,8 +485,25 @@ extern int usb_hcd_pci_probe(struct pci_dev *dev,
extern void usb_hcd_pci_remove(struct pci_dev *dev);
extern void usb_hcd_pci_shutdown(struct pci_dev *dev);
+#ifdef CONFIG_USB_PCI_AMD
extern int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *dev);
+static inline bool usb_hcd_amd_resume_bug(struct pci_dev *dev,
+ const struct hc_driver *driver)
+{
+ if (!usb_hcd_amd_remote_wakeup_quirk(dev))
+ return false;
+ if (driver->flags & (HCD_USB11 | HCD_USB3))
+ return true;
+ return false;
+}
+#else /* CONFIG_USB_PCI_AMD */
+static inline bool usb_hcd_amd_resume_bug(struct pci_dev *dev,
+ const struct hc_driver *driver)
+{
+ return false;
+}
+#endif
extern const struct dev_pm_ops usb_hcd_pci_pm_ops;
#endif /* CONFIG_USB_PCI */
diff --git a/include/linux/usb/ljca.h b/include/linux/usb/ljca.h
new file mode 100644
index 000000000000..47661feda96c
--- /dev/null
+++ b/include/linux/usb/ljca.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023, Intel Corporation. All rights reserved.
+ */
+#ifndef _LINUX_USB_LJCA_H_
+#define _LINUX_USB_LJCA_H_
+
+#include <linux/auxiliary_bus.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#define LJCA_MAX_GPIO_NUM 64
+
+#define auxiliary_dev_to_ljca_client(auxiliary_dev) \
+ container_of(auxiliary_dev, struct ljca_client, auxdev)
+
+struct ljca_adapter;
+
+/**
+ * typedef ljca_event_cb_t - event callback function signature
+ *
+ * @context: the execution context of who registered this callback
+ * @cmd: the command from device for this event
+ * @evt_data: the event data payload
+ * @len: the event data payload length
+ *
+ * The callback function is called in interrupt context and the data payload is
+ * only valid during the call. If the user needs to access the data later, it
+ * must copy it.
+ */
+typedef void (*ljca_event_cb_t)(void *context, u8 cmd, const void *evt_data, int len);
+
+/**
+ * struct ljca_client - represent an ljca client device
+ *
+ * @type: ljca client type
+ * @id: ljca client id within same client type
+ * @link: ljca client on the same ljca adapter
+ * @auxdev: auxiliary device object
+ * @adapter: ljca adapter the ljca client sits on
+ * @context: the execution context of the event callback
+ * @event_cb: ljca client driver registers this callback to get
+ * firmware asynchronous rx buffer pending notifications
+ * @event_cb_lock: spinlock to protect event callback
+ */
+struct ljca_client {
+ u8 type;
+ u8 id;
+ struct list_head link;
+ struct auxiliary_device auxdev;
+ struct ljca_adapter *adapter;
+
+ void *context;
+ ljca_event_cb_t event_cb;
+ /* lock to protect event_cb */
+ spinlock_t event_cb_lock;
+};
+
+/**
+ * struct ljca_gpio_info - ljca gpio client device info
+ *
+ * @num: ljca gpio client device pin number
+ * @valid_pin_map: ljca gpio client device valid pin mapping
+ */
+struct ljca_gpio_info {
+ unsigned int num;
+ DECLARE_BITMAP(valid_pin_map, LJCA_MAX_GPIO_NUM);
+};
+
+/**
+ * struct ljca_i2c_info - ljca i2c client device info
+ *
+ * @id: ljca i2c client device identification number
+ * @capacity: ljca i2c client device capacity
+ * @intr_pin: ljca i2c client device interrupt pin number if exists
+ */
+struct ljca_i2c_info {
+ u8 id;
+ u8 capacity;
+ u8 intr_pin;
+};
+
+/**
+ * struct ljca_spi_info - ljca spi client device info
+ *
+ * @id: ljca spi client device identification number
+ * @capacity: ljca spi client device capacity
+ */
+struct ljca_spi_info {
+ u8 id;
+ u8 capacity;
+};
+
+/**
+ * ljca_register_event_cb - register a callback function to receive events
+ *
+ * @client: ljca client device
+ * @event_cb: callback function
+ * @context: execution context of event callback
+ *
+ * Return: 0 in case of success, negative value in case of error
+ */
+int ljca_register_event_cb(struct ljca_client *client, ljca_event_cb_t event_cb, void *context);
+
+/**
+ * ljca_unregister_event_cb - unregister the callback function for an event
+ *
+ * @client: ljca client device
+ */
+void ljca_unregister_event_cb(struct ljca_client *client);
+
+/**
+ * ljca_transfer - issue a LJCA command and wait for a response
+ *
+ * @client: ljca client device
+ * @cmd: the command to be sent to the device
+ * @obuf: the buffer to be sent to the device; it can be NULL if the user
+ * doesn't need to transmit data with this command
+ * @obuf_len: the size of the buffer to be sent to the device; it should
+ * be 0 when obuf is NULL
+ * @ibuf: any data associated with the response will be copied here; it can be
+ * NULL if the user doesn't need the response data
+ * @ibuf_len: must be initialized to the input buffer size
+ *
+ * Return: the actual length of response data for success, negative value for errors
+ */
+int ljca_transfer(struct ljca_client *client, u8 cmd, const u8 *obuf,
+ u8 obuf_len, u8 *ibuf, u8 ibuf_len);
+
+/**
+ * ljca_transfer_noack - issue a LJCA command without a response
+ *
+ * @client: ljca client device
+ * @cmd: the command to be sent to the device
+ * @obuf: the buffer to be sent to the device; it can be NULL if the user
+ * doesn't need to transmit data with this command
+ * @obuf_len: the size of the buffer to be sent to the device
+ *
+ * Return: 0 for success, negative value for errors
+ */
+int ljca_transfer_noack(struct ljca_client *client, u8 cmd, const u8 *obuf,
+ u8 obuf_len);
+
+#endif
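A client driver would typically register its event callback and issue a command from probe(); a sketch with made-up values (SAMPLE_CMD_GET_VERSION and the sample_* names are not real LJCA identifiers):

#include <linux/usb/ljca.h>

#define SAMPLE_CMD_GET_VERSION	0x01	/* illustrative only */

static void sample_event(void *context, u8 cmd, const void *evt_data, int len)
{
	/* interrupt context: copy @evt_data if it is needed later */
}

static int sample_probe(struct ljca_client *client)
{
	u8 version[4];
	int ret = ljca_register_event_cb(client, sample_event, NULL);

	if (ret)
		return ret;
	ret = ljca_transfer(client, SAMPLE_CMD_GET_VERSION, NULL, 0,
			    version, sizeof(version));
	return ret < 0 ? ret : 0;
}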
diff --git a/include/linux/usb/midi-v2.h b/include/linux/usb/midi-v2.h
new file mode 100644
index 000000000000..16f09d959a2d
--- /dev/null
+++ b/include/linux/usb/midi-v2.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * <linux/usb/midi-v2.h> -- USB MIDI 2.0 definitions.
+ */
+
+#ifndef __LINUX_USB_MIDI_V2_H
+#define __LINUX_USB_MIDI_V2_H
+
+#include <linux/types.h>
+#include <linux/usb/midi.h>
+
+/* A.1 MS Class-Specific Interface Descriptor Types */
+#define USB_DT_CS_GR_TRM_BLOCK 0x26
+
+/* A.1 MS Class-Specific Interface Descriptor Subtypes */
+/* same as MIDI 1.0 */
+
+/* A.2 MS Class-Specific Endpoint Descriptor Subtypes */
+#define USB_MS_GENERAL_2_0 0x02
+
+/* A.3 MS Class-Specific Group Terminal Block Descriptor Subtypes */
+#define USB_MS_GR_TRM_BLOCK_UNDEFINED 0x00
+#define USB_MS_GR_TRM_BLOCK_HEADER 0x01
+#define USB_MS_GR_TRM_BLOCK 0x02
+
+/* A.4 MS Interface Header MIDIStreaming Class Revision */
+#define USB_MS_REV_MIDI_1_0 0x0100
+#define USB_MS_REV_MIDI_2_0 0x0200
+
+/* A.5 MS MIDI IN and OUT Jack Types */
+/* same as MIDI 1.0 */
+
+/* A.6 Group Terminal Block Types */
+#define USB_MS_GR_TRM_BLOCK_TYPE_BIDIRECTIONAL 0x00
+#define USB_MS_GR_TRM_BLOCK_TYPE_INPUT_ONLY 0x01
+#define USB_MS_GR_TRM_BLOCK_TYPE_OUTPUT_ONLY 0x02
+
+/* A.7 Group Terminal Default MIDI Protocol */
+#define USB_MS_MIDI_PROTO_UNKNOWN 0x00 /* Unknown (Use MIDI-CI) */
+#define USB_MS_MIDI_PROTO_1_0_64 0x01 /* MIDI 1.0, UMP up to 64bits */
+#define USB_MS_MIDI_PROTO_1_0_64_JRTS 0x02 /* MIDI 1.0, UMP up to 64bits, Jitter Reduction Timestamps */
+#define USB_MS_MIDI_PROTO_1_0_128 0x03 /* MIDI 1.0, UMP up to 128bits */
+#define USB_MS_MIDI_PROTO_1_0_128_JRTS 0x04 /* MIDI 1.0, UMP up to 128bits, Jitter Reduction Timestamps */
+#define USB_MS_MIDI_PROTO_2_0 0x11 /* MIDI 2.0 */
+#define USB_MS_MIDI_PROTO_2_0_JRTS 0x12 /* MIDI 2.0, Jitter Reduction Timestamps */
+
+/* 5.2.2.1 Class-Specific MS Interface Header Descriptor */
+/* Same as MIDI 1.0, use struct usb_ms_header_descriptor */
+
+/* 5.3.2 Class-Specific MIDI Streaming Data Endpoint Descriptor */
+struct usb_ms20_endpoint_descriptor {
+ __u8 bLength; /* 4+n */
+ __u8 bDescriptorType; /* USB_DT_CS_ENDPOINT */
+ __u8 bDescriptorSubtype; /* USB_MS_GENERAL_2_0 */
+ __u8 bNumGrpTrmBlock; /* Number of Group Terminal Blocks: n */
+ __u8 baAssoGrpTrmBlkID[]; /* ID of the Group Terminal Blocks [n] */
+} __packed;
+
+#define USB_DT_MS20_ENDPOINT_SIZE(n) (4 + (n))
+
+/* As above, but more useful for defining your own descriptors: */
+#define DECLARE_USB_MS20_ENDPOINT_DESCRIPTOR(n) \
+struct usb_ms20_endpoint_descriptor_##n { \
+ __u8 bLength; \
+ __u8 bDescriptorType; \
+ __u8 bDescriptorSubtype; \
+ __u8 bNumGrpTrmBlock; \
+ __u8 baAssoGrpTrmBlkID[n]; \
+} __packed
+
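+/*
+ * Illustrative use of the macro above (the two-block descriptor and the
+ * block IDs are hypothetical, for example only):
+ *
+ *	DECLARE_USB_MS20_ENDPOINT_DESCRIPTOR(2);
+ *
+ *	static const struct usb_ms20_endpoint_descriptor_2 ms20_ep = {
+ *		.bLength		= USB_DT_MS20_ENDPOINT_SIZE(2),
+ *		.bDescriptorType	= USB_DT_CS_ENDPOINT,
+ *		.bDescriptorSubtype	= USB_MS_GENERAL_2_0,
+ *		.bNumGrpTrmBlock	= 2,
+ *		.baAssoGrpTrmBlkID	= { 0x01, 0x02 },
+ *	};
+ */
+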
+/* 5.4.1 Class-Specific Group Terminal Block Header Descriptor */
+struct usb_ms20_gr_trm_block_header_descriptor {
+ __u8 bLength; /* 5 */
+ __u8 bDescriptorType; /* USB_DT_CS_GR_TRM_BLOCK */
+ __u8 bDescriptorSubtype; /* USB_MS_GR_TRM_BLOCK_HEADER */
+ __le16 wTotalLength; /* Total number of bytes */
+} __packed;
+
+/* 5.4.2.1 Group Terminal Block Descriptor */
+struct usb_ms20_gr_trm_block_descriptor {
+ __u8 bLength; /* 13 */
+ __u8 bDescriptorType; /* USB_DT_CS_GR_TRM_BLOCK */
+ __u8 bDescriptorSubtype; /* USB_MS_GR_TRM_BLOCK */
+ __u8 bGrpTrmBlkID; /* ID of this Group Terminal Block */
+ __u8 bGrpTrmBlkType; /* Group Terminal Block Type */
+ __u8 nGroupTrm; /* The first member Group Terminal in this block */
+ __u8 nNumGroupTrm; /* Number of member Group Terminals spanned */
+ __u8 iBlockItem; /* String ID of Block item */
+ __u8 bMIDIProtocol; /* Default MIDI protocol */
+ __le16 wMaxInputBandwidth; /* Max input bandwidth capability in 4kB/s */
+ __le16 wMaxOutputBandwidth; /* Max output bandwidth capability in 4kB/s */
+} __packed;
+
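+/*
+ * Illustrative note: the bandwidth fields above are expressed in units of
+ * 4 kB/s, so e.g. a (hypothetical) wMaxInputBandwidth of 0x0010 advertises
+ * 16 * 4 = 64 kB/s.
+ */
+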
+#endif /* __LINUX_USB_MIDI_V2_H */
diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h
index e4a3ad3c800f..3963e55e88a3 100644
--- a/include/linux/usb/musb.h
+++ b/include/linux/usb/musb.h
@@ -99,9 +99,6 @@ struct musb_hdrc_platform_data {
/* (HOST or OTG) program PHY for external Vbus */
unsigned extvbus:1;
- /* Power the device on or off */
- int (*set_power)(int state);
-
/* MUSB configuration-specific details */
const struct musb_hdrc_config *config;
@@ -135,14 +132,4 @@ static inline int musb_mailbox(enum musb_vbus_id_status status)
#define TUSB6010_REFCLK_24 41667 /* psec/clk @ 24.0 MHz XI */
#define TUSB6010_REFCLK_19 52083 /* psec/clk @ 19.2 MHz CLKIN */
-#ifdef CONFIG_ARCH_OMAP2
-
-extern int __init tusb6010_setup_interface(
- struct musb_hdrc_platform_data *data,
- unsigned ps_refclk, unsigned waitpin,
- unsigned async_cs, unsigned sync_cs,
- unsigned irq, unsigned dmachan);
-
-#endif /* OMAP2 */
-
#endif /* __LINUX_USB_MUSB_H */
diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h
index c59fb79a42e8..eb626af0e4e7 100644
--- a/include/linux/usb/pd.h
+++ b/include/linux/usb/pd.h
@@ -228,6 +228,7 @@ enum pd_pdo_type {
#define PDO_FIXED_UNCHUNK_EXT BIT(24) /* Unchunked Extended Message supported (Source) */
#define PDO_FIXED_FRS_CURR_MASK (BIT(24) | BIT(23)) /* FR_Swap Current (Sink) */
#define PDO_FIXED_FRS_CURR_SHIFT 23
+#define PDO_FIXED_PEAK_CURR_SHIFT 20
#define PDO_FIXED_VOLT_SHIFT 10 /* 50mV units */
#define PDO_FIXED_CURR_SHIFT 0 /* 10mA units */
diff --git a/include/linux/usb/pd_vdo.h b/include/linux/usb/pd_vdo.h
index b057250704e8..3a747938cdab 100644
--- a/include/linux/usb/pd_vdo.h
+++ b/include/linux/usb/pd_vdo.h
@@ -376,6 +376,7 @@
| ((vbm) & 0x3) << 9 | (sbu) << 8 | (sbut) << 7 | ((cur) & 0x3) << 5 \
| (vbt) << 4 | (sopp) << 3 | ((spd) & 0x7))
+#define VDO_TYPEC_CABLE_SPEED(vdo) ((vdo) & 0x7)
#define VDO_TYPEC_CABLE_TYPE(vdo) (((vdo) >> 18) & 0x3)
/*
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index eeb7c2157c72..59409c1fc3de 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -72,4 +72,7 @@
/* device has endpoints that should be ignored */
#define USB_QUIRK_ENDPOINT_IGNORE BIT(15)
+/* short SET_ADDRESS request timeout */
+#define USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT BIT(16)
+
#endif /* __LINUX_USB_QUIRKS_H */
diff --git a/include/linux/usb/r8152.h b/include/linux/usb/r8152.h
index 20d88b1defc3..33a4c146dc19 100644
--- a/include/linux/usb/r8152.h
+++ b/include/linux/usb/r8152.h
@@ -29,6 +29,8 @@
#define VENDOR_ID_LINKSYS 0x13b1
#define VENDOR_ID_NVIDIA 0x0955
#define VENDOR_ID_TPLINK 0x2357
+#define VENDOR_ID_DLINK 0x2001
+#define VENDOR_ID_ASUS 0x0b05
#if IS_REACHABLE(CONFIG_USB_RTL8152)
extern u8 rtl8152_get_version(struct usb_interface *intf);
diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
index d418c55523a7..372898d9eeb0 100644
--- a/include/linux/usb/renesas_usbhs.h
+++ b/include/linux/usb/renesas_usbhs.h
@@ -5,16 +5,6 @@
* Copyright (C) 2011 Renesas Solutions Corp.
* Copyright (C) 2019 Renesas Electronics Corporation
* Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#ifndef RENESAS_USB_H
#define RENESAS_USB_H
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 7eeb5f9c4f0d..1a0a4dc87980 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -278,7 +278,7 @@ struct usb_serial_driver {
int (*set_serial)(struct tty_struct *tty, struct serial_struct *ss);
void (*set_termios)(struct tty_struct *tty, struct usb_serial_port *port,
const struct ktermios *old);
- void (*break_ctl)(struct tty_struct *tty, int break_state);
+ int (*break_ctl)(struct tty_struct *tty, int break_state);
unsigned int (*chars_in_buffer)(struct tty_struct *tty);
void (*wait_until_sent)(struct tty_struct *tty, long timeout);
bool (*tx_empty)(struct usb_serial_port *port);
diff --git a/include/linux/usb/tcpci.h b/include/linux/usb/tcpci.h
index 85e95a3251d3..467e8045e9f8 100644
--- a/include/linux/usb/tcpci.h
+++ b/include/linux/usb/tcpci.h
@@ -36,7 +36,9 @@
#define TCPC_ALERT_MASK 0x12
#define TCPC_POWER_STATUS_MASK 0x14
-#define TCPC_FAULT_STATUS_MASK 0x15
+
+#define TCPC_FAULT_STATUS_MASK 0x15
+#define TCPC_FAULT_STATUS_MASK_VCONN_OC BIT(1)
#define TCPC_EXTENDED_STATUS_MASK 0x16
#define TCPC_EXTENDED_STATUS_MASK_VSAFE0V BIT(0)
@@ -103,6 +105,8 @@
#define TCPC_POWER_STATUS_SINKING_VBUS BIT(0)
#define TCPC_FAULT_STATUS 0x1f
+#define TCPC_FAULT_STATUS_ALL_REG_RST_TO_DEFAULT BIT(7)
+#define TCPC_FAULT_STATUS_VCONN_OC BIT(1)
#define TCPC_ALERT_EXTENDED 0x21
diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h
index ab7ca872950b..65fac5e1f317 100644
--- a/include/linux/usb/tcpm.h
+++ b/include/linux/usb/tcpm.h
@@ -173,5 +173,6 @@ void tcpm_pd_hard_reset(struct tcpm_port *port);
void tcpm_tcpc_reset(struct tcpm_port *port);
void tcpm_port_clean(struct tcpm_port *port);
bool tcpm_port_is_toggling(struct tcpm_port *port);
+void tcpm_port_error_recovery(struct tcpm_port *port);
#endif /* __LINUX_USB_TCPM_H */
diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h
index 8fa781207970..a05d6f6f2536 100644
--- a/include/linux/usb/typec.h
+++ b/include/linux/usb/typec.h
@@ -202,6 +202,8 @@ struct typec_cable_desc {
* @accessory: Audio, Debug or none.
* @identity: Discover Identity command data
* @pd_revision: USB Power Delivery Specification Revision if supported
+ * @attach: Notification about attached USB device
+ * @deattach: Notification about removed USB device
*
* Details about a partner that is attached to USB Type-C port. If @identity
* member exists when partner is registered, a directory named "identity" is
@@ -217,6 +219,9 @@ struct typec_partner_desc {
enum typec_accessory accessory;
struct usb_pd_identity *identity;
u16 pd_revision; /* 0300H = "3.0" */
+
+ void (*attach)(struct typec_partner *partner, struct device *dev);
+ void (*deattach)(struct typec_partner *partner, struct device *dev);
};
/**
@@ -335,4 +340,36 @@ int typec_port_set_usb_power_delivery(struct typec_port *port, struct usb_power_
int typec_partner_set_usb_power_delivery(struct typec_partner *partner,
struct usb_power_delivery *pd);
+/**
+ * struct typec_connector - Representation of Type-C port for external drivers
+ * @attach: notification about device connection
+ * @deattach: notification about device removal
+ *
+ * Drivers that control the USB and other ports (DisplayPort, etc.) that are
+ * connected to the Type-C connectors can use these callbacks to inform the
+ * Type-C connector class about connections and disconnections. That information
+ * can then be used by the typec-port drivers to power on or off the parts that
+ * are or are not needed. As an example, in USB mode, if a USB2 device is
+ * enumerated, the USB3 components (retimers, phys, and what have you) do not
+ * need to be powered on.
+ *
+ * The attached (enumerated) devices will be linked with the typec-partner
+ * device.
+ */
+struct typec_connector {
+ void (*attach)(struct typec_connector *con, struct device *dev);
+ void (*deattach)(struct typec_connector *con, struct device *dev);
+};
+
+static inline void typec_attach(struct typec_connector *con, struct device *dev)
+{
+ if (con && con->attach)
+ con->attach(con, dev);
+}
+
+static inline void typec_deattach(struct typec_connector *con, struct device *dev)
+{
+ if (con && con->deattach)
+ con->deattach(con, dev);
+}
+
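+/*
+ * Illustrative sketch (not part of this patch): a driver that has resolved
+ * its typec_connector can call
+ *
+ *	typec_attach(con, &udev->dev);
+ *
+ * when a device is enumerated, and typec_deattach() when it is removed.
+ * Both helpers are NULL-safe, so callers need not check con first.
+ */
+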
#endif /* __LINUX_USB_TYPEC_H */
diff --git a/include/linux/usb/typec_altmode.h b/include/linux/usb/typec_altmode.h
index 350d49012659..28aeef8f9e7b 100644
--- a/include/linux/usb/typec_altmode.h
+++ b/include/linux/usb/typec_altmode.h
@@ -67,7 +67,7 @@ struct typec_altmode_ops {
int typec_altmode_enter(struct typec_altmode *altmode, u32 *vdo);
int typec_altmode_exit(struct typec_altmode *altmode);
-void typec_altmode_attention(struct typec_altmode *altmode, u32 vdo);
+int typec_altmode_attention(struct typec_altmode *altmode, u32 vdo);
int typec_altmode_vdm(struct typec_altmode *altmode,
const u32 header, const u32 *vdo, int count);
int typec_altmode_notify(struct typec_altmode *altmode, unsigned long conf,
diff --git a/include/linux/usb/typec_dp.h b/include/linux/usb/typec_dp.h
index 8d09c2f0a9b8..1f358098522d 100644
--- a/include/linux/usb/typec_dp.h
+++ b/include/linux/usb/typec_dp.h
@@ -67,8 +67,10 @@ enum {
#define DP_CAP_UFP_D 1
#define DP_CAP_DFP_D 2
#define DP_CAP_DFP_D_AND_UFP_D 3
-#define DP_CAP_DP_SIGNALING BIT(2) /* Always set */
-#define DP_CAP_GEN2 BIT(3) /* Reserved after v1.0b */
+#define DP_CAP_DP_SIGNALLING(_cap_) (((_cap_) & GENMASK(5, 2)) >> 2)
+#define DP_CAP_SIGNALLING_HBR3 1
+#define DP_CAP_SIGNALLING_UHBR10 2
+#define DP_CAP_SIGNALLING_UHBR20 3
#define DP_CAP_RECEPTACLE BIT(6)
#define DP_CAP_USB BIT(7)
#define DP_CAP_DFP_D_PIN_ASSIGN(_cap_) (((_cap_) & GENMASK(15, 8)) >> 8)
@@ -78,6 +80,13 @@ enum {
DP_CAP_UFP_D_PIN_ASSIGN(_cap_) : DP_CAP_DFP_D_PIN_ASSIGN(_cap_))
#define DP_CAP_PIN_ASSIGN_DFP_D(_cap_) ((_cap_ & DP_CAP_RECEPTACLE) ? \
DP_CAP_DFP_D_PIN_ASSIGN(_cap_) : DP_CAP_UFP_D_PIN_ASSIGN(_cap_))
+#define DP_CAP_UHBR_13_5_SUPPORT BIT(26)
+#define DP_CAP_CABLE_TYPE(_cap_) (((_cap_) & GENMASK(29, 28)) >> 28)
+#define DP_CAP_CABLE_TYPE_PASSIVE 0
+#define DP_CAP_CABLE_TYPE_RE_TIMER 1
+#define DP_CAP_CABLE_TYPE_RE_DRIVER 2
+#define DP_CAP_CABLE_TYPE_OPTICAL 3
+#define DP_CAP_DPAM_VERSION BIT(30)
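+
+/*
+ * Example of decoding the capability VDO with the helpers above (the
+ * variable name "cap" is illustrative):
+ *
+ *	if (DP_CAP_DP_SIGNALLING(cap) == DP_CAP_SIGNALLING_UHBR20 &&
+ *	    DP_CAP_CABLE_TYPE(cap) == DP_CAP_CABLE_TYPE_OPTICAL)
+ *		... UHBR20 over an optical cable is available ...
+ */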
/* DisplayPort Status Update VDO bits */
#define DP_STATUS_CONNECTION(_status_) ((_status_) & 3)
@@ -97,13 +106,24 @@ enum {
#define DP_CONF_CURRENTLY(_conf_) ((_conf_) & 3)
#define DP_CONF_UFP_U_AS_DFP_D BIT(0)
#define DP_CONF_UFP_U_AS_UFP_D BIT(1)
-#define DP_CONF_SIGNALING_DP BIT(2)
-#define DP_CONF_SIGNALING_GEN_2 BIT(3) /* Reserved after v1.0b */
+#define DP_CONF_SIGNALLING_MASK GENMASK(5, 2)
+#define DP_CONF_SIGNALLING_SHIFT 2
+#define DP_CONF_SIGNALLING_HBR3 1
+#define DP_CONF_SIGNALLING_UHBR10 2
+#define DP_CONF_SIGNALLING_UHBR20 3
#define DP_CONF_PIN_ASSIGNEMENT_SHIFT 8
#define DP_CONF_PIN_ASSIGNEMENT_MASK GENMASK(15, 8)
/* Helper for setting/getting the pin assignment value to the configuration */
#define DP_CONF_SET_PIN_ASSIGN(_a_) ((_a_) << 8)
#define DP_CONF_GET_PIN_ASSIGN(_conf_) (((_conf_) & GENMASK(15, 8)) >> 8)
+#define DP_CONF_UHBR13_5_SUPPORT BIT(26)
+#define DP_CONF_CABLE_TYPE_MASK GENMASK(29, 28)
+#define DP_CONF_CABLE_TYPE_SHIFT 28
+#define DP_CONF_CABLE_TYPE_PASSIVE 0
+#define DP_CONF_CABLE_TYPE_RE_TIMER 1
+#define DP_CONF_CABLE_TYPE_RE_DRIVER 2
+#define DP_CONF_CABLE_TYPE_OPTICAL 3
+#define DP_CONF_DPAM_VERSION BIT(30)
#endif /* __USB_TYPEC_DP_H */
diff --git a/include/linux/usb/typec_mux.h b/include/linux/usb/typec_mux.h
index 9292f0e07846..2489a7857d8e 100644
--- a/include/linux/usb/typec_mux.h
+++ b/include/linux/usb/typec_mux.h
@@ -60,8 +60,7 @@ struct typec_mux_desc {
#if IS_ENABLED(CONFIG_TYPEC)
-struct typec_mux *fwnode_typec_mux_get(struct fwnode_handle *fwnode,
- const struct typec_altmode_desc *desc);
+struct typec_mux *fwnode_typec_mux_get(struct fwnode_handle *fwnode);
void typec_mux_put(struct typec_mux *mux);
int typec_mux_set(struct typec_mux *mux, struct typec_mux_state *state);
@@ -74,8 +73,7 @@ void *typec_mux_get_drvdata(struct typec_mux_dev *mux);
#else
-static inline struct typec_mux *fwnode_typec_mux_get(struct fwnode_handle *fwnode,
- const struct typec_altmode_desc *desc)
+static inline struct typec_mux *fwnode_typec_mux_get(struct fwnode_handle *fwnode)
{
return NULL;
}
@@ -102,10 +100,9 @@ static inline void *typec_mux_get_drvdata(struct typec_mux_dev *mux)
#endif /* CONFIG_TYPEC */
-static inline struct typec_mux *
-typec_mux_get(struct device *dev, const struct typec_altmode_desc *desc)
+static inline struct typec_mux *typec_mux_get(struct device *dev)
{
- return fwnode_typec_mux_get(dev_fwnode(dev), desc);
+ return fwnode_typec_mux_get(dev_fwnode(dev));
}
#endif /* __USB_TYPEC_MUX */
diff --git a/include/linux/usb/typec_tbt.h b/include/linux/usb/typec_tbt.h
index 63dd44b72e0c..c7a2153bd6f5 100644
--- a/include/linux/usb/typec_tbt.h
+++ b/include/linux/usb/typec_tbt.h
@@ -46,6 +46,7 @@ struct typec_thunderbolt_data {
#define TBT_CABLE_OPTICAL BIT(21)
#define TBT_CABLE_RETIMER BIT(22)
#define TBT_CABLE_LINK_TRAINING BIT(23)
+#define TBT_CABLE_ACTIVE_PASSIVE BIT(25)
#define TBT_SET_CABLE_SPEED(_s_) (((_s_) & GENMASK(2, 0)) << 16)
#define TBT_SET_CABLE_ROUNDED(_g_) (((_g_) & GENMASK(1, 0)) << 19)
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index 45f09bec02c4..6030a8235617 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -65,6 +65,10 @@ enum rlimit_type {
UCOUNT_RLIMIT_COUNTS,
};
+#if IS_ENABLED(CONFIG_BINFMT_MISC)
+struct binfmt_misc;
+#endif
+
struct user_namespace {
struct uid_gid_map uid_map;
struct uid_gid_map gid_map;
@@ -102,6 +106,10 @@ struct user_namespace {
struct ucounts *ucounts;
long ucount_max[UCOUNT_COUNTS];
long rlimit_max[UCOUNT_RLIMIT_COUNTS];
+
+#if IS_ENABLED(CONFIG_BINFMT_MISC)
+ struct binfmt_misc *binfmt_misc;
+#endif
} __randomize_layout;
struct ucounts {
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index d78b01524349..e4056547fbe6 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -46,6 +46,7 @@ enum mfill_atomic_mode {
MFILL_ATOMIC_COPY,
MFILL_ATOMIC_ZEROPAGE,
MFILL_ATOMIC_CONTINUE,
+ MFILL_ATOMIC_POISON,
NR_MFILL_ATOMIC_MODES,
};
@@ -83,12 +84,26 @@ extern ssize_t mfill_atomic_zeropage(struct mm_struct *dst_mm,
extern ssize_t mfill_atomic_continue(struct mm_struct *dst_mm, unsigned long dst_start,
unsigned long len, atomic_t *mmap_changing,
uffd_flags_t flags);
+extern ssize_t mfill_atomic_poison(struct mm_struct *dst_mm, unsigned long start,
+ unsigned long len, atomic_t *mmap_changing,
+ uffd_flags_t flags);
extern int mwriteprotect_range(struct mm_struct *dst_mm,
unsigned long start, unsigned long len,
bool enable_wp, atomic_t *mmap_changing);
extern long uffd_wp_range(struct vm_area_struct *vma,
unsigned long start, unsigned long len, bool enable_wp);
+/* move_pages */
+void double_pt_lock(spinlock_t *ptl1, spinlock_t *ptl2);
+void double_pt_unlock(spinlock_t *ptl1, spinlock_t *ptl2);
+ssize_t move_pages(struct userfaultfd_ctx *ctx, struct mm_struct *mm,
+ unsigned long dst_start, unsigned long src_start,
+ unsigned long len, __u64 flags);
+int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pmd_t dst_pmdval,
+ struct vm_area_struct *dst_vma,
+ struct vm_area_struct *src_vma,
+ unsigned long dst_addr, unsigned long src_addr);
+
/* mm helpers */
static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
struct vm_userfaultfd_ctx vm_ctx)
@@ -157,11 +172,22 @@ static inline bool userfaultfd_armed(struct vm_area_struct *vma)
}
static inline bool vma_can_userfault(struct vm_area_struct *vma,
- unsigned long vm_flags)
+ unsigned long vm_flags,
+ bool wp_async)
{
+ vm_flags &= __VM_UFFD_FLAGS;
+
if ((vm_flags & VM_UFFD_MINOR) &&
(!is_vm_hugetlb_page(vma) && !vma_is_shmem(vma)))
return false;
+
+ /*
+ * If wp async is enabled and WP is the only mode enabled, allow any
+ * memory type.
+ */
+ if (wp_async && (vm_flags == VM_UFFD_WP))
+ return true;
+
#ifndef CONFIG_PTE_MARKER_UFFD_WP
/*
* If user requested uffd-wp but not enabled pte markers for
@@ -171,6 +197,8 @@ static inline bool vma_can_userfault(struct vm_area_struct *vma,
if ((vm_flags & VM_UFFD_WP) && !vma_is_anonymous(vma))
return false;
#endif
+
+ /* By default, allow any of anon|shmem|hugetlb */
return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) ||
vma_is_shmem(vma);
}
@@ -188,11 +216,12 @@ extern bool userfaultfd_remove(struct vm_area_struct *vma,
unsigned long start,
unsigned long end);
-extern int userfaultfd_unmap_prep(struct mm_struct *mm, unsigned long start,
- unsigned long end, struct list_head *uf);
+extern int userfaultfd_unmap_prep(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end, struct list_head *uf);
extern void userfaultfd_unmap_complete(struct mm_struct *mm,
struct list_head *uf);
extern bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma);
+extern bool userfaultfd_wp_async(struct vm_area_struct *vma);
#else /* CONFIG_USERFAULTFD */
@@ -203,6 +232,13 @@ static inline vm_fault_t handle_userfault(struct vm_fault *vmf,
return VM_FAULT_SIGBUS;
}
+static inline long uffd_wp_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long len,
+ bool enable_wp)
+{
+ return false;
+}
+
static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
struct vm_userfaultfd_ctx vm_ctx)
{
@@ -271,7 +307,7 @@ static inline bool userfaultfd_remove(struct vm_area_struct *vma,
return true;
}
-static inline int userfaultfd_unmap_prep(struct mm_struct *mm,
+static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
unsigned long start, unsigned long end,
struct list_head *uf)
{
@@ -293,6 +329,11 @@ static inline bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma)
return false;
}
+static inline bool userfaultfd_wp_async(struct vm_area_struct *vma)
+{
+ return false;
+}
+
#endif /* CONFIG_USERFAULTFD */
static inline bool userfaultfd_wp_use_markers(struct vm_area_struct *vma)
diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h
index db1b0eaef4eb..db15ac07f8a6 100644
--- a/include/linux/vdpa.h
+++ b/include/linux/vdpa.h
@@ -204,10 +204,23 @@ struct vdpa_map_file {
* @vdev: vdpa device
* @idx: virtqueue index
* Returns u32: group id for this virtqueue
+ * @get_vq_desc_group: Get the group id for the descriptor table of
+ * a specific virtqueue (optional)
+ * @vdev: vdpa device
+ * @idx: virtqueue index
+ * Returns u32: group id for the descriptor table
+ * portion of this virtqueue. It may differ from
+ * the one returned by @get_vq_group, in which
+ * case access to the descriptor table can be
+ * confined to a separate asid, isolating it from
+ * the virtqueue's buffer address accesses.
* @get_device_features: Get virtio features supported by the device
* @vdev: vdpa device
 * Returns the virtio features supported by the
* device
+ * @get_backend_features: Get parent-specific backend features (optional)
+ * Returns the vdpa features supported by the
+ * device.
* @set_driver_features: Set virtio features supported by the driver
* @vdev: vdpa device
* @features: feature support by the driver
@@ -239,6 +252,17 @@ struct vdpa_map_file {
* @reset: Reset device
* @vdev: vdpa device
* Returns integer: success (0) or error (< 0)
+ * @compat_reset: Reset device with compatibility quirks to
+ * accommodate older userspace. Only needed by a
+ * parent driver which used to have bogus reset
+ * behaviour and has to maintain that behaviour
+ * for compatibility with older userspace.
+ * A historically compliant driver only has to
+ * implement .reset; a historically non-compliant
+ * driver should implement both.
+ * @vdev: vdpa device
+ * @flags: compatibility quirks for reset
+ * Returns integer: success (0) or error (< 0)
* @suspend: Suspend the device (optional)
* @vdev: vdpa device
* Returns integer: success (0) or error (< 0)
@@ -314,6 +338,15 @@ struct vdpa_map_file {
* @iova: iova to be unmapped
* @size: size of the area
* Returns integer: success (0) or error (< 0)
+ * @reset_map: Reset device memory mapping to the default
+ * state (optional)
+ * Needed for devices that use device-specific
+ * DMA translation and prefer the mapping to be
+ * decoupled from the virtio life cycle, i.e. the
+ * device's .reset op does not reset the mapping
+ * @vdev: vdpa device
+ * @asid: address space identifier
+ * Returns integer: success (0) or error (< 0)
* @get_vq_dma_dev: Get the dma device for a specific
* virtqueue (optional)
* @vdev: vdpa device
@@ -357,7 +390,9 @@ struct vdpa_config_ops {
/* Device ops */
u32 (*get_vq_align)(struct vdpa_device *vdev);
u32 (*get_vq_group)(struct vdpa_device *vdev, u16 idx);
+ u32 (*get_vq_desc_group)(struct vdpa_device *vdev, u16 idx);
u64 (*get_device_features)(struct vdpa_device *vdev);
+ u64 (*get_backend_features)(const struct vdpa_device *vdev);
int (*set_driver_features)(struct vdpa_device *vdev, u64 features);
u64 (*get_driver_features)(struct vdpa_device *vdev);
void (*set_config_cb)(struct vdpa_device *vdev,
@@ -369,6 +404,8 @@ struct vdpa_config_ops {
u8 (*get_status)(struct vdpa_device *vdev);
void (*set_status)(struct vdpa_device *vdev, u8 status);
int (*reset)(struct vdpa_device *vdev);
+ int (*compat_reset)(struct vdpa_device *vdev, u32 flags);
+#define VDPA_RESET_F_CLEAN_MAP 1
int (*suspend)(struct vdpa_device *vdev);
int (*resume)(struct vdpa_device *vdev);
size_t (*get_config_size)(struct vdpa_device *vdev);
@@ -390,6 +427,7 @@ struct vdpa_config_ops {
u64 iova, u64 size, u64 pa, u32 perm, void *opaque);
int (*dma_unmap)(struct vdpa_device *vdev, unsigned int asid,
u64 iova, u64 size);
+ int (*reset_map)(struct vdpa_device *vdev, unsigned int asid);
int (*set_group_asid)(struct vdpa_device *vdev, unsigned int group,
unsigned int asid);
struct device *(*get_vq_dma_dev)(struct vdpa_device *vdev, u16 idx);
@@ -481,14 +519,17 @@ static inline struct device *vdpa_get_dma_dev(struct vdpa_device *vdev)
return vdev->dma_dev;
}
-static inline int vdpa_reset(struct vdpa_device *vdev)
+static inline int vdpa_reset(struct vdpa_device *vdev, u32 flags)
{
const struct vdpa_config_ops *ops = vdev->config;
int ret;
down_write(&vdev->cf_lock);
vdev->features_valid = false;
- ret = ops->reset(vdev);
+ if (ops->compat_reset && flags)
+ ret = ops->compat_reset(vdev, flags);
+ else
+ ret = ops->reset(vdev);
up_write(&vdev->cf_lock);
return ret;
}
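+
+/*
+ * Illustrative note: callers needing the compatibility behaviour pass a
+ * quirk flag, e.g.
+ *
+ *	vdpa_reset(vdev, VDPA_RESET_F_CLEAN_MAP);
+ *
+ * which reaches .compat_reset when the parent implements it, while
+ * vdpa_reset(vdev, 0) always uses the plain .reset op.
+ */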
diff --git a/include/linux/verification.h b/include/linux/verification.h
index f34e50ebcf60..cb2d47f28091 100644
--- a/include/linux/verification.h
+++ b/include/linux/verification.h
@@ -8,6 +8,7 @@
#ifndef _LINUX_VERIFICATION_H
#define _LINUX_VERIFICATION_H
+#include <linux/errno.h>
#include <linux/types.h>
/*
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index 2c137ea94a3e..89b265bc6ec3 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -13,6 +13,7 @@
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/poll.h>
+#include <linux/cdev.h>
#include <uapi/linux/vfio.h>
#include <linux/iova_bitmap.h>
@@ -42,7 +43,11 @@ struct vfio_device {
*/
const struct vfio_migration_ops *mig_ops;
const struct vfio_log_ops *log_ops;
+#if IS_ENABLED(CONFIG_VFIO_GROUP)
struct vfio_group *group;
+ struct list_head group_next;
+ struct list_head iommu_entry;
+#endif
struct vfio_device_set *dev_set;
struct list_head dev_set_list;
unsigned int migration_flags;
@@ -51,16 +56,25 @@ struct vfio_device {
/* Members below here are private, not for driver use */
unsigned int index;
 struct device device; /* device.kref covers object life cycle */
+#if IS_ENABLED(CONFIG_VFIO_DEVICE_CDEV)
+ struct cdev cdev;
+#endif
refcount_t refcount; /* user count on registered device*/
unsigned int open_count;
struct completion comp;
- struct list_head group_next;
- struct list_head iommu_entry;
struct iommufd_access *iommufd_access;
void (*put_kvm)(struct kvm *kvm);
#if IS_ENABLED(CONFIG_IOMMUFD)
struct iommufd_device *iommufd_device;
- bool iommufd_attached;
+ u8 iommufd_attached:1;
+#endif
+ u8 cdev_opened:1;
+#ifdef CONFIG_DEBUG_FS
+ /*
+ * debug_root is a static property of the vfio_device
+ * which must be set prior to registering the vfio_device.
+ */
+ struct dentry *debug_root;
#endif
};
@@ -73,7 +87,9 @@ struct vfio_device {
* @bind_iommufd: Called when binding the device to an iommufd
* @unbind_iommufd: Opposite of bind_iommufd
* @attach_ioas: Called when attaching device to an IOAS/HWPT managed by the
- * bound iommufd. Undo in unbind_iommufd.
+ * bound iommufd. Undo in unbind_iommufd if @detach_ioas is not
+ * called.
+ * @detach_ioas: Opposite of attach_ioas
* @open_device: Called when the first file descriptor is opened for this device
* @close_device: Opposite of open_device
* @read: Perform read(2) on device file descriptor
@@ -97,6 +113,7 @@ struct vfio_device_ops {
struct iommufd_ctx *ictx, u32 *out_device_id);
void (*unbind_iommufd)(struct vfio_device *vdev);
int (*attach_ioas)(struct vfio_device *vdev, u32 *pt_id);
+ void (*detach_ioas)(struct vfio_device *vdev);
int (*open_device)(struct vfio_device *vdev);
void (*close_device)(struct vfio_device *vdev);
ssize_t (*read)(struct vfio_device *vdev, char __user *buf,
@@ -114,15 +131,31 @@ struct vfio_device_ops {
};
#if IS_ENABLED(CONFIG_IOMMUFD)
+struct iommufd_ctx *vfio_iommufd_device_ictx(struct vfio_device *vdev);
+int vfio_iommufd_get_dev_id(struct vfio_device *vdev, struct iommufd_ctx *ictx);
int vfio_iommufd_physical_bind(struct vfio_device *vdev,
struct iommufd_ctx *ictx, u32 *out_device_id);
void vfio_iommufd_physical_unbind(struct vfio_device *vdev);
int vfio_iommufd_physical_attach_ioas(struct vfio_device *vdev, u32 *pt_id);
+void vfio_iommufd_physical_detach_ioas(struct vfio_device *vdev);
int vfio_iommufd_emulated_bind(struct vfio_device *vdev,
struct iommufd_ctx *ictx, u32 *out_device_id);
void vfio_iommufd_emulated_unbind(struct vfio_device *vdev);
int vfio_iommufd_emulated_attach_ioas(struct vfio_device *vdev, u32 *pt_id);
+void vfio_iommufd_emulated_detach_ioas(struct vfio_device *vdev);
#else
+static inline struct iommufd_ctx *
+vfio_iommufd_device_ictx(struct vfio_device *vdev)
+{
+ return NULL;
+}
+
+static inline int
+vfio_iommufd_get_dev_id(struct vfio_device *vdev, struct iommufd_ctx *ictx)
+{
+ return VFIO_PCI_DEVID_NOT_OWNED;
+}
+
#define vfio_iommufd_physical_bind \
((int (*)(struct vfio_device *vdev, struct iommufd_ctx *ictx, \
u32 *out_device_id)) NULL)
@@ -130,6 +163,8 @@ int vfio_iommufd_emulated_attach_ioas(struct vfio_device *vdev, u32 *pt_id);
((void (*)(struct vfio_device *vdev)) NULL)
#define vfio_iommufd_physical_attach_ioas \
((int (*)(struct vfio_device *vdev, u32 *pt_id)) NULL)
+#define vfio_iommufd_physical_detach_ioas \
+ ((void (*)(struct vfio_device *vdev)) NULL)
#define vfio_iommufd_emulated_bind \
((int (*)(struct vfio_device *vdev, struct iommufd_ctx *ictx, \
u32 *out_device_id)) NULL)
@@ -137,8 +172,15 @@ int vfio_iommufd_emulated_attach_ioas(struct vfio_device *vdev, u32 *pt_id);
((void (*)(struct vfio_device *vdev)) NULL)
#define vfio_iommufd_emulated_attach_ioas \
((int (*)(struct vfio_device *vdev, u32 *pt_id)) NULL)
+#define vfio_iommufd_emulated_detach_ioas \
+ ((void (*)(struct vfio_device *vdev)) NULL)
#endif
+static inline bool vfio_device_cdev_opened(struct vfio_device *device)
+{
+ return device->cdev_opened;
+}
+
/**
* struct vfio_migration_ops - VFIO bus device driver migration callbacks
*
@@ -239,20 +281,40 @@ void vfio_unregister_group_dev(struct vfio_device *device);
int vfio_assign_device_set(struct vfio_device *device, void *set_id);
unsigned int vfio_device_set_open_count(struct vfio_device_set *dev_set);
+struct vfio_device *
+vfio_find_device_in_devset(struct vfio_device_set *dev_set,
+ struct device *dev);
int vfio_mig_get_next_state(struct vfio_device *device,
enum vfio_device_mig_state cur_fsm,
enum vfio_device_mig_state new_fsm,
enum vfio_device_mig_state *next_fsm);
+void vfio_combine_iova_ranges(struct rb_root_cached *root, u32 cur_nodes,
+ u32 req_nodes);
+
/*
* External user API
*/
struct iommu_group *vfio_file_iommu_group(struct file *file);
+
+#if IS_ENABLED(CONFIG_VFIO_GROUP)
bool vfio_file_is_group(struct file *file);
+bool vfio_file_has_dev(struct file *file, struct vfio_device *device);
+#else
+static inline bool vfio_file_is_group(struct file *file)
+{
+ return false;
+}
+
+static inline bool vfio_file_has_dev(struct file *file, struct vfio_device *device)
+{
+ return false;
+}
+#endif
+bool vfio_file_is_valid(struct file *file);
bool vfio_file_enforced_coherent(struct file *file);
void vfio_file_set_kvm(struct file *file, struct kvm *kvm);
-bool vfio_file_has_dev(struct file *file, struct vfio_device *device);
#define VFIO_PIN_PAGES_MAX_ENTRIES (PAGE_SIZE/sizeof(unsigned long))
diff --git a/include/linux/vfio_pci_core.h b/include/linux/vfio_pci_core.h
index 367fd79226a3..85e84b92751b 100644
--- a/include/linux/vfio_pci_core.h
+++ b/include/linux/vfio_pci_core.h
@@ -59,8 +59,7 @@ struct vfio_pci_core_device {
struct perm_bits *msi_perm;
spinlock_t irqlock;
struct mutex igate;
- struct vfio_pci_irq_ctx *ctx;
- int num_ctx;
+ struct xarray ctx;
int irq_type;
int num_regions;
struct vfio_pci_region *region;
@@ -69,17 +68,18 @@ struct vfio_pci_core_device {
u16 msix_size;
u32 msix_offset;
u32 rbar[7];
- bool pci_2_3;
- bool virq_disabled;
- bool reset_works;
- bool extended_caps;
- bool bardirty;
- bool has_vga;
- bool needs_reset;
- bool nointx;
- bool needs_pm_restore;
- bool pm_intx_masked;
- bool pm_runtime_engaged;
+ bool has_dyn_msix:1;
+ bool pci_2_3:1;
+ bool virq_disabled:1;
+ bool reset_works:1;
+ bool extended_caps:1;
+ bool bardirty:1;
+ bool has_vga:1;
+ bool needs_reset:1;
+ bool nointx:1;
+ bool needs_pm_restore:1;
+ bool pm_intx_masked:1;
+ bool pm_runtime_engaged:1;
struct pci_saved_state *pci_saved_state;
struct pci_saved_state *pm_save;
int ioeventfds_nr;
@@ -127,7 +127,27 @@ int vfio_pci_core_match(struct vfio_device *core_vdev, char *buf);
int vfio_pci_core_enable(struct vfio_pci_core_device *vdev);
void vfio_pci_core_disable(struct vfio_pci_core_device *vdev);
void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev);
+int vfio_pci_core_setup_barmap(struct vfio_pci_core_device *vdev, int bar);
pci_ers_result_t vfio_pci_core_aer_err_detected(struct pci_dev *pdev,
pci_channel_state_t state);
+#define VFIO_IOWRITE_DECLARATION(size) \
+int vfio_pci_core_iowrite##size(struct vfio_pci_core_device *vdev, \
+ bool test_mem, u##size val, void __iomem *io);
+
+VFIO_IOWRITE_DECLARATION(8)
+VFIO_IOWRITE_DECLARATION(16)
+VFIO_IOWRITE_DECLARATION(32)
+#ifdef iowrite64
+VFIO_IOWRITE_DECLARATION(64)
+#endif
+
+#define VFIO_IOREAD_DECLARATION(size) \
+int vfio_pci_core_ioread##size(struct vfio_pci_core_device *vdev, \
+ bool test_mem, u##size *val, void __iomem *io);
+
+VFIO_IOREAD_DECLARATION(8)
+VFIO_IOREAD_DECLARATION(16)
+VFIO_IOREAD_DECLARATION(32)
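+
+/*
+ * Each use of the declaration macros above expands to one prototype, e.g.
+ * VFIO_IOREAD_DECLARATION(8) declares:
+ *
+ *	int vfio_pci_core_ioread8(struct vfio_pci_core_device *vdev,
+ *				  bool test_mem, u8 *val, void __iomem *io);
+ */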
+
#endif /* VFIO_PCI_CORE_H */
diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h
index b4b9137f9792..97129a1bbb7d 100644
--- a/include/linux/vgaarb.h
+++ b/include/linux/vgaarb.h
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: MIT */
+
/*
 * The VGA arbiter manages VGA space routing and VGA resource decode to
* allow multiple VGA devices to be used in a system in a safe way.
@@ -5,27 +7,6 @@
* (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
* (C) Copyright 2007 Paulo R. Zanoni <przanoni@gmail.com>
* (C) Copyright 2007, 2009 Tiago Vignatti <vignatti@freedesktop.org>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS
- * IN THE SOFTWARE.
- *
*/
#ifndef LINUX_VGA_H
@@ -96,7 +77,7 @@ static inline int vga_client_register(struct pci_dev *pdev,
static inline int vga_get_interruptible(struct pci_dev *pdev,
unsigned int rsrc)
{
- return vga_get(pdev, rsrc, 1);
+ return vga_get(pdev, rsrc, 1);
}
/**
@@ -111,7 +92,7 @@ static inline int vga_get_interruptible(struct pci_dev *pdev,
static inline int vga_get_uninterruptible(struct pci_dev *pdev,
unsigned int rsrc)
{
- return vga_get(pdev, rsrc, 0);
+ return vga_get(pdev, rsrc, 0);
}
static inline void vga_client_unregister(struct pci_dev *pdev)
diff --git a/include/linux/via-gpio.h b/include/linux/via-gpio.h
deleted file mode 100644
index ac34668fd442..000000000000
--- a/include/linux/via-gpio.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Support for viafb GPIO ports.
- *
- * Copyright 2009 Jonathan Corbet <corbet@lwn.net>
- */
-
-#ifndef __VIA_GPIO_H__
-#define __VIA_GPIO_H__
-
-extern int viafb_gpio_lookup(const char *name);
-extern int viafb_gpio_init(void);
-extern void viafb_gpio_exit(void);
-#endif
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index b93238db94e3..b0201747a263 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -9,6 +9,7 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/gfp.h>
+#include <linux/dma-mapping.h>
/**
* struct virtqueue - a queue to register buffers for sending or receiving.
@@ -61,6 +62,8 @@ int virtqueue_add_sgs(struct virtqueue *vq,
void *data,
gfp_t gfp);
+struct device *virtqueue_dma_dev(struct virtqueue *vq);
+
bool virtqueue_kick(struct virtqueue *vq);
bool virtqueue_kick_prepare(struct virtqueue *vq);
@@ -78,6 +81,8 @@ bool virtqueue_enable_cb(struct virtqueue *vq);
unsigned virtqueue_enable_cb_prepare(struct virtqueue *vq);
+int virtqueue_set_dma_premapped(struct virtqueue *_vq);
+
bool virtqueue_poll(struct virtqueue *vq, unsigned);
bool virtqueue_enable_cb_delayed(struct virtqueue *vq);
@@ -95,6 +100,16 @@ dma_addr_t virtqueue_get_used_addr(const struct virtqueue *vq);
int virtqueue_resize(struct virtqueue *vq, u32 num,
void (*recycle)(struct virtqueue *vq, void *buf));
+int virtqueue_reset(struct virtqueue *vq,
+ void (*recycle)(struct virtqueue *vq, void *buf));
+
+struct virtio_admin_cmd {
+ __le16 opcode;
+ __le16 group_type;
+ __le64 group_member_id;
+ struct scatterlist *data_sg;
+ struct scatterlist *result_sg;
+};
/**
* struct virtio_device - representation of a device using virtio
@@ -103,6 +118,7 @@ int virtqueue_resize(struct virtqueue *vq, u32 num,
* @config_enabled: configuration change reporting enabled
* @config_change_pending: configuration change reported while disabled
* @config_lock: protects configuration change reporting
+ * @vqs_list_lock: protects @vqs.
* @dev: underlying device.
* @id: the device type identification (used to match it with a driver).
* @config: the configuration ops for this device.
@@ -117,7 +133,7 @@ struct virtio_device {
bool config_enabled;
bool config_change_pending;
spinlock_t config_lock;
- spinlock_t vqs_list_lock; /* Protects VQs list access */
+ spinlock_t vqs_list_lock;
struct device dev;
struct virtio_device_id id;
const struct virtio_config_ops *config;
@@ -160,6 +176,8 @@ size_t virtio_max_dma_size(const struct virtio_device *vdev);
* @feature_table_size: number of entries in the feature table array.
* @feature_table_legacy: same as feature_table but when working in legacy mode.
* @feature_table_size_legacy: number of entries in feature table legacy array.
+ * @validate: the function to call to validate features and config space.
+ * Returns 0 or -errno.
* @probe: the function to call when a device is found. Returns 0 or -errno.
* @scan: optional function to call after successful probe; intended
* for virtio-scsi to invoke a scan.
@@ -181,10 +199,8 @@ struct virtio_driver {
void (*scan)(struct virtio_device *dev);
void (*remove)(struct virtio_device *dev);
void (*config_changed)(struct virtio_device *dev);
-#ifdef CONFIG_PM
int (*freeze)(struct virtio_device *dev);
int (*restore)(struct virtio_device *dev);
-#endif
};
static inline struct virtio_driver *drv_to_virtio(struct device_driver *drv)
@@ -203,4 +219,19 @@ void unregister_virtio_driver(struct virtio_driver *drv);
#define module_virtio_driver(__virtio_driver) \
module_driver(__virtio_driver, register_virtio_driver, \
unregister_virtio_driver)
+
+dma_addr_t virtqueue_dma_map_single_attrs(struct virtqueue *_vq, void *ptr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
+void virtqueue_dma_unmap_single_attrs(struct virtqueue *_vq, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+int virtqueue_dma_mapping_error(struct virtqueue *_vq, dma_addr_t addr);
+
+bool virtqueue_dma_need_sync(struct virtqueue *_vq, dma_addr_t addr);
+void virtqueue_dma_sync_single_range_for_cpu(struct virtqueue *_vq, dma_addr_t addr,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir);
+void virtqueue_dma_sync_single_range_for_device(struct virtqueue *_vq, dma_addr_t addr,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir);
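+
+/*
+ * Sketch of the premapped-buffer flow these helpers enable (illustrative;
+ * buffer names and error handling are elided or hypothetical):
+ *
+ *	virtqueue_set_dma_premapped(vq);
+ *	addr = virtqueue_dma_map_single_attrs(vq, buf, len, DMA_TO_DEVICE, 0);
+ *	if (virtqueue_dma_mapping_error(vq, addr))
+ *		goto err;
+ *	... add the buffer, kick, harvest the used ring ...
+ *	virtqueue_dma_unmap_single_attrs(vq, addr, len, DMA_TO_DEVICE, 0);
+ */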
#endif /* _LINUX_VIRTIO_H */
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 2b3438de2c4d..da9b271b54db 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -93,6 +93,8 @@ typedef void vq_callback_t(struct virtqueue *);
* Returns 0 on success or error status
* If disable_vq_and_reset is set, then enable_vq_after_reset must also be
* set.
+ * @create_avq: create admin virtqueue resource.
+ * @destroy_avq: destroy admin virtqueue resource.
*/
struct virtio_config_ops {
void (*get)(struct virtio_device *vdev, unsigned offset,
@@ -120,6 +122,8 @@ struct virtio_config_ops {
struct virtio_shm_region *region, u8 id);
int (*disable_vq_and_reset)(struct virtqueue *vq);
int (*enable_vq_after_reset)(struct virtqueue *vq);
+ int (*create_avq)(struct virtio_device *vdev);
+ void (*destroy_avq)(struct virtio_device *vdev);
};
/* If driver didn't advertise the feature, it will never appear. */
diff --git a/include/linux/virtio_console.h b/include/linux/virtio_console.h
deleted file mode 100644
index d2e2785af602..000000000000
--- a/include/linux/virtio_console.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so
- * anyone can use the definitions to implement compatible drivers/servers:
- *
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of IBM nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * Copyright (C) Red Hat, Inc., 2009, 2010, 2011
- * Copyright (C) Amit Shah <amit.shah@redhat.com>, 2009, 2010, 2011
- */
-#ifndef _LINUX_VIRTIO_CONSOLE_H
-#define _LINUX_VIRTIO_CONSOLE_H
-
-#include <uapi/linux/virtio_console.h>
-
-int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int));
-#endif /* _LINUX_VIRTIO_CONSOLE_H */
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index bdf8de2cdd93..4dfa9b69ca8d 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -3,8 +3,10 @@
#define _LINUX_VIRTIO_NET_H
#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/udp.h>
#include <uapi/linux/tcp.h>
-#include <uapi/linux/udp.h>
#include <uapi/linux/virtio_net.h>
static inline bool virtio_net_hdr_match_proto(__be16 protocol, __u8 gso_type)
@@ -49,6 +51,7 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
const struct virtio_net_hdr *hdr,
bool little_endian)
{
+ unsigned int nh_min_len = sizeof(struct iphdr);
unsigned int gso_type = 0;
unsigned int thlen = 0;
unsigned int p_off = 0;
@@ -65,6 +68,7 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
gso_type = SKB_GSO_TCPV6;
ip_proto = IPPROTO_TCP;
thlen = sizeof(struct tcphdr);
+ nh_min_len = sizeof(struct ipv6hdr);
break;
case VIRTIO_NET_HDR_GSO_UDP:
gso_type = SKB_GSO_UDP;
@@ -100,7 +104,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
if (!skb_partial_csum_set(skb, start, off))
return -EINVAL;
- p_off = skb_transport_offset(skb) + thlen;
+ nh_min_len = max_t(u32, nh_min_len, skb_transport_offset(skb));
+ p_off = nh_min_len + thlen;
if (!pskb_may_pull(skb, p_off))
return -EINVAL;
} else {
@@ -140,7 +145,7 @@ retry:
skb_set_transport_header(skb, keys.control.thoff);
} else if (gso_type) {
- p_off = thlen;
+ p_off = nh_min_len + thlen;
if (!pskb_may_pull(skb, p_off))
return -EINVAL;
}
@@ -151,9 +156,26 @@ retry:
unsigned int nh_off = p_off;
struct skb_shared_info *shinfo = skb_shinfo(skb);
- /* UFO may not include transport header in gso_size. */
- if (gso_type & SKB_GSO_UDP)
+ switch (gso_type & ~SKB_GSO_TCP_ECN) {
+ case SKB_GSO_UDP:
+ /* UFO may not include transport header in gso_size. */
nh_off -= thlen;
+ break;
+ case SKB_GSO_UDP_L4:
+ if (!(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
+ return -EINVAL;
+ if (skb->csum_offset != offsetof(struct udphdr, check))
+ return -EINVAL;
+ if (skb->len - p_off > gso_size * UDP_MAX_SEGMENTS)
+ return -EINVAL;
+ if (gso_type != SKB_GSO_UDP_L4)
+ return -EINVAL;
+ break;
+ }
+
+ /* The kernel has special handling for GSO_BY_FRAGS. */
+ if (gso_size == GSO_BY_FRAGS)
+ return -EINVAL;
/* Too small packets are not really GSO ones. */
if (skb->len - nh_off > gso_size) {
diff --git a/include/linux/virtio_pci_admin.h b/include/linux/virtio_pci_admin.h
new file mode 100644
index 000000000000..f4a100a0fe2e
--- /dev/null
+++ b/include/linux/virtio_pci_admin.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_VIRTIO_PCI_ADMIN_H
+#define _LINUX_VIRTIO_PCI_ADMIN_H
+
+#include <linux/types.h>
+#include <linux/pci.h>
+
+#ifdef CONFIG_VIRTIO_PCI_ADMIN_LEGACY
+bool virtio_pci_admin_has_legacy_io(struct pci_dev *pdev);
+int virtio_pci_admin_legacy_common_io_write(struct pci_dev *pdev, u8 offset,
+ u8 size, u8 *buf);
+int virtio_pci_admin_legacy_common_io_read(struct pci_dev *pdev, u8 offset,
+ u8 size, u8 *buf);
+int virtio_pci_admin_legacy_device_io_write(struct pci_dev *pdev, u8 offset,
+ u8 size, u8 *buf);
+int virtio_pci_admin_legacy_device_io_read(struct pci_dev *pdev, u8 offset,
+ u8 size, u8 *buf);
+int virtio_pci_admin_legacy_io_notify_info(struct pci_dev *pdev,
+ u8 req_bar_flags, u8 *bar,
+ u64 *bar_offset);
+#endif
+
+#endif /* _LINUX_VIRTIO_PCI_ADMIN_H */
diff --git a/include/linux/virtio_pci_modern.h b/include/linux/virtio_pci_modern.h
index c4eeb79b0139..c0b1b1ca1163 100644
--- a/include/linux/virtio_pci_modern.h
+++ b/include/linux/virtio_pci_modern.h
@@ -5,39 +5,49 @@
#include <linux/pci.h>
#include <linux/virtio_pci.h>
-struct virtio_pci_modern_common_cfg {
- struct virtio_pci_common_cfg cfg;
-
- __le16 queue_notify_data; /* read-write */
- __le16 queue_reset; /* read-write */
-};
-
+/**
+ * struct virtio_pci_modern_device - info for modern PCI virtio
+ * @pci_dev: Ptr to the PCI device struct
+ * @common: Position of the common capability in the PCI config
+ * @device: Device-specific data (non-legacy mode)
+ * @notify_base: Base of vq notifications (non-legacy mode)
+ * @notify_pa: Physical base of vq notifications
+ * @isr: Where to read and clear interrupt
+ * @notify_len: So we can sanity-check accesses
+ * @device_len: So we can sanity-check accesses
+ * @common_len: So we can sanity-check accesses
+ * @notify_map_cap: Capability for when we need to map notifications per-vq
+ * @notify_offset_multiplier: Multiply queue_notify_off by this value
+ * (non-legacy mode).
+ * @modern_bars: Bitmask of BARs
+ * @id: Device and vendor id
+ * @device_id_check: Callback defined before vp_modern_probe() to be used to
+ * verify the PCI device is a vendor's expected device rather
+ * than the standard virtio PCI device
+ * Returns the found device id or a negative errno
+ * @dma_mask: Optional mask instead of the traditional DMA_BIT_MASK(64),
+ * for vendor devices with DMA space address limitations
+ */
struct virtio_pci_modern_device {
struct pci_dev *pci_dev;
struct virtio_pci_common_cfg __iomem *common;
- /* Device-specific data (non-legacy mode) */
void __iomem *device;
- /* Base of vq notifications (non-legacy mode). */
void __iomem *notify_base;
- /* Physical base of vq notifications */
resource_size_t notify_pa;
- /* Where to read and clear interrupt */
u8 __iomem *isr;
- /* So we can sanity-check accesses. */
size_t notify_len;
size_t device_len;
+ size_t common_len;
- /* Capability for when we need to map notifications per-vq. */
int notify_map_cap;
- /* Multiply queue_notify_off by this value. (non-legacy mode). */
u32 notify_offset_multiplier;
-
int modern_bars;
-
struct virtio_device_id id;
+
+ int (*device_id_check)(struct pci_dev *pdev);
+ u64 dma_mask;
};
/*
@@ -115,4 +125,6 @@ int vp_modern_probe(struct virtio_pci_modern_device *mdev);
void vp_modern_remove(struct virtio_pci_modern_device *mdev);
int vp_modern_get_queue_reset(struct virtio_pci_modern_device *mdev, u16 index);
void vp_modern_set_queue_reset(struct virtio_pci_modern_device *mdev, u16 index);
+u16 vp_modern_avq_num(struct virtio_pci_modern_device *mdev);
+u16 vp_modern_avq_index(struct virtio_pci_modern_device *mdev);
#endif
diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
index c58453699ee9..c82089dee0c8 100644
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -12,6 +12,7 @@
struct virtio_vsock_skb_cb {
bool reply;
bool tap_delivered;
+ u32 offset;
};
#define VIRTIO_VSOCK_SKB_CB(skb) ((struct virtio_vsock_skb_cb *)((skb)->cb))
@@ -159,6 +160,15 @@ struct virtio_transport {
/* Takes ownership of the packet */
int (*send_pkt)(struct sk_buff *skb);
+
+ /* Used in MSG_ZEROCOPY mode. Checks that the provided data
+ * (number of buffers) can be transmitted in zerocopy mode.
+ * If this callback is not implemented for the current
+ * transport, the transport does not need extra checks and
+ * can perform zerocopy transmission by default.
+ */
+ bool (*can_msgzerocopy)(int bufs_num);
};
ssize_t
@@ -246,4 +256,5 @@ void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit);
void virtio_transport_deliver_tap_pkt(struct sk_buff *skb);
int virtio_transport_purge_skbs(void *vsk, struct sk_buff_head *list);
int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t read_actor);
+int virtio_transport_notify_set_rcvlowat(struct vsock_sock *vsk, int val);
#endif /* _LINUX_VIRTIO_VSOCK_H */
diff --git a/include/linux/vlynq.h b/include/linux/vlynq.h
deleted file mode 100644
index e9c0cd36c48a..000000000000
--- a/include/linux/vlynq.h
+++ /dev/null
@@ -1,149 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2006, 2007 Eugene Konev <ejka@openwrt.org>
- */
-
-#ifndef __VLYNQ_H__
-#define __VLYNQ_H__
-
-#include <linux/device.h>
-#include <linux/types.h>
-
-struct module;
-
-#define VLYNQ_NUM_IRQS 32
-
-struct vlynq_mapping {
- u32 size;
- u32 offset;
-};
-
-enum vlynq_divisor {
- vlynq_div_auto = 0,
- vlynq_ldiv1,
- vlynq_ldiv2,
- vlynq_ldiv3,
- vlynq_ldiv4,
- vlynq_ldiv5,
- vlynq_ldiv6,
- vlynq_ldiv7,
- vlynq_ldiv8,
- vlynq_rdiv1,
- vlynq_rdiv2,
- vlynq_rdiv3,
- vlynq_rdiv4,
- vlynq_rdiv5,
- vlynq_rdiv6,
- vlynq_rdiv7,
- vlynq_rdiv8,
- vlynq_div_external
-};
-
-struct vlynq_device_id {
- u32 id;
- enum vlynq_divisor divisor;
- unsigned long driver_data;
-};
-
-struct vlynq_regs;
-struct vlynq_device {
- u32 id, dev_id;
- int local_irq;
- int remote_irq;
- enum vlynq_divisor divisor;
- u32 regs_start, regs_end;
- u32 mem_start, mem_end;
- u32 irq_start, irq_end;
- int irq;
- int enabled;
- struct vlynq_regs *local;
- struct vlynq_regs *remote;
- struct device dev;
-};
-
-struct vlynq_driver {
- char *name;
- struct vlynq_device_id *id_table;
- int (*probe)(struct vlynq_device *dev, struct vlynq_device_id *id);
- void (*remove)(struct vlynq_device *dev);
- struct device_driver driver;
-};
-
-struct plat_vlynq_ops {
- int (*on)(struct vlynq_device *dev);
- void (*off)(struct vlynq_device *dev);
-};
-
-static inline struct vlynq_driver *to_vlynq_driver(struct device_driver *drv)
-{
- return container_of(drv, struct vlynq_driver, driver);
-}
-
-static inline struct vlynq_device *to_vlynq_device(struct device *device)
-{
- return container_of(device, struct vlynq_device, dev);
-}
-
-extern struct bus_type vlynq_bus_type;
-
-extern int __vlynq_register_driver(struct vlynq_driver *driver,
- struct module *owner);
-
-static inline int vlynq_register_driver(struct vlynq_driver *driver)
-{
- return __vlynq_register_driver(driver, THIS_MODULE);
-}
-
-static inline void *vlynq_get_drvdata(struct vlynq_device *dev)
-{
- return dev_get_drvdata(&dev->dev);
-}
-
-static inline void vlynq_set_drvdata(struct vlynq_device *dev, void *data)
-{
- dev_set_drvdata(&dev->dev, data);
-}
-
-static inline u32 vlynq_mem_start(struct vlynq_device *dev)
-{
- return dev->mem_start;
-}
-
-static inline u32 vlynq_mem_end(struct vlynq_device *dev)
-{
- return dev->mem_end;
-}
-
-static inline u32 vlynq_mem_len(struct vlynq_device *dev)
-{
- return dev->mem_end - dev->mem_start + 1;
-}
-
-static inline int vlynq_virq_to_irq(struct vlynq_device *dev, int virq)
-{
- int irq = dev->irq_start + virq;
- if ((irq < dev->irq_start) || (irq > dev->irq_end))
- return -EINVAL;
-
- return irq;
-}
-
-static inline int vlynq_irq_to_virq(struct vlynq_device *dev, int irq)
-{
- if ((irq < dev->irq_start) || (irq > dev->irq_end))
- return -EINVAL;
-
- return irq - dev->irq_start;
-}
-
-extern void vlynq_unregister_driver(struct vlynq_driver *driver);
-extern int vlynq_enable_device(struct vlynq_device *dev);
-extern void vlynq_disable_device(struct vlynq_device *dev);
-extern int vlynq_set_local_mapping(struct vlynq_device *dev, u32 tx_offset,
- struct vlynq_mapping *mapping);
-extern int vlynq_set_remote_mapping(struct vlynq_device *dev, u32 tx_offset,
- struct vlynq_mapping *mapping);
-extern int vlynq_set_local_irq(struct vlynq_device *dev, int virq);
-extern int vlynq_set_remote_irq(struct vlynq_device *dev, int virq);
-
-#endif /* __VLYNQ_H__ */
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 8abfa1240040..747943bc8cc2 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -41,9 +41,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
PGSTEAL_KSWAPD,
PGSTEAL_DIRECT,
PGSTEAL_KHUGEPAGED,
- PGDEMOTE_KSWAPD,
- PGDEMOTE_DIRECT,
- PGDEMOTE_KHUGEPAGED,
PGSCAN_KSWAPD,
PGSCAN_DIRECT,
PGSCAN_KHUGEPAGED,
@@ -145,6 +142,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
#ifdef CONFIG_ZSWAP
ZSWPIN,
ZSWPOUT,
+ ZSWPWB,
#endif
#ifdef CONFIG_X86
DIRECT_MAP_LEVEL2_SPLIT,
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index fed855bae6d8..343906a98d6e 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -556,19 +556,25 @@ static inline void mod_lruvec_state(struct lruvec *lruvec,
local_irq_restore(flags);
}
-void __mod_lruvec_page_state(struct page *page,
+void __lruvec_stat_mod_folio(struct folio *folio,
enum node_stat_item idx, int val);
-static inline void mod_lruvec_page_state(struct page *page,
+static inline void lruvec_stat_mod_folio(struct folio *folio,
enum node_stat_item idx, int val)
{
unsigned long flags;
local_irq_save(flags);
- __mod_lruvec_page_state(page, idx, val);
+ __lruvec_stat_mod_folio(folio, idx, val);
local_irq_restore(flags);
}
+static inline void mod_lruvec_page_state(struct page *page,
+ enum node_stat_item idx, int val)
+{
+ lruvec_stat_mod_folio(page_folio(page), idx, val);
+}
+
#else
static inline void __mod_lruvec_state(struct lruvec *lruvec,
@@ -583,37 +589,25 @@ static inline void mod_lruvec_state(struct lruvec *lruvec,
mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}
-static inline void __mod_lruvec_page_state(struct page *page,
- enum node_stat_item idx, int val)
-{
- __mod_node_page_state(page_pgdat(page), idx, val);
-}
-
-static inline void mod_lruvec_page_state(struct page *page,
+static inline void __lruvec_stat_mod_folio(struct folio *folio,
enum node_stat_item idx, int val)
{
- mod_node_page_state(page_pgdat(page), idx, val);
+ __mod_node_page_state(folio_pgdat(folio), idx, val);
}
-#endif /* CONFIG_MEMCG */
-
-static inline void __inc_lruvec_page_state(struct page *page,
- enum node_stat_item idx)
+static inline void lruvec_stat_mod_folio(struct folio *folio,
+ enum node_stat_item idx, int val)
{
- __mod_lruvec_page_state(page, idx, 1);
+ mod_node_page_state(folio_pgdat(folio), idx, val);
}
-static inline void __dec_lruvec_page_state(struct page *page,
- enum node_stat_item idx)
+static inline void mod_lruvec_page_state(struct page *page,
+ enum node_stat_item idx, int val)
{
- __mod_lruvec_page_state(page, idx, -1);
+ mod_node_page_state(page_pgdat(page), idx, val);
}
-static inline void __lruvec_stat_mod_folio(struct folio *folio,
- enum node_stat_item idx, int val)
-{
- __mod_lruvec_page_state(&folio->page, idx, val);
-}
+#endif /* CONFIG_MEMCG */
static inline void __lruvec_stat_add_folio(struct folio *folio,
enum node_stat_item idx)
@@ -627,24 +621,6 @@ static inline void __lruvec_stat_sub_folio(struct folio *folio,
__lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
}
-static inline void inc_lruvec_page_state(struct page *page,
- enum node_stat_item idx)
-{
- mod_lruvec_page_state(page, idx, 1);
-}
-
-static inline void dec_lruvec_page_state(struct page *page,
- enum node_stat_item idx)
-{
- mod_lruvec_page_state(page, idx, -1);
-}
-
-static inline void lruvec_stat_mod_folio(struct folio *folio,
- enum node_stat_item idx, int val)
-{
- mod_lruvec_page_state(&folio->page, idx, val);
-}
-
static inline void lruvec_stat_add_folio(struct folio *folio,
enum node_stat_item idx)
{
diff --git a/include/linux/w1-gpio.h b/include/linux/w1-gpio.h
deleted file mode 100644
index 3495fd0dc790..000000000000
--- a/include/linux/w1-gpio.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * w1-gpio interface to platform code
- *
- * Copyright (C) 2007 Ville Syrjala <syrjala@sci.fi>
- */
-#ifndef _LINUX_W1_GPIO_H
-#define _LINUX_W1_GPIO_H
-
-struct gpio_desc;
-
-/**
- * struct w1_gpio_platform_data - Platform-dependent data for w1-gpio
- */
-struct w1_gpio_platform_data {
- struct gpio_desc *gpiod;
- struct gpio_desc *pullup_gpiod;
- void (*enable_external_pullup)(int enable);
- unsigned int pullup_duration;
-};
-
-#endif /* _LINUX_W1_GPIO_H */
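With w1_gpio_platform_data removed, a board that still instantiates the w1-gpio device has to hand over the GPIO some other way: via device tree or, plausibly, a gpiod lookup table. A hedged sketch of the latter; the chip label and pin offset are placeholders:

	static struct gpiod_lookup_table w1_gpiod_table = {
		.dev_id = "w1-gpio",
		.table = {
			GPIO_LOOKUP("gpio-bank0", 4, NULL, GPIO_ACTIVE_HIGH),
			{ }
		},
	};

	/* in board setup code, before the platform device is registered */
	gpiod_add_lookup_table(&w1_gpiod_table);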
diff --git a/include/linux/wait.h b/include/linux/wait.h
index a0307b516b09..8aa3372f21a0 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -9,7 +9,6 @@
#include <linux/spinlock.h>
#include <asm/current.h>
-#include <uapi/linux/wait.h>
typedef struct wait_queue_entry wait_queue_entry_t;
@@ -19,10 +18,9 @@ int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int
/* wait_queue_entry::flags */
#define WQ_FLAG_EXCLUSIVE 0x01
#define WQ_FLAG_WOKEN 0x02
-#define WQ_FLAG_BOOKMARK 0x04
-#define WQ_FLAG_CUSTOM 0x08
-#define WQ_FLAG_DONE 0x10
-#define WQ_FLAG_PRIORITY 0x20
+#define WQ_FLAG_CUSTOM 0x04
+#define WQ_FLAG_DONE 0x08
+#define WQ_FLAG_PRIORITY 0x10
/*
* A single wait-queue entry structure:
@@ -210,9 +208,8 @@ __remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq
}
int __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
+void __wake_up_on_current_cpu(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
-void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
- unsigned int mode, void *key, wait_queue_entry_t *bookmark);
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
@@ -237,6 +234,8 @@ void __wake_up_pollfree(struct wait_queue_head *wq_head);
#define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m))
#define wake_up_poll(x, m) \
__wake_up(x, TASK_NORMAL, 1, poll_to_key(m))
+#define wake_up_poll_on_current_cpu(x, m) \
+ __wake_up_on_current_cpu(x, TASK_NORMAL, poll_to_key(m))
#define wake_up_locked_poll(x, m) \
__wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m))
#define wake_up_interruptible_poll(x, m) \
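For the new wake_up_poll_on_current_cpu() helper above, a minimal sketch of a caller; the wait queue and event mask are illustrative. Unlike wake_up_poll(), this variant hints the scheduler to keep the woken task on the waker's CPU:

	static DECLARE_WAIT_QUEUE_HEAD(my_wq);

	static void my_data_ready(void)
	{
		/* wake pollers without migrating them to another CPU */
		wake_up_poll_on_current_cpu(&my_wq, EPOLLIN | EPOLLRDNORM);
	}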
diff --git a/include/linux/watch_queue.h b/include/linux/watch_queue.h
index fc6bba20273b..429c7b6afead 100644
--- a/include/linux/watch_queue.h
+++ b/include/linux/watch_queue.h
@@ -32,13 +32,13 @@ struct watch_filter {
DECLARE_BITMAP(type_filter, WATCH_TYPE__NR);
};
u32 nr_filters; /* Number of filters */
- struct watch_type_filter filters[];
+ struct watch_type_filter filters[] __counted_by(nr_filters);
};
struct watch_queue {
struct rcu_head rcu;
struct watch_filter __rcu *filter;
- struct pipe_inode_info *pipe; /* The pipe we're using as a buffer */
+ struct pipe_inode_info *pipe; /* Pipe we use as a buffer, NULL if queue closed */
struct hlist_head watches; /* Contributory watches */
struct page **notes; /* Preallocated notifications */
unsigned long *notes_bitmap; /* Allocation bitmap for notes */
@@ -46,7 +46,6 @@ struct watch_queue {
spinlock_t lock;
unsigned int nr_notes; /* Number of notes */
unsigned int nr_pages; /* Number of pages in notes[] */
- bool defunct; /* T when queues closed */
};
/*
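The __counted_by(nr_filters) annotation added above ties the flexible array to the member holding its element count, letting FORTIFY_SOURCE and UBSAN bounds-check array accesses at runtime. A generic sketch of the pattern (not watch_queue code):

	struct name_table {
		u32 nr_names;	/* must be set before names[] is indexed */
		const char *names[] __counted_by(nr_names);
	};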
diff --git a/include/linux/wmi.h b/include/linux/wmi.h
index b88d7b58e61e..686291b87852 100644
--- a/include/linux/wmi.h
+++ b/include/linux/wmi.h
@@ -11,27 +11,50 @@
#include <linux/device.h>
#include <linux/acpi.h>
#include <linux/mod_devicetable.h>
-#include <uapi/linux/wmi.h>
+/**
+ * struct wmi_device - WMI device structure
+ * @dev: Device associated with this WMI device
+ * @setable: True for devices implementing the Set Control Method
+ *
+ * This represents WMI devices discovered by the WMI driver core.
+ */
struct wmi_device {
struct device dev;
-
- /* True for data blocks implementing the Set Control Method */
bool setable;
};
-/* evaluate the ACPI method associated with this device */
+/**
+ * to_wmi_device() - Helper macro to cast a device to a wmi_device
+ * @device: device struct
+ *
+ * Cast a struct device to a struct wmi_device.
+ */
+#define to_wmi_device(device) container_of(device, struct wmi_device, dev)
+
extern acpi_status wmidev_evaluate_method(struct wmi_device *wdev,
u8 instance, u32 method_id,
const struct acpi_buffer *in,
struct acpi_buffer *out);
-/* Caller must kfree the result. */
extern union acpi_object *wmidev_block_query(struct wmi_device *wdev,
u8 instance);
-extern int set_required_buffer_size(struct wmi_device *wdev, u64 length);
+acpi_status wmidev_block_set(struct wmi_device *wdev, u8 instance, const struct acpi_buffer *in);
+
+u8 wmidev_instance_count(struct wmi_device *wdev);
+/**
+ * struct wmi_driver - WMI driver structure
+ * @driver: Driver model structure
+ * @id_table: List of WMI GUIDs supported by this driver
+ * @no_notify_data: WMI events provide no event data
+ * @probe: Callback for device binding
+ * @remove: Callback for device unbinding
+ * @notify: Callback for receiving WMI events
+ *
+ * This represents WMI drivers which handle WMI devices.
+ */
struct wmi_driver {
struct device_driver driver;
const struct wmi_device_id *id_table;
@@ -40,15 +63,29 @@ struct wmi_driver {
int (*probe)(struct wmi_device *wdev, const void *context);
void (*remove)(struct wmi_device *wdev);
void (*notify)(struct wmi_device *device, union acpi_object *data);
- long (*filter_callback)(struct wmi_device *wdev, unsigned int cmd,
- struct wmi_ioctl_buffer *arg);
};
extern int __must_check __wmi_driver_register(struct wmi_driver *driver,
struct module *owner);
extern void wmi_driver_unregister(struct wmi_driver *driver);
+
+/**
+ * wmi_driver_register() - Helper macro to register a WMI driver
+ * @driver: wmi_driver struct
+ *
+ * Helper macro for registering a WMI driver. It automatically passes
+ * THIS_MODULE to the underlying function.
+ */
#define wmi_driver_register(driver) __wmi_driver_register((driver), THIS_MODULE)
+/**
+ * module_wmi_driver() - Helper macro to register/unregister a WMI driver
+ * @__wmi_driver: wmi_driver struct
+ *
+ * Helper macro for WMI drivers which do not do anything special in module
+ * init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit().
+ */
#define module_wmi_driver(__wmi_driver) \
module_driver(__wmi_driver, wmi_driver_register, \
wmi_driver_unregister)
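Putting the wmi.h pieces together, a skeletal driver might look as follows. The GUID, names, and empty probe body are placeholders, not taken from this diff:

	static const struct wmi_device_id my_wmi_id_table[] = {
		{ "ABBC0F6F-8EA1-11D1-00A0-C90629100000", NULL },
		{ }
	};
	MODULE_DEVICE_TABLE(wmi, my_wmi_id_table);

	static int my_wmi_probe(struct wmi_device *wdev, const void *context)
	{
		return 0;
	}

	static struct wmi_driver my_wmi_driver = {
		.driver = { .name = "my-wmi" },
		.id_table = my_wmi_id_table,
		.probe = my_wmi_probe,
	};
	module_wmi_driver(my_wmi_driver);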
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 683efe29fa69..2cc0a9606175 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -14,12 +14,7 @@
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/rcupdate.h>
-
-struct workqueue_struct;
-
-struct work_struct;
-typedef void (*work_func_t)(struct work_struct *work);
-void delayed_work_timer_fn(struct timer_list *t);
+#include <linux/workqueue_types.h>
/*
* The first word is the work queue pointer and the flags rolled into
@@ -95,15 +90,6 @@ enum {
#define WORK_STRUCT_FLAG_MASK ((1ul << WORK_STRUCT_FLAG_BITS) - 1)
#define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK)
-struct work_struct {
- atomic_long_t data;
- struct list_head entry;
- work_func_t func;
-#ifdef CONFIG_LOCKDEP
- struct lockdep_map lockdep_map;
-#endif
-};
-
#define WORK_DATA_INIT() ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT() \
ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC))
@@ -125,6 +111,17 @@ struct rcu_work {
struct workqueue_struct *wq;
};
+enum wq_affn_scope {
+ WQ_AFFN_DFL, /* use system default */
+ WQ_AFFN_CPU, /* one pod per CPU */
+ WQ_AFFN_SMT, /* one pod per SMT */
+ WQ_AFFN_CACHE, /* one pod per LLC */
+ WQ_AFFN_NUMA, /* one pod per NUMA node */
+ WQ_AFFN_SYSTEM, /* one pod across the whole system */
+
+ WQ_AFFN_NR_TYPES,
+};
+
/**
* struct workqueue_attrs - A struct for workqueue attributes.
*
@@ -138,17 +135,58 @@ struct workqueue_attrs {
/**
* @cpumask: allowed CPUs
+ *
+ * Work items in this workqueue are affine to these CPUs and not allowed
+ * to execute on other CPUs. A pool serving a workqueue must have the
+ * same @cpumask.
*/
cpumask_var_t cpumask;
/**
- * @no_numa: disable NUMA affinity
+ * @__pod_cpumask: internal attribute used to create per-pod pools
*
- * Unlike other fields, ``no_numa`` isn't a property of a worker_pool. It
- * only modifies how :c:func:`apply_workqueue_attrs` select pools and thus
- * doesn't participate in pool hash calculations or equality comparisons.
+ * Internal use only.
+ *
+ * Per-pod unbound worker pools are used to improve locality. Always a
+ * subset of ->cpumask. A workqueue can be associated with multiple
+ * worker pools with disjoint @__pod_cpumask's. Whether the enforcement
+ * of a pool's @__pod_cpumask is strict depends on @affn_strict.
*/
- bool no_numa;
+ cpumask_var_t __pod_cpumask;
+
+ /**
+ * @affn_strict: affinity scope is strict
+ *
+ * If clear, workqueue will make a best-effort attempt at starting the
+ * worker inside @__pod_cpumask but the scheduler is free to migrate it
+ * outside.
+ *
+ * If set, workers are only allowed to run inside @__pod_cpumask.
+ */
+ bool affn_strict;
+
+ /*
+ * The fields below aren't properties of a worker_pool. They only modify how
+ * :c:func:`apply_workqueue_attrs` selects pools and thus don't
+ * participate in pool hash calculations or equality comparisons.
+ */
+
+ /**
+ * @affn_scope: unbound CPU affinity scope
+ *
+ * CPU pods are used to improve execution locality of unbound work
+ * items. There are multiple pod types, one for each wq_affn_scope, and
+ * every CPU in the system belongs to one pod in every pod type. CPUs
+ * that belong to the same pod share the worker pool. For example,
+ * selecting %WQ_AFFN_NUMA makes the workqueue use a separate worker
+ * pool for each NUMA node.
+ */
+ enum wq_affn_scope affn_scope;
+
+ /**
+ * @ordered: work items must be executed one by one in queueing order
+ */
+ bool ordered;
};
static inline struct delayed_work *to_delayed_work(struct work_struct *work)
@@ -222,18 +260,16 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
* to generate better code.
*/
#ifdef CONFIG_LOCKDEP
-#define __INIT_WORK(_work, _func, _onstack) \
+#define __INIT_WORK_KEY(_work, _func, _onstack, _key) \
do { \
- static struct lock_class_key __key; \
- \
__init_work((_work), _onstack); \
(_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
- lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \
+ lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, (_key), 0); \
INIT_LIST_HEAD(&(_work)->entry); \
(_work)->func = (_func); \
} while (0)
#else
-#define __INIT_WORK(_work, _func, _onstack) \
+#define __INIT_WORK_KEY(_work, _func, _onstack, _key) \
do { \
__init_work((_work), _onstack); \
(_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
@@ -242,12 +278,22 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
} while (0)
#endif
+#define __INIT_WORK(_work, _func, _onstack) \
+ do { \
+ static __maybe_unused struct lock_class_key __key; \
+ \
+ __INIT_WORK_KEY(_work, _func, _onstack, &__key); \
+ } while (0)
+
#define INIT_WORK(_work, _func) \
__INIT_WORK((_work), (_func), 0)
#define INIT_WORK_ONSTACK(_work, _func) \
__INIT_WORK((_work), (_func), 1)
+#define INIT_WORK_ONSTACK_KEY(_work, _func, _key) \
+ __INIT_WORK_KEY((_work), (_func), 1, _key)
+
#define __INIT_DELAYED_WORK(_work, _func, _tflags) \
do { \
INIT_WORK(&(_work)->work, (_func)); \
@@ -343,14 +389,10 @@ enum {
__WQ_ORDERED_EXPLICIT = 1 << 19, /* internal: alloc_ordered_workqueue() */
WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
- WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
+ WQ_UNBOUND_MAX_ACTIVE = WQ_MAX_ACTIVE,
WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2,
};
-/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
-#define WQ_UNBOUND_MAX_ACTIVE \
- max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)
-
/*
* System-wide workqueues which are always present.
*
@@ -391,7 +433,7 @@ extern struct workqueue_struct *system_freezable_power_efficient_wq;
* alloc_workqueue - allocate a workqueue
* @fmt: printf format for the name of the workqueue
* @flags: WQ_* flags
- * @max_active: max in-flight work items, 0 for default
+ * @max_active: max in-flight work items per CPU, 0 for default
* remaining args: args for @fmt
*
* Allocate a workqueue with the specified parameters. For detailed
@@ -435,7 +477,7 @@ struct workqueue_attrs *alloc_workqueue_attrs(void);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
const struct workqueue_attrs *attrs);
-int workqueue_set_unbound_cpumask(cpumask_var_t cpumask);
+extern int workqueue_unbound_exclude_cpumask(cpumask_var_t cpumask);
extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
struct work_struct *work);
@@ -569,6 +611,7 @@ static inline bool schedule_work(struct work_struct *work)
/*
* Detect attempt to flush system-wide workqueues at compile time when possible.
 * Warn on attempts to flush system-wide workqueues at runtime.
*
* See https://lkml.kernel.org/r/49925af7-78a8-a3dd-bce6-cfc02e1a9236@I-love.SAKURA.ne.jp
* for reasons and steps for converting system-wide workqueues into local workqueues.
@@ -576,52 +619,13 @@ static inline bool schedule_work(struct work_struct *work)
extern void __warn_flushing_systemwide_wq(void)
__compiletime_warning("Please avoid flushing system-wide workqueues.");
-/**
- * flush_scheduled_work - ensure that any scheduled work has run to completion.
- *
- * Forces execution of the kernel-global workqueue and blocks until its
- * completion.
- *
- * It's very easy to get into trouble if you don't take great care.
- * Either of the following situations will lead to deadlock:
- *
- * One of the work items currently on the workqueue needs to acquire
- * a lock held by your code or its caller.
- *
- * Your code is running in the context of a work routine.
- *
- * They will be detected by lockdep when they occur, but the first might not
- * occur very often. It depends on what work items are on the workqueue and
- * what locks they need, which you have no control over.
- *
- * In most situations flushing the entire workqueue is overkill; you merely
- * need to know that a particular work item isn't queued and isn't running.
- * In such cases you should use cancel_delayed_work_sync() or
- * cancel_work_sync() instead.
- *
- * Please stop calling this function! A conversion to stop flushing system-wide
- * workqueues is in progress. This function will be removed after all in-tree
- * users stopped calling this function.
- */
-/*
- * The background of commit 771c035372a036f8 ("deprecate the
- * '__deprecated' attribute warnings entirely and for good") is that,
- * since Linus builds all modules between every single pull he does,
- * the standard kernel build needs to be _clean_ in order to be able to
- * notice when new problems happen. Therefore, don't emit warning while
- * there are in-tree users.
- */
+/* Please stop using this function; it will be removed in the near future. */
#define flush_scheduled_work() \
({ \
- if (0) \
- __warn_flushing_systemwide_wq(); \
+ __warn_flushing_systemwide_wq(); \
__flush_workqueue(system_wq); \
})
-/*
- * Although there is no longer in-tree caller, for now just emit warning
- * in order to give out-of-tree callers time to update.
- */
#define flush_workqueue(wq) \
({ \
struct workqueue_struct *_wq = (wq); \
@@ -683,8 +687,32 @@ static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
return fn(arg);
}
#else
-long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
-long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg);
+long work_on_cpu_key(int cpu, long (*fn)(void *),
+ void *arg, struct lock_class_key *key);
+/*
+ * A new key is defined for each caller to make sure the work
+ * associated with the function doesn't share its locking class.
+ */
+#define work_on_cpu(_cpu, _fn, _arg) \
+({ \
+ static struct lock_class_key __key; \
+ \
+ work_on_cpu_key(_cpu, _fn, _arg, &__key); \
+})
+
+long work_on_cpu_safe_key(int cpu, long (*fn)(void *),
+ void *arg, struct lock_class_key *key);
+
+/*
+ * A new key is defined for each caller to make sure the work
+ * associated with the function doesn't share its locking class.
+ */
+#define work_on_cpu_safe(_cpu, _fn, _arg) \
+({ \
+ static struct lock_class_key __key; \
+ \
+ work_on_cpu_safe_key(_cpu, _fn, _arg, &__key); \
+})
#endif /* CONFIG_SMP */
#ifdef CONFIG_FREEZER
@@ -714,5 +742,6 @@ int workqueue_offline_cpu(unsigned int cpu);
void __init workqueue_init_early(void);
void __init workqueue_init(void);
+void __init workqueue_init_topology(void);
#endif
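The work_on_cpu() rework above moves the per-call-site lock_class_key out of the old function and into the macro, so lockdep no longer folds every user into a single class. Usage is unchanged; a sketch with illustrative names:

	static long read_counter_on_cpu(void *arg)
	{
		/* runs synchronously on the requested CPU */
		return 0;
	}

	static long query_cpu2(void)
	{
		/* a fresh lock_class_key is minted for this call site */
		return work_on_cpu(2, read_counter_on_cpu, NULL);
	}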
diff --git a/include/linux/workqueue_types.h b/include/linux/workqueue_types.h
new file mode 100644
index 000000000000..4c38824f3ab4
--- /dev/null
+++ b/include/linux/workqueue_types.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_WORKQUEUE_TYPES_H
+#define _LINUX_WORKQUEUE_TYPES_H
+
+#include <linux/atomic.h>
+#include <linux/lockdep_types.h>
+#include <linux/timer_types.h>
+#include <linux/types.h>
+
+struct workqueue_struct;
+
+struct work_struct;
+typedef void (*work_func_t)(struct work_struct *work);
+void delayed_work_timer_fn(struct timer_list *t);
+
+struct work_struct {
+ atomic_long_t data;
+ struct list_head entry;
+ work_func_t func;
+#ifdef CONFIG_LOCKDEP
+ struct lockdep_map lockdep_map;
+#endif
+};
+
+#endif /* _LINUX_WORKQUEUE_TYPES_H */
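The new types-only header exists so a structure can embed a work item without pulling in the whole workqueue API. A sketch of the intended split (names are illustrative):

	/* my_device.h: embedding only needs the lightweight types header */
	#include <linux/workqueue_types.h>

	struct my_device {
		struct work_struct irq_work;
	};

	/* my_device.c still includes <linux/workqueue.h> to queue or flush */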
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index fba937999fbf..453736fd1d23 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -60,7 +60,7 @@ struct writeback_control {
unsigned for_reclaim:1; /* Invoked from the page allocator */
unsigned range_cyclic:1; /* range_start is cyclic */
unsigned for_sync:1; /* sync(2) WB_SYNC_ALL writeback */
- unsigned unpinned_fscache_wb:1; /* Cleared I_PINNING_FSCACHE_WB */
+ unsigned unpinned_netfs_wb:1; /* Cleared I_PINNING_NETFS_WB */
/*
* When writeback IOs are bounced through async layers, only the
@@ -193,7 +193,6 @@ void inode_io_list_del(struct inode *inode);
/* writeback.h requires fs.h; it, too, is not included from here. */
static inline void wait_on_inode(struct inode *inode)
{
- might_sleep();
wait_on_bit(&inode->i_state, __I_NEW, TASK_UNINTERRUPTIBLE);
}
@@ -375,11 +374,6 @@ void tag_pages_for_writeback(struct address_space *mapping,
pgoff_t start, pgoff_t end);
bool filemap_dirty_folio(struct address_space *mapping, struct folio *folio);
-void folio_account_redirty(struct folio *folio);
-static inline void account_page_redirty(struct page *page)
-{
- folio_account_redirty(page_folio(page));
-}
bool folio_redirty_for_writepage(struct writeback_control *, struct folio *);
bool redirty_page_for_writepage(struct writeback_control *, struct page *);
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 741703b45f61..cb571dfcf4b1 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -856,6 +856,9 @@ static inline int __must_check xa_insert_irq(struct xarray *xa,
* stores the index into the @id pointer, then stores the entry at
* that index. A concurrent lookup will not see an uninitialised @id.
*
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
* Context: Any context. Takes and releases the xa_lock. May sleep if
* the @gfp flags permit.
* Return: 0 on success, -ENOMEM if memory could not be allocated or
@@ -886,6 +889,9 @@ static inline __must_check int xa_alloc(struct xarray *xa, u32 *id,
* stores the index into the @id pointer, then stores the entry at
* that index. A concurrent lookup will not see an uninitialised @id.
*
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
* Context: Any context. Takes and releases the xa_lock while
* disabling softirqs. May sleep if the @gfp flags permit.
* Return: 0 on success, -ENOMEM if memory could not be allocated or
@@ -916,6 +922,9 @@ static inline int __must_check xa_alloc_bh(struct xarray *xa, u32 *id,
* stores the index into the @id pointer, then stores the entry at
* that index. A concurrent lookup will not see an uninitialised @id.
*
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
* Context: Process context. Takes and releases the xa_lock while
* disabling interrupts. May sleep if the @gfp flags permit.
* Return: 0 on success, -ENOMEM if memory could not be allocated or
@@ -949,6 +958,9 @@ static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id,
* The search for an empty entry will start at @next and will wrap
* around if necessary.
*
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
* Context: Any context. Takes and releases the xa_lock. May sleep if
* the @gfp flags permit.
* Return: 0 if the allocation succeeded without wrapping. 1 if the
@@ -983,6 +995,9 @@ static inline int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
* The search for an empty entry will start at @next and will wrap
* around if necessary.
*
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
* Context: Any context. Takes and releases the xa_lock while
* disabling softirqs. May sleep if the @gfp flags permit.
* Return: 0 if the allocation succeeded without wrapping. 1 if the
@@ -1017,6 +1032,9 @@ static inline int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry,
* The search for an empty entry will start at @next and will wrap
* around if necessary.
*
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
* Context: Process context. Takes and releases the xa_lock while
* disabling interrupts. May sleep if the @gfp flags permit.
* Return: 0 if the allocation succeeded without wrapping. 1 if the
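All six xa_alloc*() kernel-doc blocks gain the same caveat: the array must have been initialized with XA_FLAGS_ALLOC. A minimal sketch (identifiers illustrative):

	static DEFINE_XARRAY_ALLOC(my_ids);	/* implies XA_FLAGS_ALLOC */

	static int store_entry(void *entry)
	{
		u32 id;
		int err;

		err = xa_alloc(&my_ids, &id, entry, xa_limit_32b, GFP_KERNEL);
		if (!err)
			pr_debug("entry stored at index %u\n", id);
		return err;
	}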
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index d591ef59aa98..d20051865800 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -114,13 +114,15 @@ struct simple_xattr {
};
void simple_xattrs_init(struct simple_xattrs *xattrs);
-void simple_xattrs_free(struct simple_xattrs *xattrs);
+void simple_xattrs_free(struct simple_xattrs *xattrs, size_t *freed_space);
+size_t simple_xattr_space(const char *name, size_t size);
struct simple_xattr *simple_xattr_alloc(const void *value, size_t size);
+void simple_xattr_free(struct simple_xattr *xattr);
int simple_xattr_get(struct simple_xattrs *xattrs, const char *name,
void *buffer, size_t size);
-int simple_xattr_set(struct simple_xattrs *xattrs, const char *name,
- const void *value, size_t size, int flags,
- ssize_t *removed_size);
+struct simple_xattr *simple_xattr_set(struct simple_xattrs *xattrs,
+ const char *name, const void *value,
+ size_t size, int flags);
ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs,
char *buffer, size_t size);
void simple_xattr_add(struct simple_xattrs *xattrs,
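A hedged sketch of the new simple_xattr_set() calling convention, assuming (as the signature change suggests) that it returns the displaced entry, or an ERR_PTR on failure; arguments are as in the prototype above:

	struct simple_xattr *old;

	old = simple_xattr_set(xattrs, name, value, size, flags);
	if (IS_ERR(old))
		return PTR_ERR(old);
	/* release the replaced entry, if any */
	simple_xattr_free(old);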
diff --git a/include/linux/zpool.h b/include/linux/zpool.h
index e8997010612a..3296438eec06 100644
--- a/include/linux/zpool.h
+++ b/include/linux/zpool.h
@@ -14,10 +14,6 @@
struct zpool;
-struct zpool_ops {
- int (*evict)(struct zpool *pool, unsigned long handle);
-};
-
/*
* Control how a handle is mapped. It will be ignored if the
* implementation does not support it. Its use is optional.
@@ -39,8 +35,7 @@ enum zpool_mapmode {
bool zpool_has_pool(char *type);
-struct zpool *zpool_create_pool(const char *type, const char *name,
- gfp_t gfp, const struct zpool_ops *ops);
+struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp);
const char *zpool_get_type(struct zpool *pool);
@@ -53,9 +48,6 @@ int zpool_malloc(struct zpool *pool, size_t size, gfp_t gfp,
void zpool_free(struct zpool *pool, unsigned long handle);
-int zpool_shrink(struct zpool *pool, unsigned int pages,
- unsigned int *reclaimed);
-
void *zpool_map_handle(struct zpool *pool, unsigned long handle,
enum zpool_mapmode mm);
@@ -72,7 +64,6 @@ u64 zpool_get_total_size(struct zpool *pool);
* @destroy: destroy a pool.
* @malloc: allocate mem from a pool.
* @free: free mem from a pool.
- * @shrink: shrink the pool.
* @sleep_mapped: whether zpool driver can sleep during map.
* @map: map a handle.
* @unmap: unmap a handle.
@@ -87,10 +78,7 @@ struct zpool_driver {
atomic_t refcount;
struct list_head list;
- void *(*create)(const char *name,
- gfp_t gfp,
- const struct zpool_ops *ops,
- struct zpool *zpool);
+ void *(*create)(const char *name, gfp_t gfp);
void (*destroy)(void *pool);
bool malloc_support_movable;
@@ -98,9 +86,6 @@ struct zpool_driver {
unsigned long *handle);
void (*free)(void *pool, unsigned long handle);
- int (*shrink)(void *pool, unsigned int pages,
- unsigned int *reclaimed);
-
bool sleep_mapped;
void *(*map)(void *pool, unsigned long handle,
enum zpool_mapmode mm);
@@ -113,7 +98,6 @@ void zpool_register_driver(struct zpool_driver *driver);
int zpool_unregister_driver(struct zpool_driver *driver);
-bool zpool_evictable(struct zpool *pool);
bool zpool_can_sleep_mapped(struct zpool *pool);
#endif
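With eviction gone from the zpool API, a pool is created and used without an ops struct; reclaim policy now lives entirely in the zswap caller. A sketch, with the backend name and allocation size illustrative:

	struct zpool *pool = zpool_create_pool("zsmalloc", "demo", GFP_KERNEL);
	unsigned long handle;

	if (pool && !zpool_malloc(pool, 64, GFP_KERNEL, &handle))
		zpool_free(pool, handle);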
diff --git a/include/linux/zswap.h b/include/linux/zswap.h
new file mode 100644
index 000000000000..0b709f5bc65f
--- /dev/null
+++ b/include/linux/zswap.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_ZSWAP_H
+#define _LINUX_ZSWAP_H
+
+#include <linux/types.h>
+#include <linux/mm_types.h>
+
+struct lruvec;
+
+extern u64 zswap_pool_total_size;
+extern atomic_t zswap_stored_pages;
+
+#ifdef CONFIG_ZSWAP
+
+struct zswap_lruvec_state {
+ /*
+ * Number of pages in zswap that should be protected from the shrinker.
+ * This number is an estimate of the following counts:
+ *
+ * a) Recent page faults.
+ * b) Recent insertions into the zswap LRU. This includes new zswap stores,
+ * as well as recent zswap LRU rotations.
+ *
+ * These pages are likely to be warm, and might incur IO if they are written
+ * to swap.
+ */
+ atomic_long_t nr_zswap_protected;
+};
+
+bool zswap_store(struct folio *folio);
+bool zswap_load(struct folio *folio);
+void zswap_invalidate(int type, pgoff_t offset);
+void zswap_swapon(int type);
+void zswap_swapoff(int type);
+void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg);
+void zswap_lruvec_state_init(struct lruvec *lruvec);
+void zswap_folio_swapin(struct folio *folio);
+bool is_zswap_enabled(void);
+#else
+
+struct zswap_lruvec_state {};
+
+static inline bool zswap_store(struct folio *folio)
+{
+ return false;
+}
+
+static inline bool zswap_load(struct folio *folio)
+{
+ return false;
+}
+
+static inline void zswap_invalidate(int type, pgoff_t offset) {}
+static inline void zswap_swapon(int type) {}
+static inline void zswap_swapoff(int type) {}
+static inline void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg) {}
+static inline void zswap_lruvec_state_init(struct lruvec *lruvec) {}
+static inline void zswap_folio_swapin(struct folio *folio) {}
+
+static inline bool is_zswap_enabled(void)
+{
+ return false;
+}
+
+#endif
+
+#endif /* _LINUX_ZSWAP_H */
diff --git a/include/media/cec.h b/include/media/cec.h
index abee41ae02d0..d77982685116 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -113,22 +113,25 @@ struct cec_fh {
#define CEC_FREE_TIME_TO_USEC(ft) ((ft) * 2400)
struct cec_adap_ops {
- /* Low-level callbacks */
+ /* Low-level callbacks, called with adap->lock held */
int (*adap_enable)(struct cec_adapter *adap, bool enable);
int (*adap_monitor_all_enable)(struct cec_adapter *adap, bool enable);
int (*adap_monitor_pin_enable)(struct cec_adapter *adap, bool enable);
int (*adap_log_addr)(struct cec_adapter *adap, u8 logical_addr);
- void (*adap_configured)(struct cec_adapter *adap, bool configured);
+ void (*adap_unconfigured)(struct cec_adapter *adap);
int (*adap_transmit)(struct cec_adapter *adap, u8 attempts,
u32 signal_free_time, struct cec_msg *msg);
+ void (*adap_nb_transmit_canceled)(struct cec_adapter *adap,
+ const struct cec_msg *msg);
void (*adap_status)(struct cec_adapter *adap, struct seq_file *file);
void (*adap_free)(struct cec_adapter *adap);
- /* Error injection callbacks */
+ /* Error injection callbacks, called without adap->lock held */
int (*error_inj_show)(struct cec_adapter *adap, struct seq_file *sf);
bool (*error_inj_parse_line)(struct cec_adapter *adap, char *line);
- /* High-level CEC message callback */
+ /* High-level CEC message callback, called without adap->lock held */
+ void (*configured)(struct cec_adapter *adap);
int (*received)(struct cec_adapter *adap, struct cec_msg *msg);
};
@@ -204,7 +207,20 @@ struct cec_adap_ops {
* passthrough mode.
* @log_addrs: current logical addresses
* @conn_info: current connector info
- * @tx_timeouts: number of transmit timeouts
+ * @tx_timeout_cnt: number of Timed Out transmits.
+ * Reset to 0 when this is reported in cec_adap_status().
+ * @tx_low_drive_cnt: number of Low Drive transmits.
+ * Reset to 0 when this is reported in cec_adap_status().
+ * @tx_error_cnt: number of Error transmits.
+ * Reset to 0 when this is reported in cec_adap_status().
+ * @tx_arb_lost_cnt: number of Arb Lost transmits.
+ * Reset to 0 when this is reported in cec_adap_status().
+ * @tx_low_drive_log_cnt: number of logged Low Drive transmits since the
+ * adapter was enabled. Used to avoid flooding the kernel
+ * log if this happens a lot.
+ * @tx_error_log_cnt: number of logged Error transmits since the adapter was
+ * enabled. Used to avoid flooding the kernel log if this
+ * happens a lot.
* @notifier: CEC notifier
* @pin: CEC pin status struct
* @cec_dir: debugfs cec directory
@@ -259,7 +275,12 @@ struct cec_adapter {
struct cec_log_addrs log_addrs;
struct cec_connector_info conn_info;
- u32 tx_timeouts;
+ u32 tx_timeout_cnt;
+ u32 tx_low_drive_cnt;
+ u32 tx_error_cnt;
+ u32 tx_arb_lost_cnt;
+ u32 tx_low_drive_log_cnt;
+ u32 tx_error_log_cnt;
#ifdef CONFIG_CEC_NOTIFIER
struct cec_notifier *notifier;
@@ -272,7 +293,7 @@ struct cec_adapter {
u32 sequence;
- char input_phys[32];
+ char input_phys[40];
};
static inline void *cec_get_drvdata(const struct cec_adapter *adap)
diff --git a/include/media/davinci/vpif_types.h b/include/media/davinci/vpif_types.h
index d03e5c54347a..6cce1f09c721 100644
--- a/include/media/davinci/vpif_types.h
+++ b/include/media/davinci/vpif_types.h
@@ -72,7 +72,7 @@ struct vpif_capture_config {
int i2c_adapter_id;
const char *card_name;
- struct v4l2_async_subdev *asd[VPIF_CAPTURE_MAX_CHANNELS];
+ struct v4l2_async_connection *asd[VPIF_CAPTURE_MAX_CHANNELS];
int asd_sizes[VPIF_CAPTURE_MAX_CHANNELS];
};
#endif /* _VPIF_TYPES_H */
diff --git a/include/media/dvbdev.h b/include/media/dvbdev.h
index 8958e5e2fc5b..e5a00d126612 100644
--- a/include/media/dvbdev.h
+++ b/include/media/dvbdev.h
@@ -130,7 +130,7 @@ struct dvb_adapter {
* struct dvb_device - represents a DVB device node
*
* @list_head: List head with all DVB devices
- * @ref: reference counter
+ * @ref: reference count for this device
* @fops: pointer to struct file_operations
* @adapter: pointer to the adapter that holds this device node
* @type: type of the device, as defined by &enum dvb_device_type.
@@ -266,10 +266,10 @@ int dvb_register_device(struct dvb_adapter *adap,
/**
* dvb_remove_device - Remove a registered DVB device
*
+ * @dvbdev: pointer to struct dvb_device
+ *
* This does not free memory. dvb_free_device() will do that when
 * the reference count drops to zero
- *
- * @dvbdev: pointer to struct dvb_device
*/
void dvb_remove_device(struct dvb_device *dvbdev);
diff --git a/include/media/i2c/ds90ub9xx.h b/include/media/i2c/ds90ub9xx.h
new file mode 100644
index 000000000000..0245198469ec
--- /dev/null
+++ b/include/media/i2c/ds90ub9xx.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __MEDIA_I2C_DS90UB9XX_H__
+#define __MEDIA_I2C_DS90UB9XX_H__
+
+#include <linux/types.h>
+
+struct i2c_atr;
+
+/**
+ * struct ds90ub9xx_platform_data - platform data for FPD-Link Serializers.
+ * @port: Deserializer RX port for this Serializer
+ * @atr: I2C ATR
+ * @bc_rate: back-channel clock rate
+ */
+struct ds90ub9xx_platform_data {
+ u32 port;
+ struct i2c_atr *atr;
+ unsigned long bc_rate;
+};
+
+#endif /* __MEDIA_I2C_DS90UB9XX_H__ */
diff --git a/include/media/ipu-bridge.h b/include/media/ipu-bridge.h
new file mode 100644
index 000000000000..783bda6d5cc3
--- /dev/null
+++ b/include/media/ipu-bridge.h
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Author: Dan Scally <djrscally@gmail.com> */
+#ifndef __IPU_BRIDGE_H
+#define __IPU_BRIDGE_H
+
+#include <linux/property.h>
+#include <linux/types.h>
+#include <media/v4l2-fwnode.h>
+
+#define IPU_HID "INT343E"
+#define IPU_MAX_LANES 4
+#define IPU_MAX_PORTS 4
+#define MAX_NUM_LINK_FREQS 3
+
+/* Values are educated guesses as we don't have a spec */
+#define IPU_SENSOR_ROTATION_NORMAL 0
+#define IPU_SENSOR_ROTATION_INVERTED 1
+
+#define IPU_SENSOR_CONFIG(_HID, _NR, ...) \
+ (const struct ipu_sensor_config) { \
+ .hid = _HID, \
+ .nr_link_freqs = _NR, \
+ .link_freqs = { __VA_ARGS__ } \
+ }
+
+#define NODE_SENSOR(_HID, _PROPS) \
+ (const struct software_node) { \
+ .name = _HID, \
+ .properties = _PROPS, \
+ }
+
+#define NODE_PORT(_PORT, _SENSOR_NODE) \
+ (const struct software_node) { \
+ .name = _PORT, \
+ .parent = _SENSOR_NODE, \
+ }
+
+#define NODE_ENDPOINT(_EP, _PORT, _PROPS) \
+ (const struct software_node) { \
+ .name = _EP, \
+ .parent = _PORT, \
+ .properties = _PROPS, \
+ }
+
+#define NODE_VCM(_TYPE) \
+ (const struct software_node) { \
+ .name = _TYPE, \
+ }
+
+enum ipu_sensor_swnodes {
+ SWNODE_SENSOR_HID,
+ SWNODE_SENSOR_PORT,
+ SWNODE_SENSOR_ENDPOINT,
+ SWNODE_IPU_PORT,
+ SWNODE_IPU_ENDPOINT,
+ /* below are optional / maybe empty */
+ SWNODE_IVSC_HID,
+ SWNODE_IVSC_SENSOR_PORT,
+ SWNODE_IVSC_SENSOR_ENDPOINT,
+ SWNODE_IVSC_IPU_PORT,
+ SWNODE_IVSC_IPU_ENDPOINT,
+ SWNODE_VCM,
+ SWNODE_COUNT
+};
+
+/* Data representation as it is in ACPI SSDB buffer */
+struct ipu_sensor_ssdb {
+ u8 version;
+ u8 sku;
+ u8 guid_csi2[16];
+ u8 devfunction;
+ u8 bus;
+ u32 dphylinkenfuses;
+ u32 clockdiv;
+ u8 link;
+ u8 lanes;
+ u32 csiparams[10];
+ u32 maxlanespeed;
+ u8 sensorcalibfileidx;
+ u8 sensorcalibfileidxInMBZ[3];
+ u8 romtype;
+ u8 vcmtype;
+ u8 platforminfo;
+ u8 platformsubinfo;
+ u8 flash;
+ u8 privacyled;
+ u8 degree;
+ u8 mipilinkdefined;
+ u32 mclkspeed;
+ u8 controllogicid;
+ u8 reserved1[3];
+ u8 mclkport;
+ u8 reserved2[13];
+} __packed;
+
+struct ipu_property_names {
+ char clock_frequency[16];
+ char rotation[9];
+ char orientation[12];
+ char bus_type[9];
+ char data_lanes[11];
+ char remote_endpoint[16];
+ char link_frequencies[17];
+};
+
+struct ipu_node_names {
+ char port[7];
+ char ivsc_sensor_port[7];
+ char ivsc_ipu_port[7];
+ char endpoint[11];
+ char remote_port[9];
+ char vcm[16];
+};
+
+struct ipu_sensor_config {
+ const char *hid;
+ const u8 nr_link_freqs;
+ const u64 link_freqs[MAX_NUM_LINK_FREQS];
+};
+
+struct ipu_sensor {
+ /* append ssdb.link(u8) in "-%u" format as suffix of HID */
+ char name[ACPI_ID_LEN + 4];
+ struct acpi_device *adev;
+
+ struct device *csi_dev;
+ struct acpi_device *ivsc_adev;
+ char ivsc_name[ACPI_ID_LEN + 4];
+
+ /* SWNODE_COUNT + 1 for terminating NULL */
+ const struct software_node *group[SWNODE_COUNT + 1];
+ struct software_node swnodes[SWNODE_COUNT];
+ struct ipu_node_names node_names;
+
+ u8 link;
+ u8 lanes;
+ u32 mclkspeed;
+ u32 rotation;
+ enum v4l2_fwnode_orientation orientation;
+ const char *vcm_type;
+
+ struct ipu_property_names prop_names;
+ struct property_entry ep_properties[5];
+ struct property_entry dev_properties[5];
+ struct property_entry ipu_properties[3];
+ struct property_entry ivsc_properties[1];
+ struct property_entry ivsc_sensor_ep_properties[4];
+ struct property_entry ivsc_ipu_ep_properties[4];
+
+ struct software_node_ref_args local_ref[1];
+ struct software_node_ref_args remote_ref[1];
+ struct software_node_ref_args vcm_ref[1];
+ struct software_node_ref_args ivsc_sensor_ref[1];
+ struct software_node_ref_args ivsc_ipu_ref[1];
+};
+
+typedef int (*ipu_parse_sensor_fwnode_t)(struct acpi_device *adev,
+ struct ipu_sensor *sensor);
+
+struct ipu_bridge {
+ struct device *dev;
+ ipu_parse_sensor_fwnode_t parse_sensor_fwnode;
+ char ipu_node_name[ACPI_ID_LEN];
+ struct software_node ipu_hid_node;
+ u32 data_lanes[4];
+ unsigned int n_sensors;
+ struct ipu_sensor sensors[IPU_MAX_PORTS];
+};
+
+#if IS_ENABLED(CONFIG_IPU_BRIDGE)
+int ipu_bridge_init(struct device *dev,
+ ipu_parse_sensor_fwnode_t parse_sensor_fwnode);
+int ipu_bridge_parse_ssdb(struct acpi_device *adev, struct ipu_sensor *sensor);
+int ipu_bridge_instantiate_vcm(struct device *sensor);
+#else
+/* Use a define to avoid the @parse_sensor_fwnode argument getting evaluated */
+#define ipu_bridge_init(dev, parse_sensor_fwnode) (0)
+static inline int ipu_bridge_instantiate_vcm(struct device *s) { return 0; }
+#endif
+
+#endif
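For the compound-literal helpers above, a sketch of how a bridge driver might declare a sensor entry; the ACPI HID and link frequency are placeholders:

	static const struct ipu_sensor_config sensor_cfg =
		IPU_SENSOR_CONFIG("INTC1234", 1, 419200000ULL);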
diff --git a/include/media/jpeg.h b/include/media/jpeg.h
new file mode 100644
index 000000000000..a01e142e99a7
--- /dev/null
+++ b/include/media/jpeg.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _MEDIA_JPEG_H_
+#define _MEDIA_JPEG_H_
+
+/* JPEG markers */
+#define JPEG_MARKER_TEM 0x01
+#define JPEG_MARKER_SOF0 0xc0
+#define JPEG_MARKER_DHT 0xc4
+#define JPEG_MARKER_RST 0xd0
+#define JPEG_MARKER_SOI 0xd8
+#define JPEG_MARKER_EOI 0xd9
+#define JPEG_MARKER_SOS 0xda
+#define JPEG_MARKER_DQT 0xdb
+#define JPEG_MARKER_DRI 0xdd
+#define JPEG_MARKER_DHP 0xde
+#define JPEG_MARKER_APP0 0xe0
+#define JPEG_MARKER_COM 0xfe
+
+#endif /* _MEDIA_JPEG_H_ */
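The defines above are the raw marker codes; in the byte stream each is preceded by an 0xff prefix. A sketch of scanning a buffer for the start-of-scan marker:

	static const u8 *find_sos(const u8 *buf, size_t len)
	{
		size_t i;

		/* markers are 0xff followed by the code byte */
		for (i = 0; i + 1 < len; i++)
			if (buf[i] == 0xff && buf[i + 1] == JPEG_MARKER_SOS)
				return buf + i;
		return NULL;
	}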
diff --git a/include/media/media-entity.h b/include/media/media-entity.h
index 741f9c629c6f..2b6cd343ee9e 100644
--- a/include/media/media-entity.h
+++ b/include/media/media-entity.h
@@ -741,7 +741,7 @@ static inline void media_entity_cleanup(struct media_entity *entity) {}
* media_get_pad_index() - retrieves a pad index from an entity
*
* @entity: entity where the pads belong
- * @is_sink: true if the pad is a sink, false if it is a source
+ * @pad_type: the type of the pad, one of MEDIA_PAD_FL_* pad types
 * @sig_type: type of signal of the pad to be searched
*
* This helper function finds the first pad index inside an entity that
@@ -752,7 +752,7 @@ static inline void media_entity_cleanup(struct media_entity *entity) {}
* On success, return the pad number. If the pad was not found or the media
* entity is a NULL pointer, return -EINVAL.
*/
-int media_get_pad_index(struct media_entity *entity, bool is_sink,
+int media_get_pad_index(struct media_entity *entity, u32 pad_type,
enum media_pad_signal_type sig_type);
/**
@@ -1079,7 +1079,7 @@ struct media_pipeline *media_pad_pipeline(struct media_pad *pad);
* Return: returns the pad number on success or a negative error code.
*/
int media_entity_get_fwnode_pad(struct media_entity *entity,
- struct fwnode_handle *fwnode,
+ const struct fwnode_handle *fwnode,
unsigned long direction_flags);
/**
diff --git a/include/media/mipi-csi2.h b/include/media/mipi-csi2.h
index c3d8f12234b1..40fc0264250d 100644
--- a/include/media/mipi-csi2.h
+++ b/include/media/mipi-csi2.h
@@ -19,6 +19,7 @@
#define MIPI_CSI2_DT_NULL 0x10
#define MIPI_CSI2_DT_BLANKING 0x11
#define MIPI_CSI2_DT_EMBEDDED_8B 0x12
+#define MIPI_CSI2_DT_GENERIC_LONG(n) (0x13 + (n) - 1) /* 1..4 */
#define MIPI_CSI2_DT_YUV420_8B 0x18
#define MIPI_CSI2_DT_YUV420_10B 0x19
#define MIPI_CSI2_DT_YUV420_8B_LEGACY 0x1a
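The new parameterized macro folds the generic long packet data types 0x13-0x16 into one define; for example:

	u8 dt = MIPI_CSI2_DT_GENERIC_LONG(2);	/* 0x13 + 2 - 1 == 0x14 */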
diff --git a/include/media/ov_16bit_addr_reg_helpers.h b/include/media/ov_16bit_addr_reg_helpers.h
deleted file mode 100644
index 1c60a50bd795..000000000000
--- a/include/media/ov_16bit_addr_reg_helpers.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * I2C register access helpers for Omnivision OVxxxx image sensors which expect
- * a 16 bit register address in big-endian format and which have 1-3 byte
- * wide registers, in big-endian format (for the higher width registers).
- *
- * Based on the register helpers from drivers/media/i2c/ov2680.c which is:
- * Copyright (C) 2018 Linaro Ltd
- */
-#ifndef __OV_16BIT_ADDR_REG_HELPERS_H
-#define __OV_16BIT_ADDR_REG_HELPERS_H
-
-#include <asm/unaligned.h>
-#include <linux/dev_printk.h>
-#include <linux/i2c.h>
-
-static inline int ov_read_reg(struct i2c_client *client, u16 reg,
- unsigned int len, u32 *val)
-{
- u8 addr_buf[2], data_buf[4] = { };
- struct i2c_msg msgs[2];
- int ret;
-
- if (len > 4)
- return -EINVAL;
-
- put_unaligned_be16(reg, addr_buf);
-
- msgs[0].addr = client->addr;
- msgs[0].flags = 0;
- msgs[0].len = ARRAY_SIZE(addr_buf);
- msgs[0].buf = addr_buf;
-
- msgs[1].addr = client->addr;
- msgs[1].flags = I2C_M_RD;
- msgs[1].len = len;
- msgs[1].buf = &data_buf[4 - len];
-
- ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
- if (ret != ARRAY_SIZE(msgs)) {
- dev_err(&client->dev, "read error: reg=0x%4x: %d\n", reg, ret);
- return -EIO;
- }
-
- *val = get_unaligned_be32(data_buf);
-
- return 0;
-}
-
-#define ov_read_reg8(s, r, v) ov_read_reg(s, r, 1, v)
-#define ov_read_reg16(s, r, v) ov_read_reg(s, r, 2, v)
-#define ov_read_reg24(s, r, v) ov_read_reg(s, r, 3, v)
-
-static inline int ov_write_reg(struct i2c_client *client, u16 reg,
- unsigned int len, u32 val)
-{
- u8 buf[6];
- int ret;
-
- if (len > 4)
- return -EINVAL;
-
- put_unaligned_be16(reg, buf);
- put_unaligned_be32(val << (8 * (4 - len)), buf + 2);
- ret = i2c_master_send(client, buf, len + 2);
- if (ret != len + 2) {
- dev_err(&client->dev, "write error: reg=0x%4x: %d\n", reg, ret);
- return -EIO;
- }
-
- return 0;
-}
-
-#define ov_write_reg8(s, r, v) ov_write_reg(s, r, 1, v)
-#define ov_write_reg16(s, r, v) ov_write_reg(s, r, 2, v)
-#define ov_write_reg24(s, r, v) ov_write_reg(s, r, 3, v)
-
-static inline int ov_update_reg(struct i2c_client *client, u16 reg, u8 mask, u8 val)
-{
- u32 readval;
- int ret;
-
- ret = ov_read_reg8(client, reg, &readval);
- if (ret < 0)
- return ret;
-
- val = (readval & ~mask) | (val & mask);
-
- return ov_write_reg8(client, reg, val);
-}
-
-#endif
diff --git a/include/media/v4l2-async.h b/include/media/v4l2-async.h
index 25eb1d138c06..9bd326d31181 100644
--- a/include/media/v4l2-async.h
+++ b/include/media/v4l2-async.h
@@ -22,76 +22,85 @@ struct v4l2_async_notifier;
* enum v4l2_async_match_type - type of asynchronous subdevice logic to be used
* in order to identify a match
*
- * @V4L2_ASYNC_MATCH_I2C: Match will check for I2C adapter ID and address
- * @V4L2_ASYNC_MATCH_FWNODE: Match will use firmware node
+ * @V4L2_ASYNC_MATCH_TYPE_I2C: Match will check for I2C adapter ID and address
+ * @V4L2_ASYNC_MATCH_TYPE_FWNODE: Match will use firmware node
*
- * This enum is used by the asynchronous sub-device logic to define the
+ * This enum is used by the asynchronous connection logic to define the
* algorithm that will be used to match an asynchronous device.
*/
enum v4l2_async_match_type {
- V4L2_ASYNC_MATCH_I2C,
- V4L2_ASYNC_MATCH_FWNODE,
+ V4L2_ASYNC_MATCH_TYPE_I2C,
+ V4L2_ASYNC_MATCH_TYPE_FWNODE,
};
/**
- * struct v4l2_async_subdev - sub-device descriptor, as known to a bridge
- *
- * @match_type: type of match that will be used
- * @match: union of per-bus type matching data sets
- * @match.fwnode:
- * pointer to &struct fwnode_handle to be matched.
- * Used if @match_type is %V4L2_ASYNC_MATCH_FWNODE.
- * @match.i2c: embedded struct with I2C parameters to be matched.
+ * struct v4l2_async_match_desc - async connection match information
+ *
+ * @type: type of match that will be used
+ * @fwnode: pointer to &struct fwnode_handle to be matched.
+ * Used if @match_type is %V4L2_ASYNC_MATCH_TYPE_FWNODE.
+ * @i2c: embedded struct with I2C parameters to be matched.
* Both @match.i2c.adapter_id and @match.i2c.address
* should be matched.
- * Used if @match_type is %V4L2_ASYNC_MATCH_I2C.
- * @match.i2c.adapter_id:
+ * Used if @match_type is %V4L2_ASYNC_MATCH_TYPE_I2C.
+ * @i2c.adapter_id:
* I2C adapter ID to be matched.
- * Used if @match_type is %V4L2_ASYNC_MATCH_I2C.
- * @match.i2c.address:
+ * Used if @match_type is %V4L2_ASYNC_MATCH_TYPE_I2C.
+ * @i2c.address:
* I2C address to be matched.
- * Used if @match_type is %V4L2_ASYNC_MATCH_I2C.
- * @asd_list: used to add struct v4l2_async_subdev objects to the
- * master notifier @asd_list
- * @list: used to link struct v4l2_async_subdev objects, waiting to be
- * probed, to a notifier->waiting list
- *
- * When this struct is used as a member in a driver specific struct,
- * the driver specific struct shall contain the &struct
- * v4l2_async_subdev as its first member.
+ * Used if @match_type is %V4L2_ASYNC_MATCH_TYPE_I2C.
*/
-struct v4l2_async_subdev {
- enum v4l2_async_match_type match_type;
+struct v4l2_async_match_desc {
+ enum v4l2_async_match_type type;
union {
struct fwnode_handle *fwnode;
struct {
int adapter_id;
unsigned short address;
} i2c;
- } match;
+ };
+};
- /* v4l2-async core private: not to be used by drivers */
- struct list_head list;
- struct list_head asd_list;
+/**
+ * struct v4l2_async_connection - sub-device connection descriptor, as known to
+ * a bridge
+ *
+ * @match: struct of match type and per-bus type matching data sets
+ * @notifier: the async notifier the connection is related to
+ * @asc_entry: used to add struct v4l2_async_connection objects to the
+ * notifier @waiting_list or @done_list
+ * @asc_subdev_entry: entry in struct v4l2_async_subdev.asc_list
+ * @sd: the related sub-device
+ *
+ * When this struct is used as a member in a driver specific struct, the driver
+ * specific struct shall contain the &struct v4l2_async_connection as its first
+ * member.
+ */
+struct v4l2_async_connection {
+ struct v4l2_async_match_desc match;
+ struct v4l2_async_notifier *notifier;
+ struct list_head asc_entry;
+ struct list_head asc_subdev_entry;
+ struct v4l2_subdev *sd;
};
/**
* struct v4l2_async_notifier_operations - Asynchronous V4L2 notifier operations
- * @bound: a subdevice driver has successfully probed one of the subdevices
- * @complete: All subdevices have been probed successfully. The complete
+ * @bound: a sub-device has been bound by the given connection
+ * @complete: All connections have been bound successfully. The complete
* callback is only executed for the root notifier.
* @unbind: a subdevice is leaving
- * @destroy: the asd is about to be freed
+ * @destroy: the asc is about to be freed
*/
struct v4l2_async_notifier_operations {
int (*bound)(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd);
+ struct v4l2_async_connection *asc);
int (*complete)(struct v4l2_async_notifier *notifier);
void (*unbind)(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd);
- void (*destroy)(struct v4l2_async_subdev *asd);
+ struct v4l2_async_connection *asc);
+ void (*destroy)(struct v4l2_async_connection *asc);
};
/**
@@ -101,20 +110,31 @@ struct v4l2_async_notifier_operations {
* @v4l2_dev: v4l2_device of the root notifier, NULL otherwise
* @sd: sub-device that registered the notifier, NULL otherwise
* @parent: parent notifier
- * @asd_list: master list of struct v4l2_async_subdev
- * @waiting: list of struct v4l2_async_subdev, waiting for their drivers
- * @done: list of struct v4l2_subdev, already probed
- * @list: member in a global list of notifiers
+ * @waiting_list: list of struct v4l2_async_connection, waiting for their
+ * drivers
+ * @done_list: list of struct v4l2_subdev, already probed
+ * @notifier_entry: member in a global list of notifiers
*/
struct v4l2_async_notifier {
const struct v4l2_async_notifier_operations *ops;
struct v4l2_device *v4l2_dev;
struct v4l2_subdev *sd;
struct v4l2_async_notifier *parent;
- struct list_head asd_list;
- struct list_head waiting;
- struct list_head done;
- struct list_head list;
+ struct list_head waiting_list;
+ struct list_head done_list;
+ struct list_head notifier_entry;
+};
+
+/**
+ * struct v4l2_async_subdev_endpoint - Entry in sub-device's fwnode list
+ *
+ * @async_subdev_endpoint_entry: An entry in async_subdev_endpoint_list of
+ * &struct v4l2_subdev
+ * @endpoint: Endpoint fwnode against which to match the sub-device
+ */
+struct v4l2_async_subdev_endpoint {
+ struct list_head async_subdev_endpoint_entry;
+ struct fwnode_handle *endpoint;
};
/**
@@ -128,76 +148,71 @@ void v4l2_async_debug_init(struct dentry *debugfs_dir);
* v4l2_async_nf_init - Initialize a notifier.
*
* @notifier: pointer to &struct v4l2_async_notifier
+ * @v4l2_dev: pointer to &struct v4l2_device
*
- * This function initializes the notifier @asd_list. It must be called
+ * This function initializes the notifier @asc_entry. It must be called
* before adding a subdevice to a notifier, using one of:
* v4l2_async_nf_add_fwnode_remote(),
- * v4l2_async_nf_add_fwnode(),
- * v4l2_async_nf_add_i2c(),
- * __v4l2_async_nf_add_subdev() or
- * v4l2_async_nf_parse_fwnode_endpoints().
+ * v4l2_async_nf_add_fwnode() or
+ * v4l2_async_nf_add_i2c().
*/
-void v4l2_async_nf_init(struct v4l2_async_notifier *notifier);
+void v4l2_async_nf_init(struct v4l2_async_notifier *notifier,
+ struct v4l2_device *v4l2_dev);
/**
- * __v4l2_async_nf_add_subdev - Add an async subdev to the
- * notifier's master asd list.
+ * v4l2_async_subdev_nf_init - Initialize a sub-device notifier.
*
* @notifier: pointer to &struct v4l2_async_notifier
- * @asd: pointer to &struct v4l2_async_subdev
+ * @sd: pointer to &struct v4l2_subdev
*
- * \warning: Drivers should avoid using this function and instead use one of:
- * v4l2_async_nf_add_fwnode(),
- * v4l2_async_nf_add_fwnode_remote() or
+ * This function initializes the notifier @asc_list. It must be called
+ * before adding a subdevice to a notifier, using one of:
+ * v4l2_async_nf_add_fwnode_remote(), v4l2_async_nf_add_fwnode() or
* v4l2_async_nf_add_i2c().
- *
- * Call this function before registering a notifier to link the provided @asd to
- * the notifiers master @asd_list. The @asd must be allocated with k*alloc() as
- * it will be freed by the framework when the notifier is destroyed.
*/
-int __v4l2_async_nf_add_subdev(struct v4l2_async_notifier *notifier,
- struct v4l2_async_subdev *asd);
+void v4l2_async_subdev_nf_init(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd);
-struct v4l2_async_subdev *
+struct v4l2_async_connection *
__v4l2_async_nf_add_fwnode(struct v4l2_async_notifier *notifier,
struct fwnode_handle *fwnode,
- unsigned int asd_struct_size);
+ unsigned int asc_struct_size);
/**
* v4l2_async_nf_add_fwnode - Allocate and add a fwnode async
- * subdev to the notifier's master asd_list.
+ * subdev to the notifier's master asc_list.
*
* @notifier: pointer to &struct v4l2_async_notifier
* @fwnode: fwnode handle of the sub-device to be matched, pointer to
* &struct fwnode_handle
- * @type: Type of the driver's async sub-device struct. The &struct
- * v4l2_async_subdev shall be the first member of the driver's async
- * sub-device struct, i.e. both begin at the same memory address.
+ * @type: Type of the driver's async sub-device or connection struct. The
+ * &struct v4l2_async_connection shall be the first member of the
+ * driver's async struct, i.e. both begin at the same memory address.
*
- * Allocate a fwnode-matched asd of size asd_struct_size, and add it to the
- * notifiers @asd_list. The function also gets a reference of the fwnode which
+ * Allocate a fwnode-matched asc of size asc_struct_size, and add it to the
+ * notifiers @asc_list. The function also gets a reference of the fwnode which
* is released later at notifier cleanup time.
*/
#define v4l2_async_nf_add_fwnode(notifier, fwnode, type) \
((type *)__v4l2_async_nf_add_fwnode(notifier, fwnode, sizeof(type)))
-struct v4l2_async_subdev *
+struct v4l2_async_connection *
__v4l2_async_nf_add_fwnode_remote(struct v4l2_async_notifier *notif,
struct fwnode_handle *endpoint,
- unsigned int asd_struct_size);
+ unsigned int asc_struct_size);
/**
* v4l2_async_nf_add_fwnode_remote - Allocate and add a fwnode
* remote async subdev to the
- * notifier's master asd_list.
+ * notifier's master asc_list.
*
* @notifier: pointer to &struct v4l2_async_notifier
- * @ep: local endpoint pointing to the remote sub-device to be matched,
+ * @ep: local endpoint pointing to the remote connection to be matched,
* pointer to &struct fwnode_handle
- * @type: Type of the driver's async sub-device struct. The &struct
- * v4l2_async_subdev shall be the first member of the driver's async
- * sub-device struct, i.e. both begin at the same memory address.
+ * @type: Type of the driver's async connection struct. The &struct
+ * v4l2_async_connection shall be the first member of the driver's async
+ * connection struct, i.e. both begin at the same memory address.
*
 * Gets the remote endpoint of a given local endpoint, sets it up for fwnode
- * matching and adds the async sub-device to the notifier's @asd_list. The
+ * matching and adds the async connection to the notifier's @asc_list. The
* function also gets a reference of the fwnode which is released later at
* notifier cleanup time.
*
@@ -207,46 +222,63 @@ __v4l2_async_nf_add_fwnode_remote(struct v4l2_async_notifier *notif,
#define v4l2_async_nf_add_fwnode_remote(notifier, ep, type) \
((type *)__v4l2_async_nf_add_fwnode_remote(notifier, ep, sizeof(type)))
-struct v4l2_async_subdev *
+struct v4l2_async_connection *
__v4l2_async_nf_add_i2c(struct v4l2_async_notifier *notifier,
int adapter_id, unsigned short address,
- unsigned int asd_struct_size);
+ unsigned int asc_struct_size);
/**
* v4l2_async_nf_add_i2c - Allocate and add an i2c async
- * subdev to the notifier's master asd_list.
+ * subdev to the notifier's master asc_list.
*
* @notifier: pointer to &struct v4l2_async_notifier
* @adapter: I2C adapter ID to be matched
- * @address: I2C address of sub-device to be matched
- * @type: Type of the driver's async sub-device struct. The &struct
- * v4l2_async_subdev shall be the first member of the driver's async
- * sub-device struct, i.e. both begin at the same memory address.
+ * @address: I2C address of connection to be matched
+ * @type: Type of the driver's async connection struct. The &struct
+ * v4l2_async_connection shall be the first member of the driver's async
+ * connection struct, i.e. both begin at the same memory address.
*
* Same as v4l2_async_nf_add_fwnode() but for I2C matched
- * sub-devices.
+ * connections.
*/
#define v4l2_async_nf_add_i2c(notifier, adapter, address, type) \
((type *)__v4l2_async_nf_add_i2c(notifier, adapter, address, \
sizeof(type)))
/**
- * v4l2_async_nf_register - registers a subdevice asynchronous notifier
+ * v4l2_async_subdev_endpoint_add - Add an endpoint fwnode to async sub-device
+ * matching list
*
- * @v4l2_dev: pointer to &struct v4l2_device
- * @notifier: pointer to &struct v4l2_async_notifier
+ * @sd: the sub-device
+ * @fwnode: the endpoint fwnode to match
+ *
+ * Add a fwnode to the async sub-device's matching list. This allows registering
+ * multiple async sub-devices from a single device.
+ *
+ * Note that v4l2_subdev_cleanup() must be called as part of the sub-device's
+ * cleanup if endpoints have been added to the sub-device's fwnode matching
+ * list.
+ *
+ * Returns an error on failure, 0 on success.
*/
-int v4l2_async_nf_register(struct v4l2_device *v4l2_dev,
- struct v4l2_async_notifier *notifier);
+int v4l2_async_subdev_endpoint_add(struct v4l2_subdev *sd,
+ struct fwnode_handle *fwnode);
/**
- * v4l2_async_subdev_nf_register - registers a subdevice asynchronous
- * notifier for a sub-device
+ * v4l2_async_connection_unique - return a unique &struct v4l2_async_connection
+ * for a sub-device
+ * @sd: the sub-device
+ *
+ * Return an async connection for a sub-device, when there is a single
+ * one only.
+ */
+struct v4l2_async_connection *
+v4l2_async_connection_unique(struct v4l2_subdev *sd);
+
+/**
+ * v4l2_async_nf_register - registers a subdevice asynchronous notifier
*
- * @sd: pointer to &struct v4l2_subdev
* @notifier: pointer to &struct v4l2_async_notifier
*/
-int v4l2_async_subdev_nf_register(struct v4l2_subdev *sd,
- struct v4l2_async_notifier *notifier);
+int v4l2_async_nf_register(struct v4l2_async_notifier *notifier);
/**
* v4l2_async_nf_unregister - unregisters a subdevice
@@ -261,14 +293,10 @@ void v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier);
* @notifier: the notifier the resources of which are to be cleaned up
*
* Release memory resources related to a notifier, including the async
- * sub-devices allocated for the purposes of the notifier but not the notifier
+ * connections allocated for the purposes of the notifier but not the notifier
* itself. The user is responsible for calling this function to clean up the
- * notifier after calling
- * v4l2_async_nf_add_fwnode_remote(),
- * v4l2_async_nf_add_fwnode(),
- * v4l2_async_nf_add_i2c(),
- * __v4l2_async_nf_add_subdev() or
- * v4l2_async_nf_parse_fwnode_endpoints().
+ * notifier after calling v4l2_async_nf_add_fwnode_remote(),
+ * v4l2_async_nf_add_fwnode() or v4l2_async_nf_add_i2c().
*
* There is no harm from calling v4l2_async_nf_cleanup() in other
* cases as long as its memory has been zeroed after it has been
diff --git a/include/media/v4l2-cci.h b/include/media/v4l2-cci.h
new file mode 100644
index 000000000000..4e96e90ee636
--- /dev/null
+++ b/include/media/v4l2-cci.h
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * MIPI Camera Control Interface (CCI) register access helpers.
+ *
+ * Copyright (C) 2023 Hans de Goede <hansg@kernel.org>
+ */
+#ifndef _V4L2_CCI_H
+#define _V4L2_CCI_H
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/types.h>
+
+struct i2c_client;
+struct regmap;
+
+/**
+ * struct cci_reg_sequence - An individual write from a sequence of CCI writes
+ *
+ * @reg: Register address, use CCI_REG#() macros to encode reg width
+ * @val: Register value
+ *
+ * Register/value pairs for sequences of writes.
+ */
+struct cci_reg_sequence {
+ u32 reg;
+ u64 val;
+};
+
+/*
+ * Macros to define register address with the register width encoded
+ * into the higher bits.
+ */
+#define CCI_REG_ADDR_MASK GENMASK(15, 0)
+#define CCI_REG_WIDTH_SHIFT 16
+#define CCI_REG_WIDTH_MASK GENMASK(19, 16)
+/*
+ * Private CCI register flags, for the use of drivers.
+ */
+#define CCI_REG_PRIVATE_SHIFT 28U
+#define CCI_REG_PRIVATE_MASK GENMASK(31U, CCI_REG_PRIVATE_SHIFT)
+
+#define CCI_REG_WIDTH_BYTES(x) FIELD_GET(CCI_REG_WIDTH_MASK, x)
+#define CCI_REG_WIDTH(x) (CCI_REG_WIDTH_BYTES(x) << 3)
+#define CCI_REG_ADDR(x) FIELD_GET(CCI_REG_ADDR_MASK, x)
+#define CCI_REG_LE BIT(20)
+
+#define CCI_REG8(x) ((1 << CCI_REG_WIDTH_SHIFT) | (x))
+#define CCI_REG16(x) ((2 << CCI_REG_WIDTH_SHIFT) | (x))
+#define CCI_REG24(x) ((3 << CCI_REG_WIDTH_SHIFT) | (x))
+#define CCI_REG32(x) ((4 << CCI_REG_WIDTH_SHIFT) | (x))
+#define CCI_REG64(x) ((8 << CCI_REG_WIDTH_SHIFT) | (x))
+#define CCI_REG16_LE(x) (CCI_REG_LE | (2U << CCI_REG_WIDTH_SHIFT) | (x))
+#define CCI_REG24_LE(x) (CCI_REG_LE | (3U << CCI_REG_WIDTH_SHIFT) | (x))
+#define CCI_REG32_LE(x) (CCI_REG_LE | (4U << CCI_REG_WIDTH_SHIFT) | (x))
+#define CCI_REG64_LE(x) (CCI_REG_LE | (8U << CCI_REG_WIDTH_SHIFT) | (x))
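As an example of the encoding, a hypothetical 16-bit register at address
0x0202 declared with CCI_REG16() decodes as follows:

#define SENSOR_REG_EXPOSURE	CCI_REG16(0x0202)	/* hypothetical register */

/*
 * CCI_REG_WIDTH_BYTES(SENSOR_REG_EXPOSURE) == 2
 * CCI_REG_WIDTH(SENSOR_REG_EXPOSURE)       == 16
 * CCI_REG_ADDR(SENSOR_REG_EXPOSURE)        == 0x0202
 */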
+
+/**
+ * cci_read() - Read a value from a single CCI register
+ *
+ * @map: Register map to read from
+ * @reg: Register address to read, use CCI_REG#() macros to encode reg width
+ * @val: Pointer to store read value
+ * @err: Optional pointer to store errors; if a previous error is set,
+ *       the read will be skipped
+ *
+ * Return: %0 on success or a negative error code on failure.
+ */
+int cci_read(struct regmap *map, u32 reg, u64 *val, int *err);
+
+/**
+ * cci_write() - Write a value to a single CCI register
+ *
+ * @map: Register map to write to
+ * @reg: Register address to write, use CCI_REG#() macros to encode reg width
+ * @val: Value to be written
+ * @err: Optional pointer to store errors; if a previous error is set,
+ *       the write will be skipped
+ *
+ * Return: %0 on success or a negative error code on failure.
+ */
+int cci_write(struct regmap *map, u32 reg, u64 val, int *err);
+
+/**
+ * cci_update_bits() - Perform a read/modify/write cycle on
+ * a single CCI register
+ *
+ * @map: Register map to update
+ * @reg: Register address to update, use CCI_REG#() macros to encode reg width
+ * @mask: Bitmask to change
+ * @val: New value for bitmask
+ * @err: Optional pointer to store errors; if a previous error is set,
+ *       the update will be skipped
+ *
+ * Note that this uses a read-modify-write cycle to update the bits; atomicity
+ * with regard to other cci_*() register access functions is NOT guaranteed.
+ *
+ * Return: %0 on success or a negative error code on failure.
+ */
+int cci_update_bits(struct regmap *map, u32 reg, u64 mask, u64 val, int *err);
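The @err pointer is what makes chained accesses convenient: once it holds an
error, subsequent calls become no-ops and the first failure is reported at the
end. A sketch with made-up register names:

int ret = 0;

cci_write(map, SENSOR_REG_MODE, 0x01, &ret);	/* hypothetical registers */
cci_write(map, SENSOR_REG_EXPOSURE, exposure, &ret);
cci_update_bits(map, SENSOR_REG_CTRL, BIT(0), 1, &ret);
if (ret)
	return ret;	/* first error; the calls after it were skipped */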
+
+/**
+ * cci_multi_reg_write() - Write multiple registers to the device
+ *
+ * @map: Register map to write to
+ * @regs: Array of structures containing register-address, -value pairs to be
+ * written, register-addresses use CCI_REG#() macros to encode reg width
+ * @num_regs: Number of registers to write
+ * @err: Optional pointer to store errors; if a previous error is set,
+ *       the write will be skipped
+ *
+ * Write multiple registers to the device; the register/value pairs may be
+ * supplied in any order, possibly not all in a single range.
+ *
+ * Use of the CCI_REG#() macros to encode reg width is mandatory.
+ *
+ * For raw lists of register-address/value pairs with only 8-bit wide
+ * writes, regmap_multi_reg_write() can be used instead.
+ *
+ * Return: %0 on success or a negative error code on failure.
+ */
+int cci_multi_reg_write(struct regmap *map, const struct cci_reg_sequence *regs,
+ unsigned int num_regs, int *err);
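A sequence is simply an array of &struct cci_reg_sequence; the register list
below is invented for illustration:

static const struct cci_reg_sequence sensor_init_regs[] = {
	{ CCI_REG8(0x0103), 0x01 },	/* software reset (hypothetical) */
	{ CCI_REG16(0x0340), 0x0d00 },	/* frame length, lines */
	{ CCI_REG8(0x0100), 0x01 },	/* streaming on */
};

ret = cci_multi_reg_write(map, sensor_init_regs,
			  ARRAY_SIZE(sensor_init_regs), NULL);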
+
+#if IS_ENABLED(CONFIG_V4L2_CCI_I2C)
+/**
+ * devm_cci_regmap_init_i2c() - Create regmap to use with cci_*() register
+ * access functions
+ *
+ * @client: i2c_client to create the regmap for
+ * @reg_addr_bits: register address width to use (8 or 16)
+ *
+ * Note that the memory for the created regmap is devm() managed and tied to
+ * the client.
+ *
+ * Return: a pointer to the new regmap on success, or an ERR_PTR() encoded
+ * error code on failure.
+ */
+struct regmap *devm_cci_regmap_init_i2c(struct i2c_client *client,
+ int reg_addr_bits);
+#endif
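Putting it together, an I2C sensor probe might create the regmap like this
(a sketch, assuming CONFIG_V4L2_CCI_I2C is enabled):

struct regmap *cci;

cci = devm_cci_regmap_init_i2c(client, 16);	/* 16-bit register addresses */
if (IS_ERR(cci))
	return PTR_ERR(cci);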
+
+#endif
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index 1bdaea248089..acf5be24a5ca 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -425,7 +425,7 @@ __v4l2_find_nearest_size(const void *array, size_t array_size,
/**
* v4l2_g_parm_cap - helper routine for vidioc_g_parm to fill this in by
- * calling the g_frame_interval op of the given subdev. It only works
+ * calling the get_frame_interval op of the given subdev. It only works
* for V4L2_BUF_TYPE_VIDEO_CAPTURE(_MPLANE), hence the _cap in the
* function name.
*
@@ -438,7 +438,7 @@ int v4l2_g_parm_cap(struct video_device *vdev,
/**
* v4l2_s_parm_cap - helper routine for vidioc_s_parm to fill this in by
- * calling the s_frame_interval op of the given subdev. It only works
+ * calling the set_frame_interval op of the given subdev. It only works
* for V4L2_BUF_TYPE_VIDEO_CAPTURE(_MPLANE), hence the _cap in the
* function name.
*
@@ -480,6 +480,7 @@ enum v4l2_pixel_encoding {
* @mem_planes: Number of memory planes, which includes the alpha plane (1 to 4).
* @comp_planes: Number of component planes, which includes the alpha plane (1 to 4).
* @bpp: Array of per-plane bytes per pixel
+ * @bpp_div: Array of per-plane bytes per pixel divisors to support fractional pixel sizes.
* @hdiv: Horizontal chroma subsampling factor
* @vdiv: Vertical chroma subsampling factor
* @block_w: Per-plane macroblock pixel width (optional)
@@ -491,6 +492,7 @@ struct v4l2_format_info {
u8 mem_planes;
u8 comp_planes;
u8 bpp[4];
+ u8 bpp_div[4];
u8 hdiv;
u8 vdiv;
u8 block_w[4];
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index 7788eeb3e2bb..59679a42b3e7 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -52,6 +52,10 @@ struct video_device;
* @p_hdr10_cll: Pointer to an HDR10 Content Light Level structure.
* @p_hdr10_mastering: Pointer to an HDR10 Mastering Display structure.
* @p_area: Pointer to an area.
+ * @p_av1_sequence: Pointer to an AV1 sequence structure.
+ * @p_av1_tile_group_entry: Pointer to an AV1 tile group entry structure.
+ * @p_av1_frame: Pointer to an AV1 frame structure.
+ * @p_av1_film_grain: Pointer to an AV1 film grain structure.
* @p: Pointer to a compound value.
* @p_const: Pointer to a constant compound value.
*/
@@ -81,6 +85,10 @@ union v4l2_ctrl_ptr {
struct v4l2_ctrl_hdr10_cll_info *p_hdr10_cll;
struct v4l2_ctrl_hdr10_mastering_display *p_hdr10_mastering;
struct v4l2_area *p_area;
+ struct v4l2_ctrl_av1_sequence *p_av1_sequence;
+ struct v4l2_ctrl_av1_tile_group_entry *p_av1_tile_group_entry;
+ struct v4l2_ctrl_av1_frame *p_av1_frame;
+ struct v4l2_ctrl_av1_film_grain *p_av1_film_grain;
void *p;
const void *p_const;
};
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index e0a13505f88d..d82dfdbf6e58 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -284,7 +284,7 @@ struct video_device {
struct v4l2_prio_state *prio;
/* device info */
- char name[32];
+ char name[64];
enum vfl_devnode_type vfl_type;
enum vfl_devnode_direction vfl_dir;
int minor;
diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
index 8a8977a33ec1..f6f111fae33c 100644
--- a/include/media/v4l2-device.h
+++ b/include/media/v4l2-device.h
@@ -13,8 +13,6 @@
#include <media/v4l2-subdev.h>
#include <media/v4l2-dev.h>
-#define V4L2_DEVICE_NAME_SIZE (20 + 16)
-
struct v4l2_ctrl_handler;
/**
@@ -49,7 +47,7 @@ struct v4l2_device {
struct media_device *mdev;
struct list_head subdevs;
spinlock_t lock;
- char name[V4L2_DEVICE_NAME_SIZE];
+ char name[36];
void (*notify)(struct v4l2_subdev *sd,
unsigned int notification, void *arg);
struct v4l2_ctrl_handler *ctrl_handler;
diff --git a/include/media/v4l2-event.h b/include/media/v4l2-event.h
index 4ffa914ade3a..3a0e2588361c 100644
--- a/include/media/v4l2-event.h
+++ b/include/media/v4l2-event.h
@@ -78,7 +78,7 @@ struct v4l2_subscribed_event {
unsigned int elems;
unsigned int first;
unsigned int in_use;
- struct v4l2_kevent events[];
+ struct v4l2_kevent events[] __counted_by(elems);
};
/**
diff --git a/include/media/v4l2-fwnode.h b/include/media/v4l2-fwnode.h
index 394d798f3dfa..f7c57c776589 100644
--- a/include/media/v4l2-fwnode.h
+++ b/include/media/v4l2-fwnode.h
@@ -21,10 +21,6 @@
#include <media/v4l2-mediabus.h>
-struct fwnode_handle;
-struct v4l2_async_notifier;
-struct v4l2_async_subdev;
-
/**
* struct v4l2_fwnode_endpoint - the endpoint data structure
* @base: fwnode endpoint of the v4l2_fwnode
@@ -393,72 +389,6 @@ int v4l2_fwnode_connector_add_link(struct fwnode_handle *fwnode,
int v4l2_fwnode_device_parse(struct device *dev,
struct v4l2_fwnode_device_properties *props);
-/**
- * typedef parse_endpoint_func - Driver's callback function to be called on
- * each V4L2 fwnode endpoint.
- *
- * @dev: pointer to &struct device
- * @vep: pointer to &struct v4l2_fwnode_endpoint
- * @asd: pointer to &struct v4l2_async_subdev
- *
- * Return:
- * * %0 on success
- * * %-ENOTCONN if the endpoint is to be skipped but this
- * should not be considered as an error
- * * %-EINVAL if the endpoint configuration is invalid
- */
-typedef int (*parse_endpoint_func)(struct device *dev,
- struct v4l2_fwnode_endpoint *vep,
- struct v4l2_async_subdev *asd);
-
-/**
- * v4l2_async_nf_parse_fwnode_endpoints - Parse V4L2 fwnode endpoints in a
- * device node
- * @dev: the device the endpoints of which are to be parsed
- * @notifier: notifier for @dev
- * @asd_struct_size: size of the driver's async sub-device struct, including
- * sizeof(struct v4l2_async_subdev). The &struct
- * v4l2_async_subdev shall be the first member of
- * the driver's async sub-device struct, i.e. both
- * begin at the same memory address.
- * @parse_endpoint: Driver's callback function called on each V4L2 fwnode
- * endpoint. Optional.
- *
- * DEPRECATED! This function is deprecated. Don't use it in new drivers.
- * Instead see an example in cio2_parse_firmware() function in
- * drivers/media/pci/intel/ipu3/ipu3-cio2.c .
- *
- * Parse the fwnode endpoints of the @dev device and populate the async sub-
- * devices list in the notifier. The @parse_endpoint callback function is
- * called for each endpoint with the corresponding async sub-device pointer to
- * let the caller initialize the driver-specific part of the async sub-device
- * structure.
- *
- * The notifier memory shall be zeroed before this function is called on the
- * notifier.
- *
- * This function may not be called on a registered notifier and may be called on
- * a notifier only once.
- *
- * The &struct v4l2_fwnode_endpoint passed to the callback function
- * @parse_endpoint is released once the function is finished. If there is a need
- * to retain that configuration, the user needs to allocate memory for it.
- *
- * Any notifier populated using this function must be released with a call to
- * v4l2_async_nf_cleanup() after it has been unregistered and the async
- * sub-devices are no longer in use, even if the function returned an error.
- *
- * Return: %0 on success, including when no async sub-devices are found
- * %-ENOMEM if memory allocation failed
- * %-EINVAL if graph or endpoint parsing failed
- * Other error codes as returned by @parse_endpoint
- */
-int
-v4l2_async_nf_parse_fwnode_endpoints(struct device *dev,
- struct v4l2_async_notifier *notifier,
- size_t asd_struct_size,
- parse_endpoint_func parse_endpoint);
-
/* Helper macros to access the connector links. */
/** v4l2_connector_last_link - Helper macro to get the first
diff --git a/include/media/v4l2-mc.h b/include/media/v4l2-mc.h
index b39586dfba35..ed0a44b6eada 100644
--- a/include/media/v4l2-mc.h
+++ b/include/media/v4l2-mc.h
@@ -143,6 +143,9 @@ int v4l2_create_fwnode_links(struct v4l2_subdev *src_sd,
* v4l2_pipeline_pm_get - Increase the use count of a pipeline
* @entity: The root entity of a pipeline
*
+ * THIS FUNCTION IS DEPRECATED. DO NOT USE IN NEW DRIVERS. USE RUNTIME PM
+ * ON SUB-DEVICE DRIVERS INSTEAD.
+ *
* Update the use count of all entities in the pipeline and power entities on.
*
* This function is intended to be called in video node open. It uses
@@ -157,6 +160,9 @@ int v4l2_pipeline_pm_get(struct media_entity *entity);
* v4l2_pipeline_pm_put - Decrease the use count of a pipeline
* @entity: The root entity of a pipeline
*
+ * THIS FUNCTION IS DEPRECATED. DO NOT USE IN NEW DRIVERS. USE RUNTIME PM
+ * ON SUB-DEVICE DRIVERS INSTEAD.
+ *
* Update the use count of all entities in the pipeline and power entities off.
*
* This function is intended to be called in video node release. It uses
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index bb9de6a899e0..7f1af1f7f912 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -84,6 +84,12 @@ struct v4l2_m2m_queue_ctx {
* @last_src_buf: indicate the last source buffer for draining
 * @next_buf_last: next capture queued buffer will be tagged as last
* @has_stopped: indicate the device has been stopped
+ * @ignore_cap_streaming: If true, job_ready can be called even if the CAPTURE
+ * queue is not streaming. This allows firmware to
+ * analyze the bitstream header which arrives on the
+ * OUTPUT queue. The driver must implement the job_ready
+ * callback correctly to make sure that the requirements
+ * for actual decoding are met.
* @m2m_dev: opaque pointer to the internal data to handle M2M context
* @cap_q_ctx: Capture (output to memory) queue context
* @out_q_ctx: Output (input from memory) queue context
@@ -106,6 +112,7 @@ struct v4l2_m2m_ctx {
struct vb2_v4l2_buffer *last_src_buf;
bool next_buf_last;
bool has_stopped;
+ bool ignore_cap_streaming;
/* internal use only */
struct v4l2_m2m_dev *m2m_dev;
@@ -593,7 +600,14 @@ void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
static inline
unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
{
- return m2m_ctx->out_q_ctx.num_rdy;
+ unsigned int num_buf_rdy;
+ unsigned long flags;
+
+ spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
+ num_buf_rdy = m2m_ctx->out_q_ctx.num_rdy;
+ spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
+
+ return num_buf_rdy;
}
/**
@@ -605,7 +619,14 @@ unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
static inline
unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
{
- return m2m_ctx->cap_q_ctx.num_rdy;
+ unsigned int num_buf_rdy;
+ unsigned long flags;
+
+ spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
+ num_buf_rdy = m2m_ctx->cap_q_ctx.num_rdy;
+ spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
+
+ return num_buf_rdy;
}
/**
@@ -647,7 +668,7 @@ v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx);
/**
- * v4l2_m2m_last_src_buf() - return last destination buffer from the list of
+ * v4l2_m2m_last_src_buf() - return last source buffer from the list of
* ready buffers
*
* @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index b325df0d54d6..a9e6b8146279 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -446,16 +446,12 @@ enum v4l2_subdev_pre_streamon_flags {
* @s_stream: start (enabled == 1) or stop (enabled == 0) streaming on the
* sub-device. Failure on stop will remove any resources acquired in
* streaming start, while the error code is still returned by the driver.
- * Also see call_s_stream wrapper in v4l2-subdev.c.
+ * The caller shall track the subdev state, and shall not start or stop an
+ * already started or stopped subdev. Also see call_s_stream wrapper in
+ * v4l2-subdev.c.
*
* @g_pixelaspect: callback to return the pixelaspect ratio.
*
- * @g_frame_interval: callback for VIDIOC_SUBDEV_G_FRAME_INTERVAL()
- * ioctl handler code.
- *
- * @s_frame_interval: callback for VIDIOC_SUBDEV_S_FRAME_INTERVAL()
- * ioctl handler code.
- *
* @s_dv_timings: Set custom dv timings in the sub device. This is used
* when sub device is capable of setting detailed timing information
* in the hardware to generate/detect the video signal.
@@ -494,10 +490,6 @@ struct v4l2_subdev_video_ops {
int (*g_input_status)(struct v4l2_subdev *sd, u32 *status);
int (*s_stream)(struct v4l2_subdev *sd, int enable);
int (*g_pixelaspect)(struct v4l2_subdev *sd, struct v4l2_fract *aspect);
- int (*g_frame_interval)(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *interval);
- int (*s_frame_interval)(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *interval);
int (*s_dv_timings)(struct v4l2_subdev *sd,
struct v4l2_dv_timings *timings);
int (*g_dv_timings)(struct v4l2_subdev *sd,
@@ -686,21 +678,16 @@ struct v4l2_subdev_ir_ops {
/**
* struct v4l2_subdev_pad_config - Used for storing subdev pad information.
*
- * @try_fmt: &struct v4l2_mbus_framefmt
- * @try_crop: &struct v4l2_rect to be used for crop
- * @try_compose: &struct v4l2_rect to be used for compose
- *
- * This structure only needs to be passed to the pad op if the 'which' field
- * of the main argument is set to %V4L2_SUBDEV_FORMAT_TRY. For
- * %V4L2_SUBDEV_FORMAT_ACTIVE it is safe to pass %NULL.
- *
- * Note: This struct is also used in active state, and the 'try' prefix is
- * historical and to be removed.
+ * @format: &struct v4l2_mbus_framefmt
+ * @crop: &struct v4l2_rect to be used for crop
+ * @compose: &struct v4l2_rect to be used for compose
+ * @interval: frame interval
*/
struct v4l2_subdev_pad_config {
- struct v4l2_mbus_framefmt try_fmt;
- struct v4l2_rect try_crop;
- struct v4l2_rect try_compose;
+ struct v4l2_mbus_framefmt format;
+ struct v4l2_rect crop;
+ struct v4l2_rect compose;
+ struct v4l2_fract interval;
};
/**
@@ -712,6 +699,7 @@ struct v4l2_subdev_pad_config {
* @fmt: &struct v4l2_mbus_framefmt
* @crop: &struct v4l2_rect to be used for crop
* @compose: &struct v4l2_rect to be used for compose
+ * @interval: frame interval
*
* This structure stores configuration for a stream.
*/
@@ -723,6 +711,7 @@ struct v4l2_subdev_stream_config {
struct v4l2_mbus_framefmt fmt;
struct v4l2_rect crop;
struct v4l2_rect compose;
+ struct v4l2_fract interval;
};
/**
@@ -754,6 +743,7 @@ struct v4l2_subdev_krouting {
*
* @_lock: default for 'lock'
* @lock: mutex for the state. May be replaced by the user.
+ * @sd: the sub-device which the state is related to
* @pads: &struct v4l2_subdev_pad_config array
* @routing: routing table for the subdev
* @stream_configs: stream configurations (only for V4L2_SUBDEV_FL_STREAMS)
@@ -766,6 +756,7 @@ struct v4l2_subdev_state {
/* lock for the struct v4l2_subdev_state fields */
struct mutex _lock;
struct mutex *lock;
+ struct v4l2_subdev *sd;
struct v4l2_subdev_pad_config *pads;
struct v4l2_subdev_krouting routing;
struct v4l2_subdev_stream_configs stream_configs;
@@ -774,7 +765,6 @@ struct v4l2_subdev_state {
/**
* struct v4l2_subdev_pad_ops - v4l2-subdev pad level operations
*
- * @init_cfg: initialize the pad config to default values
* @enum_mbus_code: callback for VIDIOC_SUBDEV_ENUM_MBUS_CODE() ioctl handler
* code.
* @enum_frame_size: callback for VIDIOC_SUBDEV_ENUM_FRAME_SIZE() ioctl handler
@@ -791,6 +781,12 @@ struct v4l2_subdev_state {
*
* @set_selection: callback for VIDIOC_SUBDEV_S_SELECTION() ioctl handler code.
*
+ * @get_frame_interval: callback for VIDIOC_SUBDEV_G_FRAME_INTERVAL()
+ * ioctl handler code.
+ *
+ * @set_frame_interval: callback for VIDIOC_SUBDEV_S_FRAME_INTERVAL()
+ * ioctl handler code.
+ *
* @get_edid: callback for VIDIOC_SUBDEV_G_EDID() ioctl handler code.
*
* @set_edid: callback for VIDIOC_SUBDEV_S_EDID() ioctl handler code.
@@ -822,8 +818,9 @@ struct v4l2_subdev_state {
* operation shall fail if the pad index it has been called on
* is not valid or in case of unrecoverable failures.
*
- * @set_routing: enable or disable data connection routes described in the
- * subdevice routing table.
+ * @set_routing: Enable or disable data connection routes described in the
+ * subdevice routing table. Subdevs that implement this operation
+ * must set the V4L2_SUBDEV_FL_STREAMS flag.
*
* @enable_streams: Enable the streams defined in streams_mask on the given
* source pad. Subdevs that implement this operation must use the active
@@ -838,8 +835,6 @@ struct v4l2_subdev_state {
* directly, use v4l2_subdev_disable_streams() instead.
*/
struct v4l2_subdev_pad_ops {
- int (*init_cfg)(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *state);
int (*enum_mbus_code)(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
struct v4l2_subdev_mbus_code_enum *code);
@@ -861,6 +856,12 @@ struct v4l2_subdev_pad_ops {
int (*set_selection)(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
struct v4l2_subdev_selection *sel);
+ int (*get_frame_interval)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_frame_interval *interval);
+ int (*set_frame_interval)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_frame_interval *interval);
int (*get_edid)(struct v4l2_subdev *sd, struct v4l2_edid *edid);
int (*set_edid)(struct v4l2_subdev *sd, struct v4l2_edid *edid);
int (*dv_timings_cap)(struct v4l2_subdev *sd,
@@ -916,6 +917,8 @@ struct v4l2_subdev_ops {
/**
* struct v4l2_subdev_internal_ops - V4L2 subdev internal ops
*
+ * @init_state: initialize the subdev state to default values
+ *
* @registered: called when this subdev is registered. When called the v4l2_dev
* field is set to the correct v4l2_device.
*
@@ -941,6 +944,8 @@ struct v4l2_subdev_ops {
* these ops.
*/
struct v4l2_subdev_internal_ops {
+ int (*init_state)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state);
int (*registered)(struct v4l2_subdev *sd);
void (*unregistered)(struct v4l2_subdev *sd);
int (*open)(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh);
@@ -948,8 +953,6 @@ struct v4l2_subdev_internal_ops {
void (*release)(struct v4l2_subdev *sd);
};
-#define V4L2_SUBDEV_NAME_SIZE 32
-
/* Set this flag if this subdev is a i2c device. */
#define V4L2_SUBDEV_FL_IS_I2C (1U << 0)
/* Set this flag if this subdev is a spi device. */
@@ -1020,12 +1023,14 @@ struct v4l2_subdev_platform_data {
* @dev: pointer to the physical device, if any
* @fwnode: The fwnode_handle of the subdev, usually the same as
* either dev->of_node->fwnode or dev->fwnode (whichever is non-NULL).
- * @async_list: Links this subdev to a global subdev_list or @notifier->done
- * list.
- * @asd: Pointer to respective &struct v4l2_async_subdev.
- * @notifier: Pointer to the managing notifier.
+ * @async_list: Links this subdev to a global subdev_list or
+ * @notifier->done_list.
+ * @async_subdev_endpoint_list: List entry in async_subdev_endpoint_entry of
+ * &struct v4l2_async_subdev_endpoint.
* @subdev_notifier: A sub-device notifier implicitly registered for the sub-
* device using v4l2_async_register_subdev_sensor().
+ * @asc_list: Async connection list, of &struct
+ * v4l2_async_connection.subdev_entry.
* @pdata: common part of subdevice platform data
* @state_lock: A pointer to a lock used for all the subdev's states, set by the
* driver. This is optional. If NULL, each state instance will get
@@ -1057,7 +1062,7 @@ struct v4l2_subdev {
const struct v4l2_subdev_ops *ops;
const struct v4l2_subdev_internal_ops *internal_ops;
struct v4l2_ctrl_handler *ctrl_handler;
- char name[V4L2_SUBDEV_NAME_SIZE];
+ char name[52];
u32 grp_id;
void *dev_priv;
void *host_priv;
@@ -1065,9 +1070,9 @@ struct v4l2_subdev {
struct device *dev;
struct fwnode_handle *fwnode;
struct list_head async_list;
- struct v4l2_async_subdev *asd;
- struct v4l2_async_notifier *notifier;
+ struct list_head async_subdev_endpoint_list;
struct v4l2_async_notifier *subdev_notifier;
+ struct list_head asc_list;
struct v4l2_subdev_platform_data *pdata;
struct mutex *state_lock;
@@ -1139,83 +1144,6 @@ struct v4l2_subdev_fh {
#define to_v4l2_subdev_fh(fh) \
container_of(fh, struct v4l2_subdev_fh, vfh)
-#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
-
-/**
- * v4l2_subdev_get_pad_format - ancillary routine to call
- * &struct v4l2_subdev_pad_config->try_fmt
- *
- * @sd: pointer to &struct v4l2_subdev
- * @state: pointer to &struct v4l2_subdev_state
- * @pad: index of the pad in the &struct v4l2_subdev_state->pads array
- */
-static inline struct v4l2_mbus_framefmt *
-v4l2_subdev_get_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *state,
- unsigned int pad)
-{
- if (WARN_ON(!state))
- return NULL;
- if (WARN_ON(pad >= sd->entity.num_pads))
- pad = 0;
- return &state->pads[pad].try_fmt;
-}
-
-/**
- * v4l2_subdev_get_pad_crop - ancillary routine to call
- * &struct v4l2_subdev_pad_config->try_crop
- *
- * @sd: pointer to &struct v4l2_subdev
- * @state: pointer to &struct v4l2_subdev_state.
- * @pad: index of the pad in the &struct v4l2_subdev_state->pads array.
- */
-static inline struct v4l2_rect *
-v4l2_subdev_get_pad_crop(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *state,
- unsigned int pad)
-{
- if (WARN_ON(!state))
- return NULL;
- if (WARN_ON(pad >= sd->entity.num_pads))
- pad = 0;
- return &state->pads[pad].try_crop;
-}
-
-/**
- * v4l2_subdev_get_pad_compose - ancillary routine to call
- * &struct v4l2_subdev_pad_config->try_compose
- *
- * @sd: pointer to &struct v4l2_subdev
- * @state: pointer to &struct v4l2_subdev_state.
- * @pad: index of the pad in the &struct v4l2_subdev_state->pads array.
- */
-static inline struct v4l2_rect *
-v4l2_subdev_get_pad_compose(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *state,
- unsigned int pad)
-{
- if (WARN_ON(!state))
- return NULL;
- if (WARN_ON(pad >= sd->entity.num_pads))
- pad = 0;
- return &state->pads[pad].try_compose;
-}
-
-/*
- * Temprary helpers until uses of v4l2_subdev_get_try_* functions have been
- * renamed
- */
-#define v4l2_subdev_get_try_format(sd, state, pad) \
- v4l2_subdev_get_pad_format(sd, state, pad)
-
-#define v4l2_subdev_get_try_crop(sd, state, pad) \
- v4l2_subdev_get_pad_crop(sd, state, pad)
-
-#define v4l2_subdev_get_try_compose(sd, state, pad) \
- v4l2_subdev_get_pad_compose(sd, state, pad)
-
-#endif /* CONFIG_VIDEO_V4L2_SUBDEV_API */
-
extern const struct v4l2_file_operations v4l2_subdev_fops;
/**
@@ -1383,93 +1311,111 @@ int __v4l2_subdev_init_finalize(struct v4l2_subdev *sd, const char *name,
* v4l2_subdev_cleanup() - Releases the resources allocated by the subdevice
* @sd: The subdevice
*
- * This function will release the resources allocated in
- * v4l2_subdev_init_finalize.
+ * Clean up a V4L2 async sub-device. Must be called for a sub-device as part of
+ * its release if resources have been associated with it using
+ * v4l2_async_subdev_endpoint_add() or v4l2_subdev_init_finalize().
*/
void v4l2_subdev_cleanup(struct v4l2_subdev *sd);
-/**
- * v4l2_subdev_lock_state() - Locks the subdev state
- * @state: The subdevice state
- *
- * Locks the given subdev state.
- *
- * The state must be unlocked with v4l2_subdev_unlock_state() after use.
+/*
+ * A macro to generate the macro or function name for the sub-device state
+ * access wrapper macros below.
*/
-static inline void v4l2_subdev_lock_state(struct v4l2_subdev_state *state)
-{
- mutex_lock(state->lock);
-}
+#define __v4l2_subdev_state_gen_call(NAME, _1, ARG, ...) \
+ __v4l2_subdev_state_get_ ## NAME ## ARG
/**
- * v4l2_subdev_unlock_state() - Unlocks the subdev state
- * @state: The subdevice state
+ * v4l2_subdev_state_get_format() - Get pointer to a stream format
+ * @state: subdevice state
+ * @pad: pad id
+ * @...: stream id (optional argument)
*
- * Unlocks the given subdev state.
+ * This returns a pointer to &struct v4l2_mbus_framefmt for the given pad +
+ * stream in the subdev state.
+ *
+ * For stream-unaware drivers the format for the corresponding pad is returned.
+ * If the pad does not exist, NULL is returned.
*/
-static inline void v4l2_subdev_unlock_state(struct v4l2_subdev_state *state)
-{
- mutex_unlock(state->lock);
-}
+/*
+ * Wrap v4l2_subdev_state_get_format(), allowing the function to be called with
+ * two or three arguments. The purpose of the __v4l2_subdev_state_gen_call()
+ * macro above is to come up with the name of the function or macro to call,
+ * using the last two arguments (_stream and _pad). The selected function or
+ * macro is then called using the arguments specified by the caller. A similar
+ * arrangement is used for v4l2_subdev_state_get_crop() and
+ * v4l2_subdev_state_get_compose() below.
+ */
+#define v4l2_subdev_state_get_format(state, pad, ...) \
+ __v4l2_subdev_state_gen_call(format, ##__VA_ARGS__, , _pad) \
+ (state, pad, ##__VA_ARGS__)
+#define __v4l2_subdev_state_get_format_pad(state, pad) \
+ __v4l2_subdev_state_get_format(state, pad, 0)
+struct v4l2_mbus_framefmt *
+__v4l2_subdev_state_get_format(struct v4l2_subdev_state *state,
+ unsigned int pad, u32 stream);
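Both call forms end up in the same underlying accessor; an illustrative
fragment with hypothetical SINK_PAD and stream_id identifiers:

/* Stream-unaware driver: two arguments, stream 0 is implied. */
fmt = v4l2_subdev_state_get_format(state, SINK_PAD);

/* Streams-aware driver: the third argument selects the stream. */
fmt = v4l2_subdev_state_get_format(state, SINK_PAD, stream_id);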
/**
- * v4l2_subdev_get_unlocked_active_state() - Checks that the active subdev state
- * is unlocked and returns it
- * @sd: The subdevice
+ * v4l2_subdev_state_get_crop() - Get pointer to a stream crop rectangle
+ * @state: subdevice state
+ * @pad: pad id
+ * @...: stream id (optional argument)
*
- * Returns the active state for the subdevice, or NULL if the subdev does not
- * support active state. If the state is not NULL, calls
- * lockdep_assert_not_held() to issue a warning if the state is locked.
+ * This returns a pointer to the crop rectangle for the given pad + stream in
+ * the subdev state.
*
- * This function is to be used e.g. when getting the active state for the sole
- * purpose of passing it forward, without accessing the state fields.
+ * For stream-unaware drivers the crop rectangle for the corresponding pad is
+ * returned. If the pad does not exist, NULL is returned.
*/
-static inline struct v4l2_subdev_state *
-v4l2_subdev_get_unlocked_active_state(struct v4l2_subdev *sd)
-{
- if (sd->active_state)
- lockdep_assert_not_held(sd->active_state->lock);
- return sd->active_state;
-}
+#define v4l2_subdev_state_get_crop(state, pad, ...) \
+ __v4l2_subdev_state_gen_call(crop, ##__VA_ARGS__, , _pad) \
+ (state, pad, ##__VA_ARGS__)
+#define __v4l2_subdev_state_get_crop_pad(state, pad) \
+ __v4l2_subdev_state_get_crop(state, pad, 0)
+struct v4l2_rect *
+__v4l2_subdev_state_get_crop(struct v4l2_subdev_state *state, unsigned int pad,
+ u32 stream);
/**
- * v4l2_subdev_get_locked_active_state() - Checks that the active subdev state
- * is locked and returns it
- *
- * @sd: The subdevice
+ * v4l2_subdev_state_get_compose() - Get pointer to a stream compose rectangle
+ * @state: subdevice state
+ * @pad: pad id
+ * @...: stream id (optional argument)
*
- * Returns the active state for the subdevice, or NULL if the subdev does not
- * support active state. If the state is not NULL, calls lockdep_assert_held()
- * to issue a warning if the state is not locked.
+ * This returns a pointer to the compose rectangle for the given pad + stream
+ * in the subdev state.
*
- * This function is to be used when the caller knows that the active state is
- * already locked.
+ * For stream-unaware drivers the compose rectangle for the corresponding pad is
+ * returned. If the pad does not exist, NULL is returned.
*/
-static inline struct v4l2_subdev_state *
-v4l2_subdev_get_locked_active_state(struct v4l2_subdev *sd)
-{
- if (sd->active_state)
- lockdep_assert_held(sd->active_state->lock);
- return sd->active_state;
-}
+#define v4l2_subdev_state_get_compose(state, pad, ...) \
+ __v4l2_subdev_state_gen_call(compose, ##__VA_ARGS__, , _pad) \
+ (state, pad, ##__VA_ARGS__)
+#define __v4l2_subdev_state_get_compose_pad(state, pad) \
+ __v4l2_subdev_state_get_compose(state, pad, 0)
+struct v4l2_rect *
+__v4l2_subdev_state_get_compose(struct v4l2_subdev_state *state,
+ unsigned int pad, u32 stream);
/**
- * v4l2_subdev_lock_and_get_active_state() - Locks and returns the active subdev
- * state for the subdevice
- * @sd: The subdevice
+ * v4l2_subdev_state_get_interval() - Get pointer to a stream frame interval
+ * @state: subdevice state
+ * @pad: pad id
+ * @...: stream id (optional argument)
*
- * Returns the locked active state for the subdevice, or NULL if the subdev
- * does not support active state.
+ * This returns a pointer to the frame interval for the given pad + stream in
+ * the subdev state.
*
- * The state must be unlocked with v4l2_subdev_unlock_state() after use.
+ * For stream-unaware drivers the frame interval for the corresponding pad is
+ * returned. If the pad does not exist, NULL is returned.
*/
-static inline struct v4l2_subdev_state *
-v4l2_subdev_lock_and_get_active_state(struct v4l2_subdev *sd)
-{
- if (sd->active_state)
- v4l2_subdev_lock_state(sd->active_state);
- return sd->active_state;
-}
+#define v4l2_subdev_state_get_interval(state, pad, ...) \
+ __v4l2_subdev_state_gen_call(interval, ##__VA_ARGS__, , _pad) \
+ (state, pad, ##__VA_ARGS__)
+#define __v4l2_subdev_state_get_interval_pad(state, pad) \
+ __v4l2_subdev_state_get_interval(state, pad, 0)
+struct v4l2_fract *
+__v4l2_subdev_state_get_interval(struct v4l2_subdev_state *state,
+ unsigned int pad, u32 stream);
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
@@ -1491,6 +1437,24 @@ int v4l2_subdev_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state,
struct v4l2_subdev_format *format);
/**
+ * v4l2_subdev_get_frame_interval() - Fill frame interval based on state
+ * @sd: subdevice
+ * @state: subdevice state
+ * @fi: pointer to &struct v4l2_subdev_frame_interval
+ *
+ * Fill the @fi->interval field based on the pad and stream information in the
+ * @fi struct.
+ *
+ * This function can be used by subdev drivers that support the active state to
+ * implement v4l2_subdev_pad_ops.get_frame_interval if the driver does not need
+ * to do anything special in its get_frame_interval op.
+ *
+ * Returns 0 on success, error value otherwise.
+ */
+int v4l2_subdev_get_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_frame_interval *fi);
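For a driver that keeps the interval in the state via its set_frame_interval
op, wiring the helper into the pad ops is all that is needed; the table below
is a sketch with hypothetical driver names:

static const struct v4l2_subdev_pad_ops sensor_pad_ops = {
	.get_frame_interval = v4l2_subdev_get_frame_interval,
	.set_frame_interval = sensor_set_frame_interval,	/* driver-specific */
};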
+
+/**
* v4l2_subdev_set_routing() - Set given routing to subdev state
* @sd: The subdevice
* @state: The subdevice state
@@ -1532,56 +1496,10 @@ __v4l2_subdev_next_active_route(const struct v4l2_subdev_krouting *routing,
*/
int v4l2_subdev_set_routing_with_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
- struct v4l2_subdev_krouting *routing,
+ const struct v4l2_subdev_krouting *routing,
const struct v4l2_mbus_framefmt *fmt);
/**
- * v4l2_subdev_state_get_stream_format() - Get pointer to a stream format
- * @state: subdevice state
- * @pad: pad id
- * @stream: stream id
- *
- * This returns a pointer to &struct v4l2_mbus_framefmt for the given pad +
- * stream in the subdev state.
- *
- * If the state does not contain the given pad + stream, NULL is returned.
- */
-struct v4l2_mbus_framefmt *
-v4l2_subdev_state_get_stream_format(struct v4l2_subdev_state *state,
- unsigned int pad, u32 stream);
-
-/**
- * v4l2_subdev_state_get_stream_crop() - Get pointer to a stream crop rectangle
- * @state: subdevice state
- * @pad: pad id
- * @stream: stream id
- *
- * This returns a pointer to crop rectangle for the given pad + stream in the
- * subdev state.
- *
- * If the state does not contain the given pad + stream, NULL is returned.
- */
-struct v4l2_rect *
-v4l2_subdev_state_get_stream_crop(struct v4l2_subdev_state *state,
- unsigned int pad, u32 stream);
-
-/**
- * v4l2_subdev_state_get_stream_compose() - Get pointer to a stream compose
- * rectangle
- * @state: subdevice state
- * @pad: pad id
- * @stream: stream id
- *
- * This returns a pointer to compose rectangle for the given pad + stream in the
- * subdev state.
- *
- * If the state does not contain the given pad + stream, NULL is returned.
- */
-struct v4l2_rect *
-v4l2_subdev_state_get_stream_compose(struct v4l2_subdev_state *state,
- unsigned int pad, u32 stream);
-
-/**
* v4l2_subdev_routing_find_opposite_end() - Find the opposite stream
* @routing: routing used to find the opposite side
* @pad: pad id
@@ -1783,6 +1701,89 @@ int v4l2_subdev_s_stream_helper(struct v4l2_subdev *sd, int enable);
#endif /* CONFIG_MEDIA_CONTROLLER */
/**
+ * v4l2_subdev_lock_state() - Locks the subdev state
+ * @state: The subdevice state
+ *
+ * Locks the given subdev state.
+ *
+ * The state must be unlocked with v4l2_subdev_unlock_state() after use.
+ */
+static inline void v4l2_subdev_lock_state(struct v4l2_subdev_state *state)
+{
+ mutex_lock(state->lock);
+}
+
+/**
+ * v4l2_subdev_unlock_state() - Unlocks the subdev state
+ * @state: The subdevice state
+ *
+ * Unlocks the given subdev state.
+ */
+static inline void v4l2_subdev_unlock_state(struct v4l2_subdev_state *state)
+{
+ mutex_unlock(state->lock);
+}
+
+/**
+ * v4l2_subdev_get_unlocked_active_state() - Checks that the active subdev state
+ * is unlocked and returns it
+ * @sd: The subdevice
+ *
+ * Returns the active state for the subdevice, or NULL if the subdev does not
+ * support active state. If the state is not NULL, calls
+ * lockdep_assert_not_held() to issue a warning if the state is locked.
+ *
+ * This function is to be used e.g. when getting the active state for the sole
+ * purpose of passing it forward, without accessing the state fields.
+ */
+static inline struct v4l2_subdev_state *
+v4l2_subdev_get_unlocked_active_state(struct v4l2_subdev *sd)
+{
+ if (sd->active_state)
+ lockdep_assert_not_held(sd->active_state->lock);
+ return sd->active_state;
+}
+
+/**
+ * v4l2_subdev_get_locked_active_state() - Checks that the active subdev state
+ * is locked and returns it
+ *
+ * @sd: The subdevice
+ *
+ * Returns the active state for the subdevice, or NULL if the subdev does not
+ * support active state. If the state is not NULL, calls lockdep_assert_held()
+ * to issue a warning if the state is not locked.
+ *
+ * This function is to be used when the caller knows that the active state is
+ * already locked.
+ */
+static inline struct v4l2_subdev_state *
+v4l2_subdev_get_locked_active_state(struct v4l2_subdev *sd)
+{
+ if (sd->active_state)
+ lockdep_assert_held(sd->active_state->lock);
+ return sd->active_state;
+}
+
+/**
+ * v4l2_subdev_lock_and_get_active_state() - Locks and returns the active subdev
+ * state for the subdevice
+ * @sd: The subdevice
+ *
+ * Returns the locked active state for the subdevice, or NULL if the subdev
+ * does not support active state.
+ *
+ * The state must be unlocked with v4l2_subdev_unlock_state() after use.
+ */
+static inline struct v4l2_subdev_state *
+v4l2_subdev_lock_and_get_active_state(struct v4l2_subdev *sd)
+{
+ if (sd->active_state)
+ v4l2_subdev_lock_state(sd->active_state);
+ return sd->active_state;
+}
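Outside the pad operations (which are handed a locked state), the usual
pattern is the lock-and-get helper, e.g. in a hypothetical s_stream handler:

static int sensor_s_stream(struct v4l2_subdev *sd, int enable)
{
	struct v4l2_subdev_state *state;
	int ret;

	state = v4l2_subdev_lock_and_get_active_state(sd);
	/* sensor_start()/sensor_stop() are hypothetical driver helpers. */
	ret = enable ? sensor_start(sd, state) : sensor_stop(sd);
	v4l2_subdev_unlock_state(state);

	return ret;
}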
+
+/**
* v4l2_subdev_init - initializes the sub-device struct
*
* @sd: pointer to the &struct v4l2_subdev to be initialized
diff --git a/include/media/videobuf-core.h b/include/media/videobuf-core.h
deleted file mode 100644
index 2e01b2e9a1c0..000000000000
--- a/include/media/videobuf-core.h
+++ /dev/null
@@ -1,233 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * generic helper functions for handling video4linux capture buffers
- *
- * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
- *
- * Highly based on video-buf written originally by:
- * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
- * (c) 2006 Mauro Carvalho Chehab, <mchehab@kernel.org>
- * (c) 2006 Ted Walther and John Sokol
- */
-
-#ifndef _VIDEOBUF_CORE_H
-#define _VIDEOBUF_CORE_H
-
-#include <linux/poll.h>
-#include <linux/videodev2.h>
-
-#define UNSET (-1U)
-
-
-struct videobuf_buffer;
-struct videobuf_queue;
-
-/* --------------------------------------------------------------------- */
-
-/*
- * A small set of helper functions to manage video4linux buffers.
- *
- * struct videobuf_buffer holds the data structures used by the helper
- * functions, additionally some commonly used fields for v4l buffers
- * (width, height, lists, waitqueue) are in there. That struct should
- * be used as first element in the drivers buffer struct.
- *
- * about the mmap helpers (videobuf_mmap_*):
- *
- * The mmaper function allows to map any subset of contiguous buffers.
- * This includes one mmap() call for all buffers (which the original
- * video4linux API uses) as well as one mmap() for every single buffer
- * (which v4l2 uses).
- *
- * If there is a valid mapping for a buffer, buffer->baddr/bsize holds
- * userspace address + size which can be fed into the
- * videobuf_dma_init_user function listed above.
- *
- */
-
-struct videobuf_mapping {
- unsigned int count;
- struct videobuf_queue *q;
-};
-
-enum videobuf_state {
- VIDEOBUF_NEEDS_INIT = 0,
- VIDEOBUF_PREPARED = 1,
- VIDEOBUF_QUEUED = 2,
- VIDEOBUF_ACTIVE = 3,
- VIDEOBUF_DONE = 4,
- VIDEOBUF_ERROR = 5,
- VIDEOBUF_IDLE = 6,
-};
-
-struct videobuf_buffer {
- unsigned int i;
- u32 magic;
-
- /* info about the buffer */
- unsigned int width;
- unsigned int height;
- unsigned int bytesperline; /* use only if != 0 */
- unsigned long size;
- enum v4l2_field field;
- enum videobuf_state state;
- struct list_head stream; /* QBUF/DQBUF list */
-
- /* touched by irq handler */
- struct list_head queue;
- wait_queue_head_t done;
- unsigned int field_count;
- u64 ts;
-
- /* Memory type */
- enum v4l2_memory memory;
-
- /* buffer size */
- size_t bsize;
-
- /* buffer offset (mmap + overlay) */
- size_t boff;
-
- /* buffer addr (userland ptr!) */
- unsigned long baddr;
-
- /* for mmap'ed buffers */
- struct videobuf_mapping *map;
-
- /* Private pointer to allow specific methods to store their data */
- int privsize;
- void *priv;
-};
-
-struct videobuf_queue_ops {
- int (*buf_setup)(struct videobuf_queue *q,
- unsigned int *count, unsigned int *size);
- int (*buf_prepare)(struct videobuf_queue *q,
- struct videobuf_buffer *vb,
- enum v4l2_field field);
- void (*buf_queue)(struct videobuf_queue *q,
- struct videobuf_buffer *vb);
- void (*buf_release)(struct videobuf_queue *q,
- struct videobuf_buffer *vb);
-};
-
-#define MAGIC_QTYPE_OPS 0x12261003
-
-/* Helper operations - device type dependent */
-struct videobuf_qtype_ops {
- u32 magic;
-
- struct videobuf_buffer *(*alloc_vb)(size_t size);
- void *(*vaddr) (struct videobuf_buffer *buf);
- int (*iolock) (struct videobuf_queue *q,
- struct videobuf_buffer *vb,
- struct v4l2_framebuffer *fbuf);
- int (*sync) (struct videobuf_queue *q,
- struct videobuf_buffer *buf);
- int (*mmap_mapper) (struct videobuf_queue *q,
- struct videobuf_buffer *buf,
- struct vm_area_struct *vma);
-};
-
-struct videobuf_queue {
- struct mutex vb_lock;
- struct mutex *ext_lock;
- spinlock_t *irqlock;
- struct device *dev;
-
- wait_queue_head_t wait; /* wait if queue is empty */
-
- enum v4l2_buf_type type;
- unsigned int msize;
- enum v4l2_field field;
- enum v4l2_field last; /* for field=V4L2_FIELD_ALTERNATE */
- struct videobuf_buffer *bufs[VIDEO_MAX_FRAME];
- const struct videobuf_queue_ops *ops;
- struct videobuf_qtype_ops *int_ops;
-
- unsigned int streaming:1;
- unsigned int reading:1;
-
- /* capture via mmap() + ioctl(QBUF/DQBUF) */
- struct list_head stream;
-
- /* capture via read() */
- unsigned int read_off;
- struct videobuf_buffer *read_buf;
-
- /* driver private data */
- void *priv_data;
-};
-
-static inline void videobuf_queue_lock(struct videobuf_queue *q)
-{
- if (!q->ext_lock)
- mutex_lock(&q->vb_lock);
-}
-
-static inline void videobuf_queue_unlock(struct videobuf_queue *q)
-{
- if (!q->ext_lock)
- mutex_unlock(&q->vb_lock);
-}
-
-int videobuf_waiton(struct videobuf_queue *q, struct videobuf_buffer *vb,
- int non_blocking, int intr);
-int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb,
- struct v4l2_framebuffer *fbuf);
-
-struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q);
-
-/* Used on videobuf-dvb */
-void *videobuf_queue_to_vaddr(struct videobuf_queue *q,
- struct videobuf_buffer *buf);
-
-void videobuf_queue_core_init(struct videobuf_queue *q,
- const struct videobuf_queue_ops *ops,
- struct device *dev,
- spinlock_t *irqlock,
- enum v4l2_buf_type type,
- enum v4l2_field field,
- unsigned int msize,
- void *priv,
- struct videobuf_qtype_ops *int_ops,
- struct mutex *ext_lock);
-int videobuf_queue_is_busy(struct videobuf_queue *q);
-void videobuf_queue_cancel(struct videobuf_queue *q);
-
-enum v4l2_field videobuf_next_field(struct videobuf_queue *q);
-int videobuf_reqbufs(struct videobuf_queue *q,
- struct v4l2_requestbuffers *req);
-int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b);
-int videobuf_qbuf(struct videobuf_queue *q,
- struct v4l2_buffer *b);
-int videobuf_dqbuf(struct videobuf_queue *q,
- struct v4l2_buffer *b, int nonblocking);
-int videobuf_streamon(struct videobuf_queue *q);
-int videobuf_streamoff(struct videobuf_queue *q);
-
-void videobuf_stop(struct videobuf_queue *q);
-
-int videobuf_read_start(struct videobuf_queue *q);
-void videobuf_read_stop(struct videobuf_queue *q);
-ssize_t videobuf_read_stream(struct videobuf_queue *q,
- char __user *data, size_t count, loff_t *ppos,
- int vbihack, int nonblocking);
-ssize_t videobuf_read_one(struct videobuf_queue *q,
- char __user *data, size_t count, loff_t *ppos,
- int nonblocking);
-__poll_t videobuf_poll_stream(struct file *file,
- struct videobuf_queue *q,
- poll_table *wait);
-
-int videobuf_mmap_setup(struct videobuf_queue *q,
- unsigned int bcount, unsigned int bsize,
- enum v4l2_memory memory);
-int __videobuf_mmap_setup(struct videobuf_queue *q,
- unsigned int bcount, unsigned int bsize,
- enum v4l2_memory memory);
-int videobuf_mmap_free(struct videobuf_queue *q);
-int videobuf_mmap_mapper(struct videobuf_queue *q,
- struct vm_area_struct *vma);
-
-#endif
diff --git a/include/media/videobuf-dma-contig.h b/include/media/videobuf-dma-contig.h
deleted file mode 100644
index 525883b2c53e..000000000000
--- a/include/media/videobuf-dma-contig.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * helper functions for physically contiguous capture buffers
- *
- * The functions support hardware lacking scatter gather support
- * (i.e. the buffers must be linear in physical memory)
- *
- * Copyright (c) 2008 Magnus Damm
- */
-#ifndef _VIDEOBUF_DMA_CONTIG_H
-#define _VIDEOBUF_DMA_CONTIG_H
-
-#include <linux/dma-mapping.h>
-#include <media/videobuf-core.h>
-
-void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
- const struct videobuf_queue_ops *ops,
- struct device *dev,
- spinlock_t *irqlock,
- enum v4l2_buf_type type,
- enum v4l2_field field,
- unsigned int msize,
- void *priv,
- struct mutex *ext_lock);
-
-dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf);
-void videobuf_dma_contig_free(struct videobuf_queue *q,
- struct videobuf_buffer *buf);
-
-#endif /* _VIDEOBUF_DMA_CONTIG_H */
diff --git a/include/media/videobuf-dma-sg.h b/include/media/videobuf-dma-sg.h
deleted file mode 100644
index 930ff8d454fc..000000000000
--- a/include/media/videobuf-dma-sg.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * helper functions for SG DMA video4linux capture buffers
- *
- * The functions expect the hardware being able to scatter gather
- * (i.e. the buffers are not linear in physical memory, but fragmented
- * into PAGE_SIZE chunks). They also assume the driver does not need
- * to touch the video data.
- *
- * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
- *
- * Highly based on video-buf written originally by:
- * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
- * (c) 2006 Mauro Carvalho Chehab, <mchehab@kernel.org>
- * (c) 2006 Ted Walther and John Sokol
- */
-#ifndef _VIDEOBUF_DMA_SG_H
-#define _VIDEOBUF_DMA_SG_H
-
-#include <media/videobuf-core.h>
-
-/* --------------------------------------------------------------------- */
-
-/*
- * A small set of helper functions to manage buffers (both userland
- * and kernel) for DMA.
- *
- * videobuf_dma_init_*()
- * creates a buffer. The userland version takes a userspace
- * pointer + length. The kernel version just wants the size and
- * does memory allocation too using vmalloc_32().
- *
- * videobuf_dma_*()
- * see Documentation/core-api/dma-api-howto.rst, these functions to
- * basically the same. The map function does also build a
- * scatterlist for the buffer (and unmap frees it ...)
- *
- * videobuf_dma_free()
- * no comment ...
- *
- */
-
-struct videobuf_dmabuf {
- u32 magic;
-
- /* for userland buffer */
- int offset;
- size_t size;
- struct page **pages;
-
- /* for kernel buffers */
- void *vaddr;
- struct page **vaddr_pages;
- dma_addr_t *dma_addr;
- struct device *dev;
-
- /* for overlay buffers (pci-pci dma) */
- dma_addr_t bus_addr;
-
- /* common */
- struct scatterlist *sglist;
- int sglen;
- unsigned long nr_pages;
- int direction;
-};
-
-struct videobuf_dma_sg_memory {
- u32 magic;
-
- /* for mmap'ed buffers */
- struct videobuf_dmabuf dma;
-};
-
-/*
- * Scatter-gather DMA buffer API.
- *
- * These functions provide a simple way to create a page list and a
- * scatter-gather list from a kernel, userspace of physical address and map the
- * memory for DMA operation.
- *
- * Despite the name, this is totally unrelated to videobuf, except that
- * videobuf-dma-sg uses the same API internally.
- */
-int videobuf_dma_free(struct videobuf_dmabuf *dma);
-
-int videobuf_dma_unmap(struct device *dev, struct videobuf_dmabuf *dma);
-struct videobuf_dmabuf *videobuf_to_dma(struct videobuf_buffer *buf);
-
-void *videobuf_sg_alloc(size_t size);
-
-void videobuf_queue_sg_init(struct videobuf_queue *q,
- const struct videobuf_queue_ops *ops,
- struct device *dev,
- spinlock_t *irqlock,
- enum v4l2_buf_type type,
- enum v4l2_field field,
- unsigned int msize,
- void *priv,
- struct mutex *ext_lock);
-
-#endif /* _VIDEOBUF_DMA_SG_H */
-
diff --git a/include/media/videobuf-vmalloc.h b/include/media/videobuf-vmalloc.h
deleted file mode 100644
index e930dbb9d7f4..000000000000
--- a/include/media/videobuf-vmalloc.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * helper functions for vmalloc capture buffers
- *
- * The functions expect the hardware being able to scatter gather
- * (i.e. the buffers are not linear in physical memory, but fragmented
- * into PAGE_SIZE chunks). They also assume the driver does not need
- * to touch the video data.
- *
- * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
- */
-#ifndef _VIDEOBUF_VMALLOC_H
-#define _VIDEOBUF_VMALLOC_H
-
-#include <media/videobuf-core.h>
-
-/* --------------------------------------------------------------------- */
-
-struct videobuf_vmalloc_memory {
- u32 magic;
-
- void *vaddr;
-
- /* remap_vmalloc_range seems to need to run
- * after mmap() on some cases */
- struct vm_area_struct *vma;
-};
-
-void videobuf_queue_vmalloc_init(struct videobuf_queue *q,
- const struct videobuf_queue_ops *ops,
- struct device *dev,
- spinlock_t *irqlock,
- enum v4l2_buf_type type,
- enum v4l2_field field,
- unsigned int msize,
- void *priv,
- struct mutex *ext_lock);
-
-void *videobuf_to_vmalloc(struct videobuf_buffer *buf);
-
-void videobuf_vmalloc_free(struct videobuf_buffer *buf);
-
-#endif
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index 4b6a9d2ea372..56719a26a46c 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -402,7 +402,7 @@ struct vb2_buffer {
* by calling vb2_buffer_done() with %VB2_BUF_STATE_QUEUED.
* If you need a minimum number of buffers before you can
* start streaming, then set
- * &vb2_queue->min_buffers_needed. If that is non-zero
+ * &vb2_queue->min_queued_buffers. If that is non-zero
* then @start_streaming won't be called until at least
* that many buffers have been queued up by userspace.
* @stop_streaming: called when 'streaming' state must be disabled; driver
@@ -546,10 +546,13 @@ struct vb2_buf_ops {
* @gfp_flags: additional gfp flags used when allocating the buffers.
* Typically this is 0, but it may be e.g. %GFP_DMA or %__GFP_DMA32
* to force the buffer allocation to a specific memory zone.
- * @min_buffers_needed: the minimum number of buffers needed before
+ * @min_queued_buffers: the minimum number of queued buffers needed before
* @start_streaming can be called. Used when a DMA engine
* cannot be started unless at least this number of buffers
* have been queued into the driver.
+ * VIDIOC_REQBUFS will ensure at least @min_queued_buffers
+ * buffers will be allocated. Note that VIDIOC_CREATE_BUFS will not
+ * modify the requested buffer count.
*/
/*
* Private elements (won't appear at the uAPI book):
@@ -558,6 +561,9 @@ struct vb2_buf_ops {
* @dma_dir: DMA mapping direction.
* @bufs: videobuf2 buffer structures
* @num_buffers: number of allocated/used buffers
+ * @max_num_buffers: upper limit of number of allocated/used buffers.
+ * If set to 0 v4l2 core will change it VB2_MAX_FRAME
+ * for backward compatibility.
* @queued_list: list of buffers currently queued from userspace
* @queued_count: number of buffers queued and ready for streaming.
* @owned_by_drv_count: number of buffers owned by the driver
@@ -611,7 +617,7 @@ struct vb2_queue {
unsigned int buf_struct_size;
u32 timestamp_flags;
gfp_t gfp_flags;
- u32 min_buffers_needed;
+ u32 min_queued_buffers;
struct device *alloc_devs[VB2_MAX_PLANES];
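The renamed field keeps its old semantics; a hedged queue-setup fragment with
illustrative values:

q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
q->io_modes = VB2_MMAP | VB2_DMABUF;
q->min_queued_buffers = 2;	/* DMA engine needs two queued buffers to start */
ret = vb2_queue_init(q);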
@@ -619,8 +625,9 @@ struct vb2_queue {
struct mutex mmap_lock;
unsigned int memory;
enum dma_data_direction dma_dir;
- struct vb2_buffer *bufs[VB2_MAX_FRAME];
+ struct vb2_buffer **bufs;
unsigned int num_buffers;
+ unsigned int max_num_buffers;
struct list_head queued_list;
unsigned int queued_count;
@@ -747,7 +754,7 @@ int vb2_wait_for_all_buffers(struct vb2_queue *q);
/**
* vb2_core_querybuf() - query video buffer information.
* @q: pointer to &struct vb2_queue with videobuf2 queue.
- * @index: id number of the buffer.
+ * @vb: pointer to struct &vb2_buffer.
* @pb: buffer struct passed from userspace.
*
* Videobuf2 core helper to implement VIDIOC_QUERYBUF() operation. It is called
@@ -759,7 +766,7 @@ int vb2_wait_for_all_buffers(struct vb2_queue *q);
*
* Return: returns zero on success; an error code otherwise.
*/
-void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb);
+void vb2_core_querybuf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb);
/**
* vb2_core_reqbufs() - Initiate streaming.
@@ -823,7 +830,7 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
* vb2_core_prepare_buf() - Pass ownership of a buffer from userspace
* to the kernel.
* @q: pointer to &struct vb2_queue with videobuf2 queue.
- * @index: id number of the buffer.
+ * @vb: pointer to struct &vb2_buffer.
* @pb: buffer structure passed from userspace to
* &v4l2_ioctl_ops->vidioc_prepare_buf handler in driver.
*
@@ -839,13 +846,13 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
*
* Return: returns zero on success; an error code otherwise.
*/
-int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb);
+int vb2_core_prepare_buf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb);
/**
* vb2_core_qbuf() - Queue a buffer from userspace
*
* @q: pointer to &struct vb2_queue with videobuf2 queue.
- * @index: id number of the buffer
+ * @vb: pointer to struct &vb2_buffer.
* @pb: buffer structure passed from userspace to
* v4l2_ioctl_ops->vidioc_qbuf handler in driver
* @req: pointer to &struct media_request, may be NULL.
@@ -867,7 +874,7 @@ int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb);
*
* Return: returns zero on success; an error code otherwise.
*/
-int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
+int vb2_core_qbuf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb,
struct media_request *req);
/**
@@ -931,7 +938,7 @@ int vb2_core_streamoff(struct vb2_queue *q, unsigned int type);
* @fd: pointer to the file descriptor associated with DMABUF
* (set by driver).
* @type: buffer type.
- * @index: id number of the buffer.
+ * @vb: pointer to struct &vb2_buffer.
* @plane: index of the plane to be exported, 0 for single plane queues
* @flags: file flags for newly created file, as defined at
* include/uapi/asm-generic/fcntl.h.
@@ -945,7 +952,7 @@ int vb2_core_streamoff(struct vb2_queue *q, unsigned int type);
* Return: returns zero on success; an error code otherwise.
*/
int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type,
- unsigned int index, unsigned int plane, unsigned int flags);
+ struct vb2_buffer *vb, unsigned int plane, unsigned int flags);
/**
* vb2_core_queue_init() - initialize a videobuf2 queue
@@ -1140,6 +1147,15 @@ static inline bool vb2_fileio_is_active(struct vb2_queue *q)
}
/**
+ * vb2_get_num_buffers() - get the number of buffers in a queue
+ * @q: pointer to &struct vb2_queue with videobuf2 queue.
+ */
+static inline unsigned int vb2_get_num_buffers(struct vb2_queue *q)
+{
+ return q->num_buffers;
+}
+
+/**
* vb2_is_busy() - return busy status of the queue.
* @q: pointer to &struct vb2_queue with videobuf2 queue.
*
@@ -1147,7 +1163,7 @@ static inline bool vb2_fileio_is_active(struct vb2_queue *q)
*/
static inline bool vb2_is_busy(struct vb2_queue *q)
{
- return (q->num_buffers > 0);
+ return vb2_get_num_buffers(q) > 0;
}
/**
@@ -1239,6 +1255,12 @@ static inline void vb2_clear_last_buffer_dequeued(struct vb2_queue *q)
static inline struct vb2_buffer *vb2_get_buffer(struct vb2_queue *q,
unsigned int index)
{
+ if (!q->bufs)
+ return NULL;
+
+ if (index >= q->max_num_buffers)
+ return NULL;
+
if (index < q->num_buffers)
return q->bufs[index];
return NULL;
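
With @bufs now a dynamically sized array, buffer lookups are bounds-checked
against @max_num_buffers. A minimal sketch of driver-side iteration using the
new accessors (my_driver_handle_buf() is a hypothetical helper):

	/* Sketch: walk the currently allocated buffers through the
	 * NULL-safe lookup; assumes the caller guarantees buffers are
	 * not freed concurrently.
	 */
	unsigned int i;

	for (i = 0; i < vb2_get_num_buffers(q); i++) {
		struct vb2_buffer *vb = vb2_get_buffer(q, i);

		if (!vb)
			continue;
		my_driver_handle_buf(vb);
	}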
diff --git a/include/net/Space.h b/include/net/Space.h
index 08ca9cef0213..ef42629f4258 100644
--- a/include/net/Space.h
+++ b/include/net/Space.h
@@ -3,18 +3,10 @@
* ethernet adaptor have the name "eth[0123...]".
*/
-struct net_device *hp100_probe(int unit);
struct net_device *ultra_probe(int unit);
struct net_device *wd_probe(int unit);
struct net_device *ne_probe(int unit);
-struct net_device *fmv18x_probe(int unit);
-struct net_device *ni65_probe(int unit);
-struct net_device *sonic_probe(int unit);
struct net_device *smc_init(int unit);
struct net_device *cs89x0_probe(int unit);
struct net_device *tc515_probe(int unit);
struct net_device *lance_probe(int unit);
-struct net_device *cops_probe(int unit);
-
-/* Fibre Channel adapters */
-int iph5526_probe(struct net_device *dev);
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 4ae0580b63ca..e1e5e72b901e 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -137,6 +137,7 @@ struct tc_action_ops {
#ifdef CONFIG_NET_CLS_ACT
+#define ACT_P_BOUND 0
#define ACT_P_CREATED 1
#define ACT_P_DELETED 1
@@ -191,7 +192,7 @@ int tcf_idr_create_from_flags(struct tc_action_net *tn, u32 index,
struct nlattr *est, struct tc_action **a,
const struct tc_action_ops *ops, int bind,
u32 flags);
-void tcf_idr_insert_many(struct tc_action *actions[]);
+void tcf_idr_insert_many(struct tc_action *actions[], int init_res[]);
void tcf_idr_cleanup(struct tc_action_net *tn, u32 index);
int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
struct tc_action **a, int bind);
@@ -207,8 +208,7 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
struct nlattr *est,
struct tc_action *actions[], int init_res[], size_t *attr_size,
u32 flags, u32 fl_flags, struct netlink_ext_ack *extack);
-struct tc_action_ops *tc_action_load_ops(struct nlattr *nla, bool police,
- bool rtnl_held,
+struct tc_action_ops *tc_action_load_ops(struct nlattr *nla, u32 flags,
struct netlink_ext_ack *extack);
struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
struct nlattr *nla, struct nlattr *est,
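
The former police/rtnl_held boolean pair is folded into the u32 flags word.
A hedged caller sketch, assuming the in-kernel TCA_ACT_FLAGS_* bits (e.g.
TCA_ACT_FLAGS_POLICE) are what travel in that word:

	/* Sketch: resolve the action ops for a policer; the flags word
	 * replaces the old (police, rtnl_held) argument pair.
	 */
	struct tc_action_ops *ops;

	ops = tc_action_load_ops(nla, flags | TCA_ACT_FLAGS_POLICE, extack);
	if (IS_ERR(ops))
		return PTR_ERR(ops);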
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 82da55101b5a..61ebe723ee4d 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -31,17 +31,22 @@ struct prefix_info {
__u8 length;
__u8 prefix_len;
+ union __packed {
+ __u8 flags;
+ struct __packed {
#if defined(__BIG_ENDIAN_BITFIELD)
- __u8 onlink : 1,
+ __u8 onlink : 1,
autoconf : 1,
reserved : 6;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
- __u8 reserved : 6,
+ __u8 reserved : 6,
autoconf : 1,
onlink : 1;
#else
#error "Please fix <asm/byteorder.h>"
#endif
+ };
+ };
__be32 valid;
__be32 prefered;
__be32 reserved2;
@@ -49,6 +54,9 @@ struct prefix_info {
struct in6_addr prefix;
};
+/* rfc4861 4.6.2: IPv6 PIO is 32 bytes in size */
+static_assert(sizeof(struct prefix_info) == 32);
+
#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <net/if_inet6.h>
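
The anonymous union keeps the endian-dependent bitfields but also exposes the
whole PIO flags octet, and the static_assert pins the RFC 4861 wire size at
build time. A minimal sketch of reading the same byte both ways
(handle_onlink() is illustrative):

	/* Sketch: the raw octet and the bitfields alias one byte. */
	u8 raw = pinfo->flags;

	if (pinfo->onlink)
		handle_onlink(pinfo, raw);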
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index 5531dd08061e..0754c463224a 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -15,6 +15,7 @@ struct key;
struct sock;
struct socket;
struct rxrpc_call;
+struct rxrpc_peer;
enum rxrpc_abort_reason;
enum rxrpc_interruptibility {
@@ -41,13 +42,14 @@ void rxrpc_kernel_new_call_notification(struct socket *,
rxrpc_notify_new_call_t,
rxrpc_discard_new_call_t);
struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
- struct sockaddr_rxrpc *srx,
+ struct rxrpc_peer *peer,
struct key *key,
unsigned long user_call_ID,
s64 tx_total_len,
u32 hard_timeout,
gfp_t gfp,
rxrpc_notify_rx_t notify_rx,
+ u16 service_id,
bool upgrade,
enum rxrpc_interruptibility interruptibility,
unsigned int debug_id);
@@ -60,9 +62,14 @@ bool rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *,
u32, int, enum rxrpc_abort_reason);
void rxrpc_kernel_shutdown_call(struct socket *sock, struct rxrpc_call *call);
void rxrpc_kernel_put_call(struct socket *sock, struct rxrpc_call *call);
-void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *,
- struct sockaddr_rxrpc *);
-bool rxrpc_kernel_get_srtt(struct socket *, struct rxrpc_call *, u32 *);
+struct rxrpc_peer *rxrpc_kernel_lookup_peer(struct socket *sock,
+ struct sockaddr_rxrpc *srx, gfp_t gfp);
+void rxrpc_kernel_put_peer(struct rxrpc_peer *peer);
+struct rxrpc_peer *rxrpc_kernel_get_peer(struct rxrpc_peer *peer);
+struct rxrpc_peer *rxrpc_kernel_get_call_peer(struct socket *sock, struct rxrpc_call *call);
+const struct sockaddr_rxrpc *rxrpc_kernel_remote_srx(const struct rxrpc_peer *peer);
+const struct sockaddr *rxrpc_kernel_remote_addr(const struct rxrpc_peer *peer);
+unsigned int rxrpc_kernel_get_srtt(const struct rxrpc_peer *);
int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
rxrpc_user_attach_call_t, unsigned long, gfp_t,
unsigned int);
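
Callers now resolve a struct rxrpc_peer once and pass it to each call instead
of handing a sockaddr to every rxrpc_kernel_begin_call(). A hedged sketch of
the new sequence, with error handling trimmed and the interruptibility value
chosen arbitrarily:

	/* Sketch: look up (and pin) the peer, start a call against it,
	 * and drop our peer ref once it is no longer needed.
	 */
	struct rxrpc_peer *peer;
	struct rxrpc_call *call;

	peer = rxrpc_kernel_lookup_peer(sock, srx, GFP_KERNEL);
	if (!peer)
		return -ENOMEM;

	call = rxrpc_kernel_begin_call(sock, peer, key, user_call_ID,
				       tx_total_len, hard_timeout,
				       GFP_KERNEL, notify_rx, service_id,
				       upgrade, RXRPC_PREINTERRUPTIBLE,
				       debug_id);

	rxrpc_kernel_put_peer(peer);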
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 824c258143a3..afd40dce40f3 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -46,12 +46,6 @@ struct scm_stat {
#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
-#define unix_state_lock(s) spin_lock(&unix_sk(s)->lock)
-#define unix_state_unlock(s) spin_unlock(&unix_sk(s)->lock)
-#define unix_state_lock_nested(s) \
- spin_lock_nested(&unix_sk(s)->lock, \
- SINGLE_DEPTH_NESTING)
-
/* The AF_UNIX socket */
struct unix_sock {
/* WARNING: sk has to be the first member */
@@ -75,6 +69,21 @@ struct unix_sock {
};
#define unix_sk(ptr) container_of_const(ptr, struct unix_sock, sk)
+#define unix_peer(sk) (unix_sk(sk)->peer)
+
+#define unix_state_lock(s) spin_lock(&unix_sk(s)->lock)
+#define unix_state_unlock(s) spin_unlock(&unix_sk(s)->lock)
+enum unix_socket_lock_class {
+ U_LOCK_NORMAL,
+ U_LOCK_SECOND, /* for double locking, see unix_state_double_lock(). */
+ U_LOCK_DIAG, /* used while dumping icons, see sk_diag_dump_icons(). */
+};
+
+static inline void unix_state_lock_nested(struct sock *sk,
+ enum unix_socket_lock_class subclass)
+{
+ spin_lock_nested(&unix_sk(sk)->lock, subclass);
+}
#define peer_wait peer_wq.wait
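
unix_state_lock_nested() now takes an explicit lockdep subclass rather than
always using SINGLE_DEPTH_NESTING. A minimal double-lock sketch in the style
of unix_state_double_lock(), ordering the sockets by address and annotating
the second acquisition:

	/* Sketch: take both locks in pointer order; the second one is
	 * annotated U_LOCK_SECOND so lockdep accepts the nesting.
	 */
	if (sk1 > sk2)
		swap(sk1, sk2);

	unix_state_lock(sk1);
	unix_state_lock_nested(sk2, U_LOCK_SECOND);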
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index 0e7504a42925..535701efc1e5 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -137,7 +137,6 @@ struct vsock_transport {
u64 (*stream_rcvhiwat)(struct vsock_sock *);
bool (*stream_is_active)(struct vsock_sock *);
bool (*stream_allow)(u32 cid, u32 port);
- int (*set_rcvlowat)(struct vsock_sock *vsk, int val);
/* SEQ_PACKET. */
ssize_t (*seqpacket_dequeue)(struct vsock_sock *vsk, struct msghdr *msg,
@@ -168,6 +167,7 @@ struct vsock_transport {
struct vsock_transport_send_notify_data *);
/* sk_lock held by the caller */
void (*notify_buffer_size)(struct vsock_sock *, u64 *);
+ int (*notify_set_rcvlowat)(struct vsock_sock *vsk, int val);
/* Shutdown. */
int (*shutdown)(struct vsock_sock *, int);
@@ -177,6 +177,9 @@ struct vsock_transport {
/* Read a single skb */
int (*read_skb)(struct vsock_sock *, skb_read_actor_t);
+
+ /* Zero-copy. */
+ bool (*msgzerocopy_allow)(void);
};
/**** CORE ****/
@@ -201,7 +204,6 @@ static inline bool __vsock_in_connected_table(struct vsock_sock *vsk)
return !list_empty(&vsk->connected_table);
}
-void vsock_release_pending(struct sock *pending);
void vsock_add_pending(struct sock *listener, struct sock *pending);
void vsock_remove_pending(struct sock *listener, struct sock *pending);
void vsock_enqueue_accept(struct sock *listener, struct sock *connected);
@@ -225,7 +227,6 @@ struct vsock_tap {
struct list_head list;
};
-int vsock_init_tap(void);
int vsock_add_tap(struct vsock_tap *vt);
int vsock_remove_tap(struct vsock_tap *vt);
void vsock_deliver_tap(struct sk_buff *build_skb(void *opaque), void *opaque);
@@ -243,4 +244,8 @@ static inline void __init vsock_bpf_build_proto(void)
{}
#endif
+static inline bool vsock_msgzerocopy_allow(const struct vsock_transport *t)
+{
+ return t->msgzerocopy_allow && t->msgzerocopy_allow();
+}
#endif /* __AF_VSOCK_H__ */
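
The vsock_msgzerocopy_allow() wrapper is NULL-safe for transports that do not
implement the hook. A short gating sketch for a send path (the field access
is illustrative):

	/* Sketch: refuse MSG_ZEROCOPY when the bound transport does not
	 * advertise support for it.
	 */
	const struct vsock_transport *t = vsk->transport;

	if ((msg->msg_flags & MSG_ZEROCOPY) && !vsock_msgzerocopy_allow(t))
		return -EOPNOTSUPP;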
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 1b4230cd42a3..7ffa8c192c3f 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -185,7 +185,7 @@ struct bt_iso_ucast_qos {
struct bt_iso_bcast_qos {
__u8 big;
__u8 bis;
- __u8 sync_interval;
+ __u8 sync_factor;
__u8 packing;
__u8 framing;
struct bt_iso_io_qos in;
@@ -386,6 +386,7 @@ struct bt_sock {
enum {
BT_SK_DEFER_SETUP,
BT_SK_SUSPEND,
+ BT_SK_PKT_STATUS
};
struct bt_sock_list {
@@ -400,6 +401,8 @@ int bt_sock_register(int proto, const struct net_proto_family *ops);
void bt_sock_unregister(int proto);
void bt_sock_link(struct bt_sock_list *l, struct sock *s);
void bt_sock_unlink(struct bt_sock_list *l, struct sock *s);
+struct sock *bt_sock_alloc(struct net *net, struct socket *sock,
+ struct proto *prot, int proto, gfp_t prio, int kern);
int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
int flags);
int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
@@ -430,10 +433,6 @@ struct l2cap_ctrl {
struct l2cap_chan *chan;
};
-struct sco_ctrl {
- u8 pkt_status;
-};
-
struct hci_dev;
typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status, u16 opcode);
@@ -464,16 +463,18 @@ struct bt_skb_cb {
u8 force_active;
u16 expect;
u8 incoming:1;
+ u8 pkt_status:2;
union {
struct l2cap_ctrl l2cap;
- struct sco_ctrl sco;
struct hci_ctrl hci;
struct mgmt_ctrl mgmt;
+ struct scm_creds creds;
};
};
#define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb))
#define hci_skb_pkt_type(skb) bt_cb((skb))->pkt_type
+#define hci_skb_pkt_status(skb) bt_cb((skb))->pkt_status
#define hci_skb_expect(skb) bt_cb((skb))->expect
#define hci_skb_opcode(skb) bt_cb((skb))->hci.opcode
#define hci_skb_event(skb) bt_cb((skb))->hci.req_event
@@ -540,7 +541,7 @@ static inline struct sk_buff *bt_skb_sendmsg(struct sock *sk,
return ERR_PTR(-EFAULT);
}
- skb->priority = sk->sk_priority;
+ skb->priority = READ_ONCE(sk->sk_priority);
return skb;
}
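
With pkt_status promoted from the SCO-only control block into bt_skb_cb, the
SCO and ISO paths can share one accessor. A hedged sketch of a driver
recording the controller-reported status on a received frame:

	/* Sketch: stash the 2-bit erroneous-data status so the socket
	 * layer can surface it (e.g. via a cmsg) later.
	 */
	hci_skb_pkt_status(skb) = status & 0x03;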
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 872dcb91a540..bdee5d649cc6 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -1,6 +1,7 @@
/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
+ Copyright 2023 NXP
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -309,6 +310,26 @@ enum {
* to support it.
*/
HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT,
+
+ /* When this quirk is set, MSFT extension monitor tracking by
+	 * address filter is supported. Since the tracking quantity of each
+ * pattern is limited, this feature supports tracking multiple
+ * devices concurrently if controller supports multiple
+ * address filters.
+ *
+ * This quirk must be set before hci_register_dev is called.
+ */
+ HCI_QUIRK_USE_MSFT_EXT_ADDRESS_FILTER,
+
+ /*
+ * When this quirk is set, LE Coded PHY shall not be used. This is
+ * required for some Intel controllers which erroneously claim to
+ * support it but it causes problems with extended scanning.
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_BROKEN_LE_CODED,
};
/* HCI device flags */
@@ -577,6 +598,7 @@ enum {
#define HCI_LE_CIS_CENTRAL 0x10
#define HCI_LE_CIS_PERIPHERAL 0x20
#define HCI_LE_ISO_BROADCASTER 0x40
+#define HCI_LE_ISO_SYNC_RECEIVER 0x80
/* Connection modes */
#define HCI_CM_ACTIVE 0x0000
@@ -652,6 +674,8 @@ enum {
#define HCI_TX_POWER_INVALID 127
#define HCI_RSSI_INVALID 127
+#define HCI_SYNC_HANDLE_INVALID 0xffff
+
#define HCI_ROLE_MASTER 0x00
#define HCI_ROLE_SLAVE 0x01
@@ -2760,6 +2784,17 @@ struct hci_ev_le_enh_conn_complete {
__u8 clk_accurancy;
} __packed;
+#define HCI_EV_LE_PER_ADV_REPORT 0x0f
+struct hci_ev_le_per_adv_report {
+ __le16 sync_handle;
+ __u8 tx_power;
+ __u8 rssi;
+ __u8 cte_type;
+ __u8 data_status;
+ __u8 length;
+ __u8 data[];
+} __packed;
+
#define HCI_EV_LE_EXT_ADV_SET_TERM 0x12
struct hci_evt_le_ext_adv_set_term {
__u8 status;
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 9654567cfae3..8f8dd9173714 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -83,7 +83,7 @@ struct discovery_state {
u8 last_adv_addr_type;
s8 last_adv_rssi;
u32 last_adv_flags;
- u8 last_adv_data[HCI_MAX_AD_LENGTH];
+ u8 last_adv_data[HCI_MAX_EXT_AD_LENGTH];
u8 last_adv_data_len;
bool report_invalid_rssi;
bool result_filtering;
@@ -189,6 +189,7 @@ struct blocked_key {
struct smp_csrk {
bdaddr_t bdaddr;
u8 bdaddr_type;
+ u8 link_type;
u8 type;
u8 val[16];
};
@@ -198,6 +199,7 @@ struct smp_ltk {
struct rcu_head rcu;
bdaddr_t bdaddr;
u8 bdaddr_type;
+ u8 link_type;
u8 authenticated;
u8 type;
u8 enc_size;
@@ -212,6 +214,7 @@ struct smp_irk {
bdaddr_t rpa;
bdaddr_t bdaddr;
u8 addr_type;
+ u8 link_type;
u8 val[16];
};
@@ -219,6 +222,8 @@ struct link_key {
struct list_head list;
struct rcu_head rcu;
bdaddr_t bdaddr;
+ u8 bdaddr_type;
+ u8 link_type;
u8 type;
u8 val[HCI_LINK_KEY_SIZE];
u8 pin_len;
@@ -290,7 +295,7 @@ struct adv_pattern {
__u8 ad_type;
__u8 offset;
__u8 length;
- __u8 value[HCI_MAX_AD_LENGTH];
+ __u8 value[HCI_MAX_EXT_AD_LENGTH];
};
struct adv_rssi_thresholds {
@@ -321,8 +326,8 @@ struct adv_monitor {
#define HCI_MAX_SHORT_NAME_LENGTH 10
-#define HCI_CONN_HANDLE_UNSET 0xffff
#define HCI_CONN_HANDLE_MAX 0x0eff
+#define HCI_CONN_HANDLE_UNSET(_handle) (_handle > HCI_CONN_HANDLE_MAX)
/* Min encryption key size to match with SMP */
#define HCI_MIN_ENC_KEY_SIZE 7
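
HCI_CONN_HANDLE_UNSET changes from a sentinel constant into a predicate over
the valid handle range (anything above HCI_CONN_HANDLE_MAX counts as unset).
Sketch:

	/* Sketch: refuse to operate on a connection whose handle has not
	 * been assigned by the controller yet.
	 */
	if (HCI_CONN_HANDLE_UNSET(conn->handle))
		return -EINVAL;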
@@ -350,7 +355,9 @@ struct hci_dev {
struct list_head list;
struct mutex lock;
- char name[8];
+ struct ida unset_handle_ida;
+
+ const char *name;
unsigned long flags;
__u16 id;
__u8 bus;
@@ -532,7 +539,6 @@ struct hci_dev {
struct work_struct tx_work;
struct delayed_work le_scan_disable;
- struct delayed_work le_scan_restart;
struct sk_buff_head rx_q;
struct sk_buff_head raw_q;
@@ -593,9 +599,7 @@ struct hci_dev {
const char *fw_info;
struct dentry *debugfs;
-#ifdef CONFIG_DEV_COREDUMP
struct hci_devcoredump dump;
-#endif
struct device dev;
@@ -728,7 +732,7 @@ struct hci_conn {
__u16 le_conn_interval;
__u16 le_conn_latency;
__u16 le_supv_timeout;
- __u8 le_adv_data[HCI_MAX_AD_LENGTH];
+ __u8 le_adv_data[HCI_MAX_EXT_AD_LENGTH];
__u8 le_adv_data_len;
__u8 le_per_adv_data[HCI_MAX_PER_AD_LENGTH];
__u8 le_per_adv_data_len;
@@ -741,6 +745,7 @@ struct hci_conn {
unsigned long flags;
enum conn_reasons conn_reason;
+ __u8 abort_reason;
__u32 clock;
__u16 clock_accuracy;
@@ -760,7 +765,6 @@ struct hci_conn {
struct delayed_work auto_accept_work;
struct delayed_work idle_work;
struct delayed_work le_conn_timeout;
- struct work_struct le_scan_cleanup;
struct device dev;
struct dentry *debugfs;
@@ -822,6 +826,7 @@ struct hci_conn_params {
struct hci_conn *conn;
bool explicit_connect;
+ /* Accessed without hdev->lock: */
hci_conn_flags_t flags;
u8 privacy_mode;
};
@@ -951,7 +956,6 @@ void hci_inquiry_cache_flush(struct hci_dev *hdev);
/* ----- HCI Connections ----- */
enum {
HCI_CONN_AUTH_PEND,
- HCI_CONN_REAUTH_PEND,
HCI_CONN_ENCRYPT_PEND,
HCI_CONN_RSWITCH_PEND,
HCI_CONN_MODE_CHANGE_PEND,
@@ -975,6 +979,12 @@ enum {
HCI_CONN_SCANNING,
HCI_CONN_AUTH_FAILURE,
HCI_CONN_PER_ADV,
+ HCI_CONN_BIG_CREATED,
+ HCI_CONN_CREATE_CIS,
+ HCI_CONN_BIG_SYNC,
+ HCI_CONN_BIG_SYNC_FAILED,
+ HCI_CONN_PA_SYNC,
+ HCI_CONN_PA_SYNC_FAILED,
};
static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
@@ -1094,8 +1104,7 @@ static inline __u8 hci_conn_lookup_type(struct hci_dev *hdev, __u16 handle)
}
static inline struct hci_conn *hci_conn_hash_lookup_bis(struct hci_dev *hdev,
- bdaddr_t *ba,
- __u8 big, __u8 bis)
+ bdaddr_t *ba, __u8 bis)
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct hci_conn *c;
@@ -1106,7 +1115,33 @@ static inline struct hci_conn *hci_conn_hash_lookup_bis(struct hci_dev *hdev,
if (bacmp(&c->dst, ba) || c->type != ISO_LINK)
continue;
- if (c->iso_qos.bcast.big == big && c->iso_qos.bcast.bis == bis) {
+ if (c->iso_qos.bcast.bis == bis) {
+ rcu_read_unlock();
+ return c;
+ }
+ }
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+static inline struct hci_conn *
+hci_conn_hash_lookup_per_adv_bis(struct hci_dev *hdev,
+ bdaddr_t *ba,
+ __u8 big, __u8 bis)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (bacmp(&c->dst, ba) || c->type != ISO_LINK ||
+ !test_bit(HCI_CONN_PER_ADV, &c->flags))
+ continue;
+
+ if (c->iso_qos.bcast.big == big &&
+ c->iso_qos.bcast.bis == bis) {
rcu_read_unlock();
return c;
}
@@ -1191,15 +1226,15 @@ static inline struct hci_conn *hci_conn_hash_lookup_cis(struct hci_dev *hdev,
rcu_read_lock();
list_for_each_entry_rcu(c, &h->list, list) {
- if (c->type != ISO_LINK)
+ if (c->type != ISO_LINK || !bacmp(&c->dst, BDADDR_ANY))
continue;
/* Match CIG ID if set */
- if (cig != BT_ISO_QOS_CIG_UNSET && cig != c->iso_qos.ucast.cig)
+ if (cig != c->iso_qos.ucast.cig)
continue;
/* Match CIS ID if set */
- if (id != BT_ISO_QOS_CIS_UNSET && id != c->iso_qos.ucast.cis)
+ if (id != c->iso_qos.ucast.cis)
continue;
/* Match destination address if set */
@@ -1223,7 +1258,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_cig(struct hci_dev *hdev,
rcu_read_lock();
list_for_each_entry_rcu(c, &h->list, list) {
- if (c->type != ISO_LINK)
+ if (c->type != ISO_LINK || !bacmp(&c->dst, BDADDR_ANY))
continue;
if (handle == c->iso_qos.ucast.cig) {
@@ -1260,6 +1295,76 @@ static inline struct hci_conn *hci_conn_hash_lookup_big(struct hci_dev *hdev,
return NULL;
}
+static inline struct hci_conn *
+hci_conn_hash_lookup_big_state(struct hci_dev *hdev, __u8 handle, __u16 state)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (bacmp(&c->dst, BDADDR_ANY) || c->type != ISO_LINK ||
+ c->state != state)
+ continue;
+
+ if (handle == c->iso_qos.bcast.big) {
+ rcu_read_unlock();
+ return c;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+static inline struct hci_conn *
+hci_conn_hash_lookup_pa_sync_big_handle(struct hci_dev *hdev, __u8 big)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type != ISO_LINK ||
+ !test_bit(HCI_CONN_PA_SYNC, &c->flags))
+ continue;
+
+ if (c->iso_qos.bcast.big == big) {
+ rcu_read_unlock();
+ return c;
+ }
+ }
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+static inline struct hci_conn *
+hci_conn_hash_lookup_pa_sync_handle(struct hci_dev *hdev, __u16 sync_handle)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type != ISO_LINK ||
+ !test_bit(HCI_CONN_PA_SYNC, &c->flags))
+ continue;
+
+ if (c->sync_handle == sync_handle) {
+ rcu_read_unlock();
+ return c;
+ }
+ }
+ rcu_read_unlock();
+
+ return NULL;
+}
+
static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
__u8 type, __u16 state)
{
@@ -1301,6 +1406,26 @@ static inline void hci_conn_hash_list_state(struct hci_dev *hdev,
rcu_read_unlock();
}
+static inline void hci_conn_hash_list_flag(struct hci_dev *hdev,
+ hci_conn_func_t func, __u8 type,
+ __u8 flag, void *data)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ if (!func)
+ return;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type == type && test_bit(flag, &c->flags))
+ func(c, data);
+ }
+
+ rcu_read_unlock();
+}
+
static inline struct hci_conn *hci_lookup_le_connect(struct hci_dev *hdev)
{
struct hci_conn_hash *h = &hdev->conn_hash;
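
A hedged usage sketch of the new flag-filtered iterator; my_pa_sync_cb() is a
hypothetical hci_conn_func_t callback:

	/* Sketch: run a callback on every ISO link that has completed
	 * periodic-advertising sync.
	 */
	hci_conn_hash_list_flag(hdev, my_pa_sync_cb, ISO_LINK,
				HCI_CONN_PA_SYNC, NULL);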
@@ -1321,14 +1446,38 @@ static inline struct hci_conn *hci_lookup_le_connect(struct hci_dev *hdev)
return NULL;
}
+/* Returns true if an LE connection is in the scanning state */
+static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type == LE_LINK && c->state == BT_CONNECT &&
+ test_bit(HCI_CONN_SCANNING, &c->flags)) {
+ rcu_read_unlock();
+ return true;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return false;
+}
+
int hci_disconnect(struct hci_conn *conn, __u8 reason);
bool hci_setup_sync(struct hci_conn *conn, __u16 handle);
void hci_sco_setup(struct hci_conn *conn, __u8 status);
bool hci_iso_setup_path(struct hci_conn *conn);
-int hci_le_create_cis(struct hci_conn *conn);
+int hci_le_create_cis_pending(struct hci_dev *hdev);
+int hci_conn_check_create_cis(struct hci_conn *conn);
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
- u8 role);
+ u8 role, u16 handle);
+struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type,
+ bdaddr_t *dst, u8 role);
void hci_conn_del(struct hci_conn *conn);
void hci_conn_hash_flush(struct hci_dev *hdev);
void hci_conn_check_pending(struct hci_dev *hdev);
@@ -1352,6 +1501,9 @@ struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
__u16 setting, struct bt_codec *codec);
struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
__u8 dst_type, struct bt_iso_qos *qos);
+struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst,
+ struct bt_iso_qos *qos,
+ __u8 base_len, __u8 *base);
struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
__u8 dst_type, struct bt_iso_qos *qos);
struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
@@ -1359,7 +1511,8 @@ struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
__u8 data_len, __u8 *data);
int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type,
__u8 sid, struct bt_iso_qos *qos);
-int hci_le_big_create_sync(struct hci_dev *hdev, struct bt_iso_qos *qos,
+int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
+ struct bt_iso_qos *qos,
__u16 sync_handle, __u8 num_bis, __u8 bis[]);
int hci_conn_check_link_mode(struct hci_conn *conn);
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level);
@@ -1370,6 +1523,7 @@ int hci_conn_switch_role(struct hci_conn *conn, __u8 role);
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active);
void hci_conn_failed(struct hci_conn *conn, u8 status);
+u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle);
/*
* hci_conn_get() and hci_conn_put() are used to control the life-time of an
@@ -1573,7 +1727,11 @@ struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
bdaddr_t *addr, u8 addr_type);
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type);
void hci_conn_params_clear_disabled(struct hci_dev *hdev);
+void hci_conn_params_free(struct hci_conn_params *param);
+void hci_pend_le_list_del_init(struct hci_conn_params *param);
+void hci_pend_le_list_add(struct hci_conn_params *param,
+ struct list_head *list);
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
bdaddr_t *addr,
u8 addr_type);
@@ -1710,7 +1868,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define scan_2m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_2M) || \
((dev)->le_rx_def_phys & HCI_LE_SET_PHY_2M))
-#define le_coded_capable(dev) (((dev)->le_features[1] & HCI_LE_PHY_CODED))
+#define le_coded_capable(dev) (((dev)->le_features[1] & HCI_LE_PHY_CODED) && \
+ !test_bit(HCI_QUIRK_BROKEN_LE_CODED, \
+ &(dev)->quirks))
#define scan_coded(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_CODED) || \
((dev)->le_rx_def_phys & HCI_LE_SET_PHY_CODED))
@@ -1742,6 +1902,10 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
/* Extended advertising support */
#define ext_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_EXT_ADV))
+/* Maximum advertising length */
+#define max_adv_len(dev) \
+ (ext_adv_capable(dev) ? HCI_MAX_EXT_AD_LENGTH : HCI_MAX_AD_LENGTH)
+
/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 1789:
*
* C24: Mandatory if the LE Controller supports Connection State and either
@@ -1762,6 +1926,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define cis_peripheral_capable(dev) \
((dev)->le_features[3] & HCI_LE_CIS_PERIPHERAL)
#define bis_capable(dev) ((dev)->le_features[3] & HCI_LE_ISO_BROADCASTER)
+#define sync_recv_capable(dev) ((dev)->le_features[3] & HCI_LE_ISO_SYNC_RECEIVER)
#define mws_transport_config_capable(dev) (((dev)->commands[30] & 0x08) && \
(!test_bit(HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG, &(dev)->quirks)))
diff --git a/include/net/bluetooth/hci_mon.h b/include/net/bluetooth/hci_mon.h
index 2d5fcda1bcd0..082f89531b88 100644
--- a/include/net/bluetooth/hci_mon.h
+++ b/include/net/bluetooth/hci_mon.h
@@ -56,7 +56,7 @@ struct hci_mon_new_index {
__u8 type;
__u8 bus;
bdaddr_t bdaddr;
- char name[8];
+ char name[8] __nonstring;
} __packed;
#define HCI_MON_NEW_INDEX_SIZE 16
diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h
index 2495be4d8b82..6efbc2152146 100644
--- a/include/net/bluetooth/hci_sync.h
+++ b/include/net/bluetooth/hci_sync.h
@@ -5,6 +5,9 @@
* Copyright (C) 2021 Intel Corporation
*/
+#define UINT_PTR(_handle) ((void *)((uintptr_t)_handle))
+#define PTR_UINT(_ptr) ((uintptr_t)((void *)_ptr))
+
typedef int (*hci_cmd_sync_work_func_t)(struct hci_dev *hdev, void *data);
typedef void (*hci_cmd_sync_work_destroy_t)(struct hci_dev *hdev, void *data,
int err);
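
These helpers let a bare integer handle ride through the opaque void *data of
the cmd_sync machinery without an allocation. Sketch, assuming the existing
hci_cmd_sync_queue() API and a hypothetical worker:

	/* Sketch: pass a 16-bit handle through the sync-work data
	 * pointer and unpack it in the worker.
	 */
	static int my_sync_work(struct hci_dev *hdev, void *data)
	{
		u16 handle = PTR_UINT(data);

		return my_handle_op(hdev, handle);	/* illustrative */
	}

	hci_cmd_sync_queue(hdev, my_sync_work, UINT_PTR(handle), NULL);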
@@ -77,6 +80,8 @@ int hci_start_per_adv_sync(struct hci_dev *hdev, u8 instance, u8 data_len,
u8 *data, u32 flags, u16 min_interval,
u16 max_interval, u16 sync_interval);
+int hci_disable_per_advertising_sync(struct hci_dev *hdev, u8 instance);
+
int hci_remove_advertising_sync(struct hci_dev *hdev, struct sock *sk,
u8 instance, bool force);
int hci_disable_advertising_sync(struct hci_dev *hdev);
@@ -124,7 +129,7 @@ int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason);
int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn);
-int hci_le_create_cis_sync(struct hci_dev *hdev, struct hci_conn *conn);
+int hci_le_create_cis_sync(struct hci_dev *hdev);
int hci_le_remove_cig_sync(struct hci_dev *hdev, u8 handle);
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index a5801649f619..d382679efd2b 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -111,6 +111,8 @@ struct mgmt_rp_read_index_list {
#define MGMT_SETTING_WIDEBAND_SPEECH BIT(17)
#define MGMT_SETTING_CIS_CENTRAL BIT(18)
#define MGMT_SETTING_CIS_PERIPHERAL BIT(19)
+#define MGMT_SETTING_ISO_BROADCASTER BIT(20)
+#define MGMT_SETTING_ISO_SYNC_RECEIVER BIT(21)
#define MGMT_OP_READ_INFO 0x0004
#define MGMT_READ_INFO_SIZE 0
@@ -979,6 +981,7 @@ struct mgmt_ev_auth_failed {
#define MGMT_DEV_FOUND_NOT_CONNECTABLE BIT(2)
#define MGMT_DEV_FOUND_INITIATED_CONN BIT(3)
#define MGMT_DEV_FOUND_NAME_REQUEST_FAILED BIT(4)
+#define MGMT_DEV_FOUND_SCAN_RSP BIT(5)
#define MGMT_EV_DEVICE_FOUND 0x0012
struct mgmt_ev_device_found {
diff --git a/include/net/bluetooth/sco.h b/include/net/bluetooth/sco.h
index 1aa2e14b6c94..f40ddb4264fc 100644
--- a/include/net/bluetooth/sco.h
+++ b/include/net/bluetooth/sco.h
@@ -46,6 +46,4 @@ struct sco_conninfo {
__u8 dev_class[3];
};
-#define SCO_CMSG_PKT_STATUS 0x01
-
#endif /* __SCO_H */
diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
index a016f275cb01..c5e57c6bd873 100644
--- a/include/net/bond_3ad.h
+++ b/include/net/bond_3ad.h
@@ -301,7 +301,6 @@ int __bond_3ad_get_active_agg_info(struct bonding *bond,
int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
struct slave *slave);
int bond_3ad_set_carrier(struct bonding *bond);
-void bond_3ad_update_lacp_active(struct bonding *bond);
void bond_3ad_update_lacp_rate(struct bonding *bond);
void bond_3ad_update_ad_actor_settings(struct bonding *bond);
int bond_3ad_stats_fill(struct sk_buff *skb, struct bond_3ad_stats *stats);
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 59955ac33157..5b8b1b644a2d 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-1.0+ */
/*
* Bond several ethernet interfaces into a Cisco, running 'Etherchannel'.
*
@@ -7,9 +8,6 @@
* BUT, I'm the one who modified it for ethernet, so:
* (c) Copyright 1999, Thomas Davis, tadavis@lbl.gov
*
- * This software may be used and distributed according to the terms
- * of the GNU Public License, incorporated herein by reference.
- *
*/
#ifndef _NET_BONDING_H
@@ -279,7 +277,7 @@ struct bond_vlan_tag {
unsigned short vlan_id;
};
-/**
+/*
* Returns NULL if the net_device does not belong to any of the bond's slaves
*
* Caller must hold bond lock for read
@@ -724,23 +722,14 @@ static inline struct slave *bond_slave_has_mac(struct bonding *bond,
}
/* Caller must hold rcu_read_lock() for read */
-static inline bool bond_slave_has_mac_rx(struct bonding *bond, const u8 *mac)
+static inline bool bond_slave_has_mac_rcu(struct bonding *bond, const u8 *mac)
{
struct list_head *iter;
struct slave *tmp;
- struct netdev_hw_addr *ha;
bond_for_each_slave_rcu(bond, tmp, iter)
if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
return true;
-
- if (netdev_uc_empty(bond->dev))
- return false;
-
- netdev_for_each_uc_addr(ha, bond->dev)
- if (ether_addr_equal_64bits(mac, ha->addr))
- return true;
-
return false;
}
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index f90f0021f5f2..4dabeb6c76d3 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -16,6 +16,7 @@
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
#include <net/ip.h>
+#include <net/xdp.h>
/* 0 - Reserved to indicate value not set
* 1..NR_CPUS - Reserved for sender_cpu
diff --git a/include/net/caif/cfsrvl.h b/include/net/caif/cfsrvl.h
index bd5440977f7f..5ee7b322e18b 100644
--- a/include/net/caif/cfsrvl.h
+++ b/include/net/caif/cfsrvl.h
@@ -33,9 +33,6 @@ struct cflayer *cfrfml_create(u8 linkid, struct dev_info *dev_info,
int mtu_size);
struct cflayer *cfdbgl_create(u8 linkid, struct dev_info *dev_info);
-void cfsrvl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
- int phyid);
-
bool cfsrvl_phyid_match(struct cflayer *layer, int phyid);
void cfsrvl_init(struct cfsrvl *service,
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 9e04f69712b1..2b54fdd8ca15 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -7,7 +7,7 @@
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2021, 2023 Intel Corporation
*/
#include <linux/ethtool.h>
@@ -52,7 +52,7 @@
* such wiphy can have zero, one, or many virtual interfaces associated with
* it, which need to be identified as such by pointing the network interface's
* @ieee80211_ptr pointer to a &struct wireless_dev which further describes
- * the wireless part of the interface, normally this struct is embedded in the
+ * the wireless part of the interface. Normally this struct is embedded in the
* network interface's private data area. Drivers can optionally allow creating
* or destroying virtual interfaces on the fly, but without at least one or the
* ability to create some the wireless device isn't useful.
@@ -76,6 +76,8 @@ struct wiphy;
* @IEEE80211_CHAN_DISABLED: This channel is disabled.
* @IEEE80211_CHAN_NO_IR: do not initiate radiation, this includes
* sending probe requests or beaconing.
+ * @IEEE80211_CHAN_PSD: Power spectral density (in dBm) is set for this
+ * channel.
* @IEEE80211_CHAN_RADAR: Radar detection is required on this channel.
* @IEEE80211_CHAN_NO_HT40PLUS: extension channel above this channel
* is not permitted.
@@ -115,11 +117,16 @@ struct wiphy;
* This may be due to the driver or due to regulatory bandwidth
* restrictions.
* @IEEE80211_CHAN_NO_EHT: EHT operation is not permitted on this channel.
+ * @IEEE80211_CHAN_DFS_CONCURRENT: See %NL80211_RRF_DFS_CONCURRENT
+ * @IEEE80211_CHAN_NO_UHB_VLP_CLIENT: Client connection with VLP AP
+ * not permitted using this channel
+ * @IEEE80211_CHAN_NO_UHB_AFC_CLIENT: Client connection with AFC AP
+ * not permitted using this channel
*/
enum ieee80211_channel_flags {
IEEE80211_CHAN_DISABLED = 1<<0,
IEEE80211_CHAN_NO_IR = 1<<1,
- /* hole at 1<<2 */
+ IEEE80211_CHAN_PSD = 1<<2,
IEEE80211_CHAN_RADAR = 1<<3,
IEEE80211_CHAN_NO_HT40PLUS = 1<<4,
IEEE80211_CHAN_NO_HT40MINUS = 1<<5,
@@ -138,6 +145,9 @@ enum ieee80211_channel_flags {
IEEE80211_CHAN_16MHZ = 1<<18,
IEEE80211_CHAN_NO_320MHZ = 1<<19,
IEEE80211_CHAN_NO_EHT = 1<<20,
+ IEEE80211_CHAN_DFS_CONCURRENT = 1<<21,
+ IEEE80211_CHAN_NO_UHB_VLP_CLIENT= 1<<22,
+ IEEE80211_CHAN_NO_UHB_AFC_CLIENT= 1<<23,
};
#define IEEE80211_CHAN_NO_HT40 \
@@ -171,6 +181,7 @@ enum ieee80211_channel_flags {
* on this channel.
* @dfs_state_entered: timestamp (jiffies) when the dfs state was entered.
* @dfs_cac_ms: DFS CAC time in milliseconds, this is valid for DFS channels.
+ * @psd: power spectral density (in dBm)
*/
struct ieee80211_channel {
enum nl80211_band band;
@@ -187,6 +198,7 @@ struct ieee80211_channel {
enum nl80211_dfs_state dfs_state;
unsigned long dfs_state_entered;
unsigned int dfs_cac_ms;
+ s8 psd;
};
/**
@@ -263,7 +275,7 @@ enum ieee80211_privacy {
* are only for driver use when pointers to this structure are
* passed around.
*
- * @flags: rate-specific flags
+ * @flags: rate-specific flags from &enum ieee80211_rate_flags
* @bitrate: bitrate in units of 100 Kbps
* @hw_value: driver/hardware value for this rate
* @hw_value_short: driver/hardware value for this rate when
@@ -410,6 +422,19 @@ struct ieee80211_sta_eht_cap {
u8 eht_ppe_thres[IEEE80211_EHT_PPE_THRES_MAX_LEN];
};
+/* sparse defines __CHECKER__; see Documentation/dev-tools/sparse.rst */
+#ifdef __CHECKER__
+/*
+ * This is used to mark the sband->iftype_data pointer which is supposed
+ * to be an array with special access semantics (per iftype), but a lot
+ * of code got it wrong in the past, so with this marking sparse will be
+ * noisy when the pointer is used directly.
+ */
+# define __iftd __attribute__((noderef, address_space(__iftype_data)))
+#else
+# define __iftd
+#endif /* __CHECKER__ */
+
/**
* struct ieee80211_sband_iftype_data - sband data per interface type
*
@@ -543,10 +568,48 @@ struct ieee80211_supported_band {
struct ieee80211_sta_s1g_cap s1g_cap;
struct ieee80211_edmg edmg_cap;
u16 n_iftype_data;
- const struct ieee80211_sband_iftype_data *iftype_data;
+ const struct ieee80211_sband_iftype_data __iftd *iftype_data;
};
/**
+ * _ieee80211_set_sband_iftype_data - set sband iftype data array
+ * @sband: the sband to initialize
+ * @iftd: the iftype data array pointer
+ * @n_iftd: the length of the iftype data array
+ *
+ * Set the sband iftype data array; use this where the length cannot
+ * be derived from the ARRAY_SIZE() of the argument, but prefer
+ * ieee80211_set_sband_iftype_data() where it can be used.
+ */
+static inline void
+_ieee80211_set_sband_iftype_data(struct ieee80211_supported_band *sband,
+ const struct ieee80211_sband_iftype_data *iftd,
+ u16 n_iftd)
+{
+ sband->iftype_data = (const void __iftd __force *)iftd;
+ sband->n_iftype_data = n_iftd;
+}
+
+/**
+ * ieee80211_set_sband_iftype_data - set sband iftype data array
+ * @sband: the sband to initialize
+ * @iftd: the iftype data array
+ */
+#define ieee80211_set_sband_iftype_data(sband, iftd) \
+ _ieee80211_set_sband_iftype_data(sband, iftd, ARRAY_SIZE(iftd))
+
+/**
+ * for_each_sband_iftype_data - iterate sband iftype data entries
+ * @sband: the sband whose iftype_data array to iterate
+ * @i: iterator counter
+ * @iftd: iftype data pointer to set
+ */
+#define for_each_sband_iftype_data(sband, i, iftd) \
+ for (i = 0, iftd = (const void __force *)&(sband)->iftype_data[i]; \
+ i < (sband)->n_iftype_data; \
+ i++, iftd = (const void __force *)&(sband)->iftype_data[i])
+
+/**
* ieee80211_get_sband_iftype_data - return sband data for a given iftype
* @sband: the sband to search for the STA on
* @iftype: enum nl80211_iftype
@@ -557,15 +620,16 @@ static inline const struct ieee80211_sband_iftype_data *
ieee80211_get_sband_iftype_data(const struct ieee80211_supported_band *sband,
u8 iftype)
{
+ const struct ieee80211_sband_iftype_data *data;
int i;
if (WARN_ON(iftype >= NL80211_IFTYPE_MAX))
return NULL;
- for (i = 0; i < sband->n_iftype_data; i++) {
- const struct ieee80211_sband_iftype_data *data =
- &sband->iftype_data[i];
+ if (iftype == NL80211_IFTYPE_AP_VLAN)
+ iftype = NL80211_IFTYPE_AP;
+ for_each_sband_iftype_data(sband, i, data) {
if (data->types_mask & BIT(iftype))
return data;
}
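
Drivers are expected to publish their per-iftype capabilities through the
setter and walk them with the iterator instead of dereferencing the
sparse-annotated pointer directly. Sketch (my_iftype_caps is a hypothetical
static array in the driver):

	/* Sketch: ARRAY_SIZE() is taken by the macro, so the length
	 * cannot go stale; the iterator casts the annotation away.
	 */
	int i;
	const struct ieee80211_sband_iftype_data *iftd;

	ieee80211_set_sband_iftype_data(sband, my_iftype_caps);

	for_each_sband_iftype_data(sband, i, iftd) {
		if (iftd->types_mask & BIT(NL80211_IFTYPE_STATION))
			my_setup_sta_caps(iftd);	/* illustrative */
	}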
@@ -808,7 +872,7 @@ struct cfg80211_tid_cfg {
struct cfg80211_tid_config {
const u8 *peer;
u32 n_tid_conf;
- struct cfg80211_tid_cfg tid_conf[];
+ struct cfg80211_tid_cfg tid_conf[] __counted_by(n_tid_conf);
};
/**
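
__counted_by() ties the flexible array's bounds to n_tid_conf for
FORTIFY/UBSAN checking, so allocations should size the array and set the
counter together. Sketch using the standard struct_size() idiom:

	/* Sketch: allocate space for n entries and record the count the
	 * checker will bound accesses against.
	 */
	struct cfg80211_tid_config *conf;

	conf = kzalloc(struct_size(conf, tid_conf, n), GFP_KERNEL);
	if (!conf)
		return -ENOMEM;
	conf->n_tid_conf = n;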
@@ -921,6 +985,15 @@ cfg80211_chandef_compatible(const struct cfg80211_chan_def *chandef1,
const struct cfg80211_chan_def *chandef2);
/**
+ * nl80211_chan_width_to_mhz - get the channel width in MHz
+ * @chan_width: the channel width from &enum nl80211_chan_width
+ *
+ * Return: channel width in MHz if the chan_width from &enum nl80211_chan_width
+ * is valid. -1 otherwise.
+ */
+int nl80211_chan_width_to_mhz(enum nl80211_chan_width chan_width);
+
+/**
* cfg80211_chandef_valid - check if a channel definition is valid
* @chandef: the channel definition to check
* Return: %true if the channel definition is valid. %false otherwise.
@@ -951,6 +1024,30 @@ int cfg80211_chandef_dfs_required(struct wiphy *wiphy,
enum nl80211_iftype iftype);
/**
+ * cfg80211_chandef_dfs_usable - checks if chandef is DFS usable and we
+ *				 can/need to start CAC on such a channel
+ * @wiphy: the wiphy to validate against
+ * @chandef: the channel definition to check
+ *
+ * Return: true if all channels are available and at least
+ *	   one channel requires CAC (NL80211_DFS_USABLE)
+ */
+bool cfg80211_chandef_dfs_usable(struct wiphy *wiphy,
+ const struct cfg80211_chan_def *chandef);
+
+/**
+ * cfg80211_chandef_dfs_cac_time - get the DFS CAC time (in ms) for given
+ * channel definition
+ * @wiphy: the wiphy to validate against
+ * @chandef: the channel definition to check
+ *
+ * Returns: DFS CAC time (in ms) which applies for this channel definition
+ */
+unsigned int
+cfg80211_chandef_dfs_cac_time(struct wiphy *wiphy,
+ const struct cfg80211_chan_def *chandef);
+
+/**
* nl80211_send_chandef - sends the channel definition.
* @msg: the msg to send channel definition
* @chandef: the channel definition to check
@@ -1184,7 +1281,7 @@ struct cfg80211_mbssid_elems {
struct {
const u8 *data;
size_t len;
- } elem[];
+ } elem[] __counted_by(cnt);
};
/**
@@ -1201,7 +1298,7 @@ struct cfg80211_rnr_elems {
struct {
const u8 *data;
size_t len;
- } elem[];
+ } elem[] __counted_by(cnt);
};
/**
@@ -1279,13 +1376,14 @@ struct cfg80211_acl_data {
int n_acl_entries;
/* Keep it last */
- struct mac_address mac_addrs[];
+ struct mac_address mac_addrs[] __counted_by(n_acl_entries);
};
/**
* struct cfg80211_fils_discovery - FILS discovery parameters from
* IEEE Std 802.11ai-2016, Annex C.3 MIB detail.
*
+ * @update: Set to true if the feature configuration should be updated.
* @min_interval: Minimum packet interval in TUs (0 - 10000)
* @max_interval: Maximum packet interval in TUs (0 - 10000)
* @tmpl_len: Template length
@@ -1293,6 +1391,7 @@ struct cfg80211_acl_data {
* frame headers.
*/
struct cfg80211_fils_discovery {
+ bool update;
u32 min_interval;
u32 max_interval;
size_t tmpl_len;
@@ -1303,6 +1402,7 @@ struct cfg80211_fils_discovery {
* struct cfg80211_unsol_bcast_probe_resp - Unsolicited broadcast probe
* response parameters in 6GHz.
*
+ * @update: Set to true if the feature configuration should be updated.
* @interval: Packet interval in TUs. Maximum allowed is 20 TU, as mentioned
* in IEEE P802.11ax/D6.0 26.17.2.3.2 - AP behavior for fast passive
* scanning
@@ -1310,6 +1410,7 @@ struct cfg80211_fils_discovery {
* @tmpl: Template data for probe response
*/
struct cfg80211_unsol_bcast_probe_resp {
+ bool update;
u32 interval;
size_t tmpl_len;
const u8 *tmpl;
@@ -1350,7 +1451,7 @@ struct cfg80211_unsol_bcast_probe_resp {
* @twt_responder: Enable Target Wait Time
* @he_required: stations must support HE
* @sae_h2e_required: stations must support direct H2E technique in SAE
- * @flags: flags, as defined in enum cfg80211_ap_settings_flags
+ * @flags: flags, as defined in &enum nl80211_ap_settings_flags
* @he_obss_pd: OBSS Packet Detection settings
* @he_oper: HE operation IE (or %NULL if HE isn't enabled)
* @fils_discovery: FILS discovery transmission parameters
@@ -1396,6 +1497,22 @@ struct cfg80211_ap_settings {
u16 punct_bitmap;
};
+
+/**
+ * struct cfg80211_ap_update - AP configuration update
+ *
+ * Subset of &struct cfg80211_ap_settings, for updating a running AP.
+ *
+ * @beacon: beacon data
+ * @fils_discovery: FILS discovery transmission parameters
+ * @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters
+ */
+struct cfg80211_ap_update {
+ struct cfg80211_beacon_data beacon;
+ struct cfg80211_fils_discovery fils_discovery;
+ struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp;
+};
+
/**
* struct cfg80211_csa_settings - channel switch settings
*
@@ -1479,7 +1596,6 @@ struct iface_combination_params {
* @STATION_PARAM_APPLY_UAPSD: apply new uAPSD parameters (uapsd_queues, max_sp)
* @STATION_PARAM_APPLY_CAPABILITY: apply new capability
* @STATION_PARAM_APPLY_PLINK_STATE: apply new plink state
- * @STATION_PARAM_APPLY_STA_TXPOWER: apply tx power for STA
*
* Not all station parameters have in-band "no change" signalling,
 * for those that don't, these flags are used.
@@ -1566,6 +1682,21 @@ struct link_station_del_parameters {
};
/**
+ * struct cfg80211_ttlm_params: TID to link mapping parameters
+ *
+ * Used for setting a TID to link mapping.
+ *
+ * @dlink: Downlink TID to link mapping, as defined in section 9.4.2.314
+ * (TID-To-Link Mapping element) in Draft P802.11be_D4.0.
+ * @ulink: Uplink TID to link mapping, as defined in section 9.4.2.314
+ * (TID-To-Link Mapping element) in Draft P802.11be_D4.0.
+ */
+struct cfg80211_ttlm_params {
+ u16 dlink[8];
+ u16 ulink[8];
+};
+
+/**
* struct station_parameters - station parameters
*
* Used to change and create a new station.
@@ -1702,6 +1833,7 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
* @RATE_INFO_FLAGS_EDMG: 60GHz MCS in EDMG mode
* @RATE_INFO_FLAGS_EXTENDED_SC_DMG: 60GHz extended SC MCS
* @RATE_INFO_FLAGS_EHT_MCS: EHT MCS information
+ * @RATE_INFO_FLAGS_S1G_MCS: MCS field filled with S1G MCS
*/
enum rate_info_flags {
RATE_INFO_FLAGS_MCS = BIT(0),
@@ -1712,6 +1844,7 @@ enum rate_info_flags {
RATE_INFO_FLAGS_EDMG = BIT(5),
RATE_INFO_FLAGS_EXTENDED_SC_DMG = BIT(6),
RATE_INFO_FLAGS_EHT_MCS = BIT(7),
+ RATE_INFO_FLAGS_S1G_MCS = BIT(8),
};
/**
@@ -1728,6 +1861,11 @@ enum rate_info_flags {
* @RATE_INFO_BW_HE_RU: bandwidth determined by HE RU allocation
* @RATE_INFO_BW_320: 320 MHz bandwidth
* @RATE_INFO_BW_EHT_RU: bandwidth determined by EHT RU allocation
+ * @RATE_INFO_BW_1: 1 MHz bandwidth
+ * @RATE_INFO_BW_2: 2 MHz bandwidth
+ * @RATE_INFO_BW_4: 4 MHz bandwidth
+ * @RATE_INFO_BW_8: 8 MHz bandwidth
+ * @RATE_INFO_BW_16: 16 MHz bandwidth
*/
enum rate_info_bw {
RATE_INFO_BW_20 = 0,
@@ -1739,6 +1877,11 @@ enum rate_info_bw {
RATE_INFO_BW_HE_RU,
RATE_INFO_BW_320,
RATE_INFO_BW_EHT_RU,
+ RATE_INFO_BW_1,
+ RATE_INFO_BW_2,
+ RATE_INFO_BW_4,
+ RATE_INFO_BW_8,
+ RATE_INFO_BW_16,
};
/**
@@ -1747,8 +1890,8 @@ enum rate_info_bw {
* Information about a receiving or transmitting bitrate
*
* @flags: bitflag of flags from &enum rate_info_flags
- * @mcs: mcs index if struct describes an HT/VHT/HE rate
* @legacy: bitrate in 100kbit/s for 802.11abg
+ * @mcs: mcs index if struct describes an HT/VHT/HE/EHT/S1G rate
* @nss: number of streams (VHT & HE only)
* @bw: bandwidth (from &enum rate_info_bw)
* @he_gi: HE guard interval (from &enum nl80211_he_gi)
@@ -1761,9 +1904,9 @@ enum rate_info_bw {
* only valid if bw is %RATE_INFO_BW_EHT_RU)
*/
struct rate_info {
- u8 flags;
- u8 mcs;
+ u16 flags;
u16 legacy;
+ u8 mcs;
u8 nss;
u8 bw;
u8 he_gi;
@@ -2141,7 +2284,7 @@ enum mpath_info_flags {
* @sn: target sequence number
* @metric: metric (cost) of this mesh path
* @exptime: expiration time for the mesh path from now, in msecs
- * @flags: mesh path flags
+ * @flags: mesh path flags from &enum mesh_path_flags
* @discovery_timeout: total mesh path discovery timeout, in msecs
* @discovery_retries: mesh path discovery retries
* @generation: generation number for nl80211 dumps.
@@ -2332,7 +2475,7 @@ struct mesh_config {
* @user_mpm: userspace handles all MPM functions
* @dtim_period: DTIM period to use
* @beacon_interval: beacon interval to use
- * @mcast_rate: multicat rate for Mesh Node [6Mbps is the default for 802.11a]
+ * @mcast_rate: multicast rate for Mesh Node [6Mbps is the default for 802.11a]
* @basic_rates: basic rates to use when creating the mesh
* @beacon_rate: bitrate to be used for beacons
* @userspace_handles_dfs: whether user space controls DFS operation, i.e.
@@ -2449,11 +2592,12 @@ struct cfg80211_scan_info {
* @short_ssid: short ssid to scan for
* @bssid: bssid to scan for
* @channel_idx: idx of the channel in the channel array in the scan request
- * which the above info relvant to
+ * which the above info is relevant to
* @unsolicited_probe: the AP transmits unsolicited probe response every 20 TU
* @short_ssid_valid: @short_ssid is valid and can be used
* @psc_no_listen: when set, and the channel is a PSC channel, no need to wait
* 20 TUs before starting to send probe requests.
+ * @psd_20: The AP's 20 MHz PSD value.
*/
struct cfg80211_scan_6ghz_params {
u32 short_ssid;
@@ -2462,6 +2606,7 @@ struct cfg80211_scan_6ghz_params {
bool unsolicited_probe;
bool short_ssid_valid;
bool psc_no_listen;
+ s8 psd_20;
};
/**
@@ -2471,7 +2616,6 @@ struct cfg80211_scan_6ghz_params {
* @n_ssids: number of SSIDs
* @channels: channels to scan on.
* @n_channels: total number of channels to scan
- * @scan_width: channel width for scanning
* @ie: optional information element(s) to add into Probe Request or %NULL
* @ie_len: length of ie in octets
* @duration: how long to listen on each channel, in TUs. If
@@ -2479,7 +2623,7 @@ struct cfg80211_scan_6ghz_params {
* the actual dwell time may be shorter.
* @duration_mandatory: if set, the scan duration must be as specified by the
* %duration field.
- * @flags: bit field of flags controlling operation
+ * @flags: control flags from &enum nl80211_scan_flags
* @rates: bitmap of rates to advertise for each band
* @wiphy: the wiphy this was for
* @scan_start: time (in jiffies) when the scan started
@@ -2496,12 +2640,13 @@ struct cfg80211_scan_6ghz_params {
* @n_6ghz_params: number of 6 GHz params
* @scan_6ghz_params: 6 GHz params
* @bssid: BSSID to scan for (most commonly, the wildcard BSSID)
+ * @tsf_report_link_id: for MLO, indicates the link ID of the BSS that should be
+ * used for TSF reporting. Can be set to -1 to indicate no preference.
*/
struct cfg80211_scan_request {
struct cfg80211_ssid *ssids;
int n_ssids;
u32 n_channels;
- enum nl80211_bss_scan_width scan_width;
const u8 *ie;
size_t ie_len;
u16 duration;
@@ -2525,9 +2670,10 @@ struct cfg80211_scan_request {
bool scan_6ghz;
u32 n_6ghz_params;
struct cfg80211_scan_6ghz_params *scan_6ghz_params;
+ s8 tsf_report_link_id;
/* keep last */
- struct ieee80211_channel *channels[];
+ struct ieee80211_channel *channels[] __counted_by(n_channels);
};
static inline void get_random_mask_addr(u8 *buf, const u8 *addr, const u8 *mask)
@@ -2550,7 +2696,7 @@ static inline void get_random_mask_addr(u8 *buf, const u8 *addr, const u8 *mask)
* or no match (RSSI only)
* @rssi_thold: don't report scan results below this threshold (in s32 dBm)
* @per_band_rssi_thold: Minimum rssi threshold for each band to be applied
- * for filtering out scan results received. Drivers advertize this support
+ * for filtering out scan results received. Drivers advertise this support
* of band specific rssi based filtering through the feature capability
* %NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD. These band
* specific rssi thresholds take precedence over rssi_thold, if specified.
@@ -2596,14 +2742,13 @@ struct cfg80211_bss_select_adjust {
* @ssids: SSIDs to scan for (passed in the probe_reqs in active scans)
* @n_ssids: number of SSIDs
* @n_channels: total number of channels to scan
- * @scan_width: channel width for scanning
* @ie: optional information element(s) to add into Probe Request or %NULL
* @ie_len: length of ie in octets
- * @flags: bit field of flags controlling operation
+ * @flags: control flags from &enum nl80211_scan_flags
* @match_sets: sets of parameters to be matched for a scan result
* entry to be considered valid and to be passed to the host
* (others are filtered out).
- * If ommited, all results are passed.
+ * If omitted, all results are passed.
* @n_match_sets: number of match sets
* @report_results: indicates that results were reported for this request
* @wiphy: the wiphy this was for
@@ -2637,14 +2782,13 @@ struct cfg80211_bss_select_adjust {
* to the specified band while deciding whether a better BSS is reported
* using @relative_rssi. If delta is a negative number, the BSSs that
* belong to the specified band will be penalized by delta dB in relative
- * comparisions.
+ * comparisons.
*/
struct cfg80211_sched_scan_request {
u64 reqid;
struct cfg80211_ssid *ssids;
int n_ssids;
u32 n_channels;
- enum nl80211_bss_scan_width scan_width;
const u8 *ie;
size_t ie_len;
u32 flags;
@@ -2692,7 +2836,6 @@ enum cfg80211_signal_type {
/**
* struct cfg80211_inform_bss - BSS inform data
* @chan: channel the frame was received on
- * @scan_width: scan width that was used
* @signal: signal strength value, according to the wiphy's
* signal type
* @boottime_ns: timestamp (CLOCK_BOOTTIME) when the information was
@@ -2708,16 +2851,28 @@ enum cfg80211_signal_type {
* the BSS that requested the scan in which the beacon/probe was received.
* @chains: bitmask for filled values in @chain_signal.
* @chain_signal: per-chain signal strength of last received BSS in dBm.
+ * @restrict_use: restrict usage; if not set, assume @use_for is
+ * %NL80211_BSS_USE_FOR_NORMAL.
+ * @use_for: bitmap of possible usage for this BSS, see
+ * &enum nl80211_bss_use_for
+ * @cannot_use_reasons: the reasons (bitmap) for not being able to connect,
+ * if @restrict_use is set and @use_for is zero (empty); may be 0 for
+ * unspecified reasons; see &enum nl80211_bss_cannot_use_reasons
+ * @drv_data: Data to be passed through to @inform_bss
*/
struct cfg80211_inform_bss {
struct ieee80211_channel *chan;
- enum nl80211_bss_scan_width scan_width;
s32 signal;
u64 boottime_ns;
u64 parent_tsf;
u8 parent_bssid[ETH_ALEN] __aligned(2);
u8 chains;
s8 chain_signal[IEEE80211_MAX_CHAINS];
+
+ u8 restrict_use:1, use_for:7;
+ u8 cannot_use_reasons;
+
+ void *drv_data;
};
/**
@@ -2743,7 +2898,6 @@ struct cfg80211_bss_ies {
* for use in scan results and similar.
*
* @channel: channel this BSS is on
- * @scan_width: width of the control channel
* @bssid: BSSID of the BSS
* @beacon_interval: the beacon interval as from the frame
* @capability: the capability field in host byte order
@@ -2756,6 +2910,8 @@ struct cfg80211_bss_ies {
* own the beacon_ies, but they're just pointers to the ones from the
* @hidden_beacon_bss struct)
* @proberesp_ies: the information elements from the last Probe Response frame
+ * @proberesp_ecsa_stuck: ECSA element is stuck in the Probe Response frame,
+ * cannot rely on it having valid data
* @hidden_beacon_bss: in case this BSS struct represents a probe response from
* a BSS that hides the SSID in its beacon, this points to the BSS struct
* that holds the beacon data. @beacon_ies is still valid, of course, and
@@ -2769,11 +2925,15 @@ struct cfg80211_bss_ies {
* @chain_signal: per-chain signal strength of last received BSS in dBm.
* @bssid_index: index in the multiple BSS set
* @max_bssid_indicator: max number of members in the BSS set
+ * @use_for: bitmap of possible usage for this BSS, see
+ * &enum nl80211_bss_use_for
+ * @cannot_use_reasons: the reasons (bitmap) for not being able to connect,
+ * if @restrict_use is set and @use_for is zero (empty); may be 0 for
+ * unspecified reasons; see &enum nl80211_bss_cannot_use_reasons
* @priv: private area for driver use, has at least wiphy->bss_priv_size bytes
*/
struct cfg80211_bss {
struct ieee80211_channel *channel;
- enum nl80211_bss_scan_width scan_width;
const struct cfg80211_bss_ies __rcu *ies;
const struct cfg80211_bss_ies __rcu *beacon_ies;
@@ -2792,9 +2952,14 @@ struct cfg80211_bss {
u8 chains;
s8 chain_signal[IEEE80211_MAX_CHAINS];
+ u8 proberesp_ecsa_stuck:1;
+
u8 bssid_index;
u8 max_bssid_indicator;
+ u8 use_for;
+ u8 cannot_use_reasons;
+
u8 priv[] __aligned(sizeof(void *));
};
@@ -2870,11 +3035,17 @@ struct cfg80211_auth_request {
* if this is %NULL for a link, that link is not requested
* @elems: extra elements for the per-STA profile for this link
* @elems_len: length of the elements
+ * @disabled: If set, this link should be included during association etc., but it
+ * should not be used until enabled by the AP MLD.
+ * @error: per-link error code, must be <= 0. If there is an error, then the
+ * operation as a whole must fail.
*/
struct cfg80211_assoc_link {
struct cfg80211_bss *bss;
const u8 *elems;
size_t elems_len;
+ bool disabled;
+ int error;
};
/**
@@ -3066,8 +3237,8 @@ struct cfg80211_ibss_params {
*
* @behaviour: requested BSS selection behaviour.
 * @param: parameters for requested behaviour.
- * @band_pref: preferred band for %NL80211_BSS_SELECT_ATTR_BAND_PREF.
- * @adjust: parameters for %NL80211_BSS_SELECT_ATTR_RSSI_ADJUST.
+ * @param.band_pref: preferred band for %NL80211_BSS_SELECT_ATTR_BAND_PREF.
+ * @param.adjust: parameters for %NL80211_BSS_SELECT_ATTR_RSSI_ADJUST.
*/
struct cfg80211_bss_selection {
enum nl80211_bss_select_attr behaviour;
@@ -3473,7 +3644,7 @@ struct cfg80211_update_ft_ies_params {
* This structure provides information needed to transmit a mgmt frame
*
* @chan: channel to use
- * @offchan: indicates wether off channel operation is required
+ * @offchan: indicates whether off channel operation is required
* @wait: duration for ROC
* @buf: buffer to transmit
* @len: buffer length
@@ -3591,7 +3762,7 @@ struct cfg80211_nan_func_filter {
* @publish_bcast: if true, the solicited publish should be broadcasted
* @subscribe_active: if true, the subscribe is active
* @followup_id: the instance ID for follow up
- * @followup_reqid: the requestor instance ID for follow up
+ * @followup_reqid: the requester instance ID for follow up
* @followup_dest: MAC address of the recipient of the follow up
* @ttl: time to live counter in DW.
* @serv_spec_info: Service Specific Info
@@ -3925,7 +4096,7 @@ struct cfg80211_pmsr_request {
struct list_head list;
- struct cfg80211_pmsr_request_peer peers[];
+ struct cfg80211_pmsr_request_peer peers[] __counted_by(n_peers);
};
/**
@@ -4086,6 +4257,13 @@ struct mgmt_frame_regs {
*
* @change_bss: Modify parameters for a given BSS.
*
+ * @inform_bss: Called by cfg80211 while being informed about new BSS data
+ * for every BSS found within the reported data or frame. This is called
+ * from within the cfg80211 inform_bss handlers while holding the bss_lock.
+ * The data parameter is passed through from drv_data inside
+ * struct cfg80211_inform_bss.
+ * The new IE data for the BSS is explicitly passed.
+ *
* @set_txq_params: Set TX queue parameters
*
* @libertas_set_mesh_channel: Only for backward compatibility for libertas,
@@ -4372,6 +4550,7 @@ struct mgmt_frame_regs {
* @del_link_station: Remove a link of a station.
*
* @set_hw_timestamp: Enable/disable HW timestamping of TM/FTM frames.
+ * @set_ttlm: set the TID to link mapping.
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -4421,7 +4600,7 @@ struct cfg80211_ops {
int (*start_ap)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_ap_settings *settings);
int (*change_beacon)(struct wiphy *wiphy, struct net_device *dev,
- struct cfg80211_beacon_data *info);
+ struct cfg80211_ap_update *info);
int (*stop_ap)(struct wiphy *wiphy, struct net_device *dev,
unsigned int link_id);
@@ -4473,6 +4652,9 @@ struct cfg80211_ops {
int (*change_bss)(struct wiphy *wiphy, struct net_device *dev,
struct bss_parameters *params);
+ void (*inform_bss)(struct wiphy *wiphy, struct cfg80211_bss *bss,
+ const struct cfg80211_bss_ies *ies, void *data);
+
int (*set_txq_params)(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_txq_params *params);
@@ -4592,9 +4774,10 @@ struct cfg80211_ops {
struct cfg80211_gtk_rekey_data *data);
int (*tdls_mgmt)(struct wiphy *wiphy, struct net_device *dev,
- const u8 *peer, u8 action_code, u8 dialog_token,
- u16 status_code, u32 peer_capability,
- bool initiator, const u8 *buf, size_t len);
+ const u8 *peer, int link_id,
+ u8 action_code, u8 dialog_token, u16 status_code,
+ u32 peer_capability, bool initiator,
+ const u8 *buf, size_t len);
int (*tdls_oper)(struct wiphy *wiphy, struct net_device *dev,
const u8 *peer, enum nl80211_tdls_operation oper);
@@ -4727,6 +4910,8 @@ struct cfg80211_ops {
struct link_station_del_parameters *params);
int (*set_hw_timestamp)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_set_hw_timestamp *hwts);
+ int (*set_ttlm)(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_ttlm_params *params);
};
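
A minimal sketch of a driver hooking the new @inform_bss op; it runs under the bss_lock, so it must not sleep. All mydrv_* names are hypothetical:

struct mydrv_bss_priv {
	unsigned long last_seen;	/* lives in bss->priv (wiphy->bss_priv_size bytes) */
};

/* Called with the bss_lock held -- keep it short and non-sleeping */
static void mydrv_inform_bss(struct wiphy *wiphy, struct cfg80211_bss *bss,
			     const struct cfg80211_bss_ies *ies, void *drv_data)
{
	struct mydrv_bss_priv *priv = (void *)bss->priv;

	priv->last_seen = jiffies;
}

static const struct cfg80211_ops mydrv_cfg_ops = {
	/* ... other ops elided ... */
	.inform_bss = mydrv_inform_bss,
};
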
/*
@@ -4783,6 +4968,10 @@ struct cfg80211_ops {
* @WIPHY_FLAG_SUPPORTS_EXT_KCK_32: The device supports 32-byte KCK keys.
* @WIPHY_FLAG_NOTIFY_REGDOM_BY_DRIVER: The device could handle reg notify for
* NL80211_REGDOM_SET_BY_DRIVER.
+ * @WIPHY_FLAG_CHANNEL_CHANGE_ON_BEACON: reg_call_notifier() is called if the
+ * driver sets this flag, to update channels on beacon hints.
+ * @WIPHY_FLAG_SUPPORTS_NSTR_NONPRIMARY: support connection to non-primary link
+ * of an NSTR mobile AP MLD.
*/
enum wiphy_flags {
WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK = BIT(0),
@@ -4796,7 +4985,7 @@ enum wiphy_flags {
WIPHY_FLAG_IBSS_RSN = BIT(8),
WIPHY_FLAG_MESH_AUTH = BIT(10),
WIPHY_FLAG_SUPPORTS_EXT_KCK_32 = BIT(11),
- /* use hole at 12 */
+ WIPHY_FLAG_SUPPORTS_NSTR_NONPRIMARY = BIT(12),
WIPHY_FLAG_SUPPORTS_FW_ROAM = BIT(13),
WIPHY_FLAG_AP_UAPSD = BIT(14),
WIPHY_FLAG_SUPPORTS_TDLS = BIT(15),
@@ -4809,6 +4998,7 @@ enum wiphy_flags {
WIPHY_FLAG_SUPPORTS_5_10_MHZ = BIT(22),
WIPHY_FLAG_HAS_CHANNEL_SWITCH = BIT(23),
WIPHY_FLAG_NOTIFY_REGDOM_BY_DRIVER = BIT(24),
+ WIPHY_FLAG_CHANNEL_CHANGE_ON_BEACON = BIT(25),
};
/**
@@ -5724,12 +5914,17 @@ struct cfg80211_cqm_config;
* wiphy_lock - lock the wiphy
* @wiphy: the wiphy to lock
*
- * This is mostly exposed so it can be done around registering and
- * unregistering netdevs that aren't created through cfg80211 calls,
- * since that requires locking in cfg80211 when the notifiers is
- * called, but that cannot differentiate which way it's called.
+ * This is needed around registering and unregistering netdevs that
+ * aren't created through cfg80211 calls, since that requires locking
+ * in cfg80211 when the notifier is called, but that cannot
+ * differentiate which way it's called.
+ *
+ * It can also be used by drivers for their own purposes.
*
* When cfg80211 ops are called, the wiphy is already locked.
+ *
+ * Note that this makes sure that no workers that have been queued
+ * with wiphy_work_queue() are running.
*/
static inline void wiphy_lock(struct wiphy *wiphy)
__acquires(&wiphy->mtx)
@@ -5749,6 +5944,109 @@ static inline void wiphy_unlock(struct wiphy *wiphy)
mutex_unlock(&wiphy->mtx);
}
+struct wiphy_work;
+typedef void (*wiphy_work_func_t)(struct wiphy *, struct wiphy_work *);
+
+struct wiphy_work {
+ struct list_head entry;
+ wiphy_work_func_t func;
+};
+
+static inline void wiphy_work_init(struct wiphy_work *work,
+ wiphy_work_func_t func)
+{
+ INIT_LIST_HEAD(&work->entry);
+ work->func = func;
+}
+
+/**
+ * wiphy_work_queue - queue work for the wiphy
+ * @wiphy: the wiphy to queue for
+ * @work: the work item
+ *
+ * This is useful for work that must be done asynchronously, and work
+ * queued here has the special property that the wiphy mutex will be
+ * held as if wiphy_lock() was called, and that it cannot be running
+ * after wiphy_lock() was called. Therefore, wiphy_work_cancel() can
+ * use just cancel_work() instead of cancel_work_sync(); it requires
+ * being in a section protected by wiphy_lock().
+ */
+void wiphy_work_queue(struct wiphy *wiphy, struct wiphy_work *work);
+
+/**
+ * wiphy_work_cancel - cancel previously queued work
+ * @wiphy: the wiphy, for debug purposes
+ * @work: the work to cancel
+ *
+ * Cancel the work *without* waiting for it, this assumes being
+ * called under the wiphy mutex acquired by wiphy_lock().
+ */
+void wiphy_work_cancel(struct wiphy *wiphy, struct wiphy_work *work);
+
+/**
+ * wiphy_work_flush - flush previously queued work
+ * @wiphy: the wiphy, for debug purposes
+ * @work: the work to flush, this can be %NULL to flush all work
+ *
+ * Flush the work (i.e. run it if pending). This must be called
+ * under the wiphy mutex acquired by wiphy_lock().
+ */
+void wiphy_work_flush(struct wiphy *wiphy, struct wiphy_work *work);
+
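A short usage sketch of the wiphy_work API above (hypothetical mydrv_* driver code); note that cancelling requires the wiphy mutex but needs no _sync variant:

static struct wiphy_work mydrv_work;

static void mydrv_reconfig_wk(struct wiphy *wiphy, struct wiphy_work *work)
{
	/* runs with the wiphy mutex held, as if wiphy_lock() was called */
}

static void mydrv_start(struct wiphy *wiphy)
{
	wiphy_work_init(&mydrv_work, mydrv_reconfig_wk);
	wiphy_work_queue(wiphy, &mydrv_work);
}

static void mydrv_stop(struct wiphy *wiphy)
{
	wiphy_lock(wiphy);
	wiphy_work_cancel(wiphy, &mydrv_work);	/* no _sync needed under the lock */
	wiphy_unlock(wiphy);
}
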
+struct wiphy_delayed_work {
+ struct wiphy_work work;
+ struct wiphy *wiphy;
+ struct timer_list timer;
+};
+
+void wiphy_delayed_work_timer(struct timer_list *t);
+
+static inline void wiphy_delayed_work_init(struct wiphy_delayed_work *dwork,
+ wiphy_work_func_t func)
+{
+ timer_setup(&dwork->timer, wiphy_delayed_work_timer, 0);
+ wiphy_work_init(&dwork->work, func);
+}
+
+/**
+ * wiphy_delayed_work_queue - queue delayed work for the wiphy
+ * @wiphy: the wiphy to queue for
+ * @dwork: the delayable worker
+ * @delay: number of jiffies to wait before queueing
+ *
+ * This is useful for work that must be done asynchronously, and work
+ * queued here has the special property that the wiphy mutex will be
+ * held as if wiphy_lock() was called, and that it cannot be running
+ * after wiphy_lock() was called. Therefore, wiphy_delayed_work_cancel() can
+ * use just cancel_work() instead of cancel_work_sync(); it requires
+ * being in a section protected by wiphy_lock().
+ */
+void wiphy_delayed_work_queue(struct wiphy *wiphy,
+ struct wiphy_delayed_work *dwork,
+ unsigned long delay);
+
+/**
+ * wiphy_delayed_work_cancel - cancel previously queued delayed work
+ * @wiphy: the wiphy, for debug purposes
+ * @dwork: the delayed work to cancel
+ *
+ * Cancel the work *without* waiting for it, this assumes being
+ * called under the wiphy mutex acquired by wiphy_lock().
+ */
+void wiphy_delayed_work_cancel(struct wiphy *wiphy,
+ struct wiphy_delayed_work *dwork);
+
+/**
+ * wiphy_delayed_work_flush - flush previously queued delayed work
+ * @wiphy: the wiphy, for debug purposes
+ * @dwork: the delayed work to flush
+ *
+ * Flush the work (i.e. run it if pending). This must be called
+ * under the wiphy mutex acquired by wiphy_lock().
+ */
+void wiphy_delayed_work_flush(struct wiphy *wiphy,
+ struct wiphy_delayed_work *dwork);
+
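And the delayed variant, again as a hedged sketch (mydrv_* names and the 500 ms delay are illustrative):

static struct wiphy_delayed_work mydrv_timeout;

static void mydrv_timeout_wk(struct wiphy *wiphy, struct wiphy_work *work)
{
	/* the wiphy mutex is held here, too */
}

static void mydrv_arm(struct wiphy *wiphy)
{
	wiphy_delayed_work_init(&mydrv_timeout, mydrv_timeout_wk);
	wiphy_delayed_work_queue(wiphy, &mydrv_timeout,
				 msecs_to_jiffies(500));
}

static void mydrv_disarm(struct wiphy *wiphy)
{
	lockdep_assert_held(&wiphy->mtx);
	wiphy_delayed_work_cancel(wiphy, &mydrv_timeout);
}
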
/**
* struct wireless_dev - wireless device state
*
@@ -5777,7 +6075,6 @@ static inline void wiphy_unlock(struct wiphy *wiphy)
* wireless device if it has no netdev
* @u: union containing data specific to @iftype
* @connected: indicates if connected or not (STA mode)
- * @bssid: (private) Used by the internal configuration code
* @wext: (private) Used by the internal wireless extensions compat code
* @wext.ibss: (private) IBSS data part of wext handling
* @wext.connect: (private) connection handling data
@@ -5797,10 +6094,6 @@ static inline void wiphy_unlock(struct wiphy *wiphy)
* @mgmt_registrations: list of registrations for management frames
* @mgmt_registrations_need_update: mgmt registrations were updated,
* need to propagate the update to the driver
- * @mtx: mutex used to lock data in this struct, may be used by drivers
- * and some API functions require it held
- * @beacon_interval: beacon interval used on this device for transmitting
- * beacons, 0 when not valid
* @address: The address for this device, valid only if @netdev is %NULL
* @is_running: true if this is a non-netdev device that has been started, e.g.
* the P2P Device.
@@ -5821,6 +6114,7 @@ static inline void wiphy_unlock(struct wiphy *wiphy)
* @event_lock: (private) lock for event list
* @owner_nlportid: (private) owner socket port ID
* @nl_owner_dead: (private) owner socket went away
+ * @cqm_rssi_work: (private) CQM RSSI reporting work
* @cqm_config: (private) nl80211 RSSI monitor state
* @pmsr_list: (private) peer measurement requests
* @pmsr_lock: (private) peer measurements requests/results lock
@@ -5844,8 +6138,6 @@ struct wireless_dev {
struct list_head mgmt_registrations;
u8 mgmt_registrations_need_update:1;
- struct mutex mtx;
-
bool use_4addr, is_running, registered, registering;
u8 address[ETH_ALEN] __aligned(sizeof(u16));
@@ -5893,7 +6185,8 @@ struct wireless_dev {
} wext;
#endif
- struct cfg80211_cqm_config *cqm_config;
+ struct wiphy_work cqm_rssi_work;
+ struct cfg80211_cqm_config __rcu *cqm_config;
struct list_head pmsr_list;
spinlock_t pmsr_lock;
@@ -6135,13 +6428,11 @@ ieee80211_get_response_rate(struct ieee80211_supported_band *sband,
/**
* ieee80211_mandatory_rates - get mandatory rates for a given band
* @sband: the band to look for rates in
- * @scan_width: width of the control channel
*
* This function returns a bitmap of the mandatory rates for the given
* band, bits are set according to the rate position in the bitrates array.
*/
-u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband,
- enum nl80211_bss_scan_width scan_width);
+u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband);
/*
* Radiotap parsing functions -- for controlled injection support
@@ -6482,7 +6773,7 @@ static inline const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len)
* @ies: data consisting of IEs
* @len: length of data
*
- * Return: %NULL if the etended element could not be found or if
+ * Return: %NULL if the extended element could not be found or if
* the element is invalid (claims to be longer than the given
* data) or if the byte array doesn't match; otherwise return the
* requested element struct.
@@ -6561,6 +6852,28 @@ cfg80211_find_vendor_ie(unsigned int oui, int oui_type,
}
/**
+ * cfg80211_defragment_element - Defrag the given element data into a buffer
+ *
+ * @elem: the element to defragment
+ * @ies: elements where @elem is contained
+ * @ieslen: length of @ies
+ * @data: buffer to store element data
+ * @data_len: length of @data
+ * @frag_id: the element ID of fragments
+ *
+ * Return: length of @data, or -EINVAL on error
+ *
+ * Copy out all data from an element that may be fragmented into @data, while
+ * skipping all headers.
+ *
+ * The function uses memmove() internally. It is acceptable to defragment an
+ * element in-place.
+ */
+ssize_t cfg80211_defragment_element(const struct element *elem, const u8 *ies,
+ size_t ieslen, u8 *data, size_t data_len,
+ u8 frag_id);
+
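A sketch of calling the new helper; the buffer size is illustrative and WLAN_EID_FRAGMENT (the standard fragment element ID from linux/ieee80211.h) is assumed as the fragment ID:

static ssize_t mydrv_copy_element(const struct element *elem,
				  const u8 *ies, size_t ieslen)
{
	u8 data[256];	/* illustrative size */
	ssize_t len;

	len = cfg80211_defragment_element(elem, ies, ieslen,
					  data, sizeof(data),
					  WLAN_EID_FRAGMENT);
	if (len < 0)
		return len;	/* -EINVAL on malformed input */

	/* @data now holds @len bytes of payload, all headers stripped */
	return len;
}
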
+/**
* cfg80211_send_layer2_update - send layer 2 update frame
*
* @dev: network device
@@ -6607,7 +6920,7 @@ int regulatory_hint(struct wiphy *wiphy, const char *alpha2);
/**
* regulatory_set_wiphy_regd - set regdom info for self managed drivers
* @wiphy: the wireless device we want to process the regulatory domain on
- * @rd: the regulatory domain informatoin to use for this wiphy
+ * @rd: the regulatory domain information to use for this wiphy
*
* Set the regulatory domain information for self-managed wiphys, only they
* may use this function. See %REGULATORY_WIPHY_SELF_MANAGED for more
@@ -6698,7 +7011,7 @@ bool regulatory_pre_cac_allowed(struct wiphy *wiphy);
* Regulatory self-managed driver can use it to proactively
*
* @alpha2: the ISO/IEC 3166 alpha2 wmm rule to be queried.
- * @freq: the freqency(in MHz) to be queried.
+ * @freq: the frequency (in MHz) to be queried.
* @rule: pointer to store the wmm rule from the regulatory db.
*
* Self-managed wireless drivers can use this function to query
@@ -6781,22 +7094,6 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
gfp_t gfp);
static inline struct cfg80211_bss * __must_check
-cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
- struct ieee80211_channel *rx_channel,
- enum nl80211_bss_scan_width scan_width,
- struct ieee80211_mgmt *mgmt, size_t len,
- s32 signal, gfp_t gfp)
-{
- struct cfg80211_inform_bss data = {
- .chan = rx_channel,
- .scan_width = scan_width,
- .signal = signal,
- };
-
- return cfg80211_inform_bss_frame_data(wiphy, &data, mgmt, len, gfp);
-}
-
-static inline struct cfg80211_bss * __must_check
cfg80211_inform_bss_frame(struct wiphy *wiphy,
struct ieee80211_channel *rx_channel,
struct ieee80211_mgmt *mgmt, size_t len,
@@ -6804,7 +7101,6 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
{
struct cfg80211_inform_bss data = {
.chan = rx_channel,
- .scan_width = NL80211_BSS_CHAN_WIDTH_20,
.signal = signal,
};
@@ -6879,6 +7175,23 @@ int cfg80211_get_ies_channel_number(const u8 *ie, size_t ielen,
enum nl80211_band band);
/**
+ * cfg80211_ssid_eq - compare two SSIDs
+ * @a: first SSID
+ * @b: second SSID
+ *
+ * Return: %true if SSIDs are equal, %false otherwise.
+ */
+static inline bool
+cfg80211_ssid_eq(struct cfg80211_ssid *a, struct cfg80211_ssid *b)
+{
+ if (WARN_ON(!a || !b))
+ return false;
+ if (a->ssid_len != b->ssid_len)
+ return false;
+ return !memcmp(a->ssid, b->ssid, a->ssid_len);
+}
+
+/**
* cfg80211_inform_bss_data - inform cfg80211 of a new BSS
*
* @wiphy: the wiphy reporting the BSS
@@ -6907,26 +7220,6 @@ cfg80211_inform_bss_data(struct wiphy *wiphy,
gfp_t gfp);
static inline struct cfg80211_bss * __must_check
-cfg80211_inform_bss_width(struct wiphy *wiphy,
- struct ieee80211_channel *rx_channel,
- enum nl80211_bss_scan_width scan_width,
- enum cfg80211_bss_frame_type ftype,
- const u8 *bssid, u64 tsf, u16 capability,
- u16 beacon_interval, const u8 *ie, size_t ielen,
- s32 signal, gfp_t gfp)
-{
- struct cfg80211_inform_bss data = {
- .chan = rx_channel,
- .scan_width = scan_width,
- .signal = signal,
- };
-
- return cfg80211_inform_bss_data(wiphy, &data, ftype, bssid, tsf,
- capability, beacon_interval, ie, ielen,
- gfp);
-}
-
-static inline struct cfg80211_bss * __must_check
cfg80211_inform_bss(struct wiphy *wiphy,
struct ieee80211_channel *rx_channel,
enum cfg80211_bss_frame_type ftype,
@@ -6936,7 +7229,6 @@ cfg80211_inform_bss(struct wiphy *wiphy,
{
struct cfg80211_inform_bss data = {
.chan = rx_channel,
- .scan_width = NL80211_BSS_CHAN_WIDTH_20,
.signal = signal,
};
@@ -6946,6 +7238,25 @@ cfg80211_inform_bss(struct wiphy *wiphy,
}
/**
+ * __cfg80211_get_bss - get a BSS reference
+ * @wiphy: the wiphy this BSS struct belongs to
+ * @channel: the channel to search on (or %NULL)
+ * @bssid: the desired BSSID (or %NULL)
+ * @ssid: the desired SSID (or %NULL)
+ * @ssid_len: length of the SSID (or 0)
+ * @bss_type: type of BSS, see &enum ieee80211_bss_type
+ * @privacy: privacy filter, see &enum ieee80211_privacy
+ * @use_for: indicates which use is intended
+ */
+struct cfg80211_bss *__cfg80211_get_bss(struct wiphy *wiphy,
+ struct ieee80211_channel *channel,
+ const u8 *bssid,
+ const u8 *ssid, size_t ssid_len,
+ enum ieee80211_bss_type bss_type,
+ enum ieee80211_privacy privacy,
+ u32 use_for);
+
+/**
* cfg80211_get_bss - get a BSS reference
* @wiphy: the wiphy this BSS struct belongs to
* @channel: the channel to search on (or %NULL)
@@ -6954,13 +7265,20 @@ cfg80211_inform_bss(struct wiphy *wiphy,
* @ssid_len: length of the SSID (or 0)
* @bss_type: type of BSS, see &enum ieee80211_bss_type
* @privacy: privacy filter, see &enum ieee80211_privacy
+ *
+ * This version implies regular usage, %NL80211_BSS_USE_FOR_NORMAL.
*/
-struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
- struct ieee80211_channel *channel,
- const u8 *bssid,
- const u8 *ssid, size_t ssid_len,
- enum ieee80211_bss_type bss_type,
- enum ieee80211_privacy privacy);
+static inline struct cfg80211_bss *
+cfg80211_get_bss(struct wiphy *wiphy, struct ieee80211_channel *channel,
+ const u8 *bssid, const u8 *ssid, size_t ssid_len,
+ enum ieee80211_bss_type bss_type,
+ enum ieee80211_privacy privacy)
+{
+ return __cfg80211_get_bss(wiphy, channel, bssid, ssid, ssid_len,
+ bss_type, privacy,
+ NL80211_BSS_USE_FOR_NORMAL);
+}
+
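Usage stays as before for regular callers, e.g. this sketch (cfg80211_put_bss() is the existing release counterpart, not shown in this hunk; mydrv_* is hypothetical):

static void mydrv_check_entry(struct wiphy *wiphy,
			      struct ieee80211_channel *chan, const u8 *bssid)
{
	struct cfg80211_bss *bss;

	bss = cfg80211_get_bss(wiphy, chan, bssid, NULL, 0,
			       IEEE80211_BSS_TYPE_ESS,
			       IEEE80211_PRIVACY_ANY);
	if (!bss)
		return;
	/* ... inspect the entry ... */
	cfg80211_put_bss(wiphy, bss);
}
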
static inline struct cfg80211_bss *
cfg80211_get_ibss(struct wiphy *wiphy,
struct ieee80211_channel *channel,
@@ -7021,19 +7339,6 @@ void cfg80211_bss_iter(struct wiphy *wiphy,
void *data),
void *iter_data);
-static inline enum nl80211_bss_scan_width
-cfg80211_chandef_to_scan_width(const struct cfg80211_chan_def *chandef)
-{
- switch (chandef->width) {
- case NL80211_CHAN_WIDTH_5:
- return NL80211_BSS_CHAN_WIDTH_5;
- case NL80211_CHAN_WIDTH_10:
- return NL80211_BSS_CHAN_WIDTH_10;
- default:
- return NL80211_BSS_CHAN_WIDTH_20;
- }
-}
-
/**
* cfg80211_rx_mlme_mgmt - notification of processed MLME management frame
* @dev: network device
@@ -7066,9 +7371,7 @@ void cfg80211_rx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len);
void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr);
/**
- * struct cfg80211_rx_assoc_resp - association response data
- * @bss: the BSS that association was requested with, ownership of the pointer
- * moves to cfg80211 in the call to cfg80211_rx_assoc_resp()
+ * struct cfg80211_rx_assoc_resp_data - association response data
* @buf: (Re)Association Response frame (header + body)
* @len: length of the frame data
* @uapsd_queues: bitmap of queues configured for uapsd. Same format
@@ -7078,10 +7381,12 @@ void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr);
* @ap_mld_addr: AP MLD address (in case of MLO)
* @links: per-link information indexed by link ID, use links[0] for
* non-MLO connections
+ * @links.bss: the BSS that association was requested with, ownership of the
+ * pointer moves to cfg80211 in the call to cfg80211_rx_assoc_resp()
* @links.status: Set this (along with a BSS pointer) for links that
* were rejected by the AP.
*/
-struct cfg80211_rx_assoc_resp {
+struct cfg80211_rx_assoc_resp_data {
const u8 *buf;
size_t len;
const u8 *req_ies;
@@ -7089,7 +7394,7 @@ struct cfg80211_rx_assoc_resp {
int uapsd_queues;
const u8 *ap_mld_addr;
struct {
- const u8 *addr;
+ u8 addr[ETH_ALEN] __aligned(2);
struct cfg80211_bss *bss;
u16 status;
} links[IEEE80211_MLD_MAX_NUM_LINKS];
@@ -7098,7 +7403,7 @@ struct cfg80211_rx_assoc_resp {
/**
* cfg80211_rx_assoc_resp - notification of processed association response
* @dev: network device
- * @data: association response data, &struct cfg80211_rx_assoc_resp
+ * @data: association response data, &struct cfg80211_rx_assoc_resp_data
*
* After being asked to associate via cfg80211_ops::assoc() the driver must
* call either this function or cfg80211_auth_timeout().
@@ -7106,7 +7411,7 @@ struct cfg80211_rx_assoc_resp {
* This function may sleep. The caller must hold the corresponding wdev's mutex.
*/
void cfg80211_rx_assoc_resp(struct net_device *dev,
- struct cfg80211_rx_assoc_resp *data);
+ const struct cfg80211_rx_assoc_resp_data *data);
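
A sketch of reporting a non-MLO association response with the renamed struct; the field values are illustrative, and ownership of @bss passes to cfg80211 in the call:

static void mydrv_report_assoc(struct net_device *dev,
			       struct cfg80211_bss *bss,
			       const u8 *frame, size_t frame_len)
{
	struct cfg80211_rx_assoc_resp_data data = {
		.buf = frame,
		.len = frame_len,
		.uapsd_queues = -1,	/* illustrative: no uapsd info */
	};

	data.links[0].bss = bss;	/* ownership moves to cfg80211 */
	cfg80211_rx_assoc_resp(dev, &data);
}
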
/**
* struct cfg80211_assoc_failure - association failure data
@@ -7225,7 +7530,7 @@ void cfg80211_notify_new_peer_candidate(struct net_device *dev,
* RFkill integration in cfg80211 is almost invisible to drivers,
* as cfg80211 automatically registers an rfkill instance for each
* wireless device it knows about. Soft kill is also translated
- * into disconnecting and turning all interfaces off, drivers are
+ * into disconnecting and turning all interfaces off. Drivers are
* expected to turn off the device when all interfaces are down.
*
* However, devices may have a hard RFkill line, in which case they
@@ -7273,7 +7578,7 @@ static inline void wiphy_rfkill_stop_polling(struct wiphy *wiphy)
* the configuration mechanism.
*
* A driver supporting vendor commands must register them as an array
- * in struct wiphy, with handlers for each one, each command has an
+ * in struct wiphy, with handlers for each one. Each command has an
* OUI and sub command ID to identify it.
*
* Note that this feature should not be (ab)used to implement protocol
@@ -7437,7 +7742,7 @@ static inline void cfg80211_vendor_event(struct sk_buff *skb, gfp_t gfp)
* interact with driver-specific tools to aid, for instance,
* factory programming.
*
- * This chapter describes how drivers interact with it, for more
+ * This chapter describes how drivers interact with it. For more
* information see the nl80211 book's chapter on it.
*/
@@ -7825,7 +8130,8 @@ void cfg80211_roamed(struct net_device *dev, struct cfg80211_roam_info *info,
* cfg80211_port_authorized - notify cfg80211 of successful security association
*
* @dev: network device
- * @bssid: the BSSID of the AP
+ * @peer_addr: BSSID of the AP/P2P GO in case of STA/GC or STA/GC MAC address
+ * in case of AP/P2P GO
* @td_bitmap: transition disable policy
* @td_bitmap_len: Length of transition disable policy
* @gfp: allocation flags
@@ -7836,8 +8142,11 @@ void cfg80211_roamed(struct net_device *dev, struct cfg80211_roam_info *info,
* should be preceded with a call to cfg80211_connect_result(),
* cfg80211_connect_done(), cfg80211_connect_bss() or cfg80211_roamed() to
* indicate the 802.11 association.
+ * This function can also be called by an AP/P2P GO driver that supports
+ * authentication offload. In this case the @peer_addr passed is that of the
+ * associated STA/GC.
*/
-void cfg80211_port_authorized(struct net_device *dev, const u8 *bssid,
+void cfg80211_port_authorized(struct net_device *dev, const u8 *peer_addr,
const u8* td_bitmap, u8 td_bitmap_len, gfp_t gfp);
/**
@@ -7975,7 +8284,7 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr,
* @link_id: the ID of the link the frame was received on
* @buf: Management frame (header + body)
* @len: length of the frame data
- * @flags: flags, as defined in enum nl80211_rxmgmt_flags
+ * @flags: flags, as defined in &enum nl80211_rxmgmt_flags
* @rx_tstamp: Hardware timestamp of frame RX in nanoseconds
* @ack_tstamp: Hardware timestamp of ack TX in nanoseconds
*/
@@ -8426,7 +8735,7 @@ bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy,
* @link_id: the link ID for MLO, must be 0 for non-MLO
* @punct_bitmap: the new puncturing bitmap
*
- * Caller must acquire wdev_lock, therefore must only be called from sleepable
+ * Caller must hold wiphy mutex, therefore must only be called from sleepable
* driver context!
*/
void cfg80211_ch_switch_notify(struct net_device *dev,
@@ -8666,6 +8975,18 @@ static inline size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
}
/**
+ * ieee80211_fragment_element - fragment the last element in skb
+ * @skb: The skbuf that the element was added to
+ * @len_pos: Pointer to length of the element to fragment
+ * @frag_id: The element ID to use for fragments
+ *
+ * This function fragments all data after @len_pos, adding fragmentation
+ * elements with the given ID as appropriate. The SKB will grow in size
+ * accordingly.
+ */
+void ieee80211_fragment_element(struct sk_buff *skb, u8 *len_pos, u8 frag_id);
+
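A sketch of building an oversized element and fragmenting it afterwards; the skb helpers are standard, and the payload plus WLAN_EID_FRAGMENT usage are illustrative assumptions:

static void mydrv_add_big_element(struct sk_buff *skb, u8 eid_ext,
				  const u8 *payload, size_t payload_len)
{
	u8 *len_pos;

	skb_put_u8(skb, WLAN_EID_EXTENSION);
	len_pos = skb_put(skb, 1);		/* length byte, fixed up below */
	skb_put_u8(skb, eid_ext);
	skb_put_data(skb, payload, payload_len);	/* may exceed 255 bytes */

	/* split everything after *len_pos into fragment elements */
	ieee80211_fragment_element(skb, len_pos, WLAN_EID_FRAGMENT);
}
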
+/**
* cfg80211_report_wowlan_wakeup - report wakeup from WoWLAN
* @wdev: the wireless device reporting the wakeup
* @wakeup: the wakeup report
@@ -8914,9 +9235,9 @@ bool cfg80211_iftype_allowed(struct wiphy *wiphy, enum nl80211_iftype iftype,
/**
* cfg80211_assoc_comeback - notification of association that was
- * temporarly rejected with a comeback
+ * temporarily rejected with a comeback
* @netdev: network device
- * @ap_addr: AP (MLD) address that rejected the assocation
+ * @ap_addr: AP (MLD) address that rejected the association
* @timeout: timeout interval value in TUs.
*
* This function may sleep. The caller must hold the corresponding wdev's mutex.
@@ -9067,4 +9388,73 @@ static inline int cfg80211_color_change_notify(struct net_device *dev)
bool cfg80211_valid_disable_subchannel_bitmap(u16 *bitmap,
const struct cfg80211_chan_def *chandef);
+/**
+ * cfg80211_links_removed - Notify about removed STA MLD setup links.
+ * @dev: network device.
+ * @link_mask: BIT mask of removed STA MLD setup link IDs.
+ *
+ * Inform cfg80211 and the userspace about removed STA MLD setup links due to
+ * AP MLD removing the corresponding affiliated APs with Multi-Link
+ * reconfiguration. Note that it's not valid to remove all links; in that
+ * case, disconnect instead.
+ * Also note that the wdev mutex must be held.
+ */
+void cfg80211_links_removed(struct net_device *dev, u16 link_mask);
+
+/**
+ * cfg80211_schedule_channels_check - schedule regulatory check if needed
+ * @wdev: the wireless device to check
+ *
+ * In case the device supports NO_IR or DFS relaxations, schedule regulatory
+ * channels check, as previous concurrent operation conditions may not
+ * hold anymore.
+ */
+void cfg80211_schedule_channels_check(struct wireless_dev *wdev);
+
+#ifdef CONFIG_CFG80211_DEBUGFS
+/**
+ * wiphy_locked_debugfs_read - do a locked read in debugfs
+ * @wiphy: the wiphy to use
+ * @file: the file being read
+ * @buf: the buffer to fill and then read from
+ * @bufsize: size of the buffer
+ * @userbuf: the user buffer to copy to
+ * @count: read count
+ * @ppos: read position
+ * @handler: the read handler to call (under wiphy lock)
+ * @data: additional data to pass to the read handler
+ */
+ssize_t wiphy_locked_debugfs_read(struct wiphy *wiphy, struct file *file,
+ char *buf, size_t bufsize,
+ char __user *userbuf, size_t count,
+ loff_t *ppos,
+ ssize_t (*handler)(struct wiphy *wiphy,
+ struct file *file,
+ char *buf,
+ size_t bufsize,
+ void *data),
+ void *data);
+
+/**
+ * wiphy_locked_debugfs_write - do a locked write in debugfs
+ * @wiphy: the wiphy to use
+ * @file: the file being written to
+ * @buf: the buffer to copy the user data to
+ * @bufsize: size of the buffer
+ * @userbuf: the user buffer to copy from
+ * @count: write count
+ * @handler: the write handler to call (under wiphy lock)
+ * @data: additional data to pass to the write handler
+ */
+ssize_t wiphy_locked_debugfs_write(struct wiphy *wiphy, struct file *file,
+ char *buf, size_t bufsize,
+ const char __user *userbuf, size_t count,
+ ssize_t (*handler)(struct wiphy *wiphy,
+ struct file *file,
+ char *buf,
+ size_t count,
+ void *data),
+ void *data);
+#endif
+
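A sketch of wiring these into a file_operations read path; the handler runs under the wiphy lock, and all mydrv_* names (plus the private_data convention) are hypothetical:

static ssize_t mydrv_dbg_handler(struct wiphy *wiphy, struct file *file,
				 char *buf, size_t bufsize, void *data)
{
	/* wiphy lock held; format the state into buf */
	return scnprintf(buf, bufsize, "running\n");
}

static ssize_t mydrv_dbg_read(struct file *file, char __user *userbuf,
			      size_t count, loff_t *ppos)
{
	struct wiphy *wiphy = file->private_data;	/* assumed set at creation */
	char buf[128];

	return wiphy_locked_debugfs_read(wiphy, file, buf, sizeof(buf),
					 userbuf, count, ppos,
					 mydrv_dbg_handler, NULL);
}
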
#endif /* __NET_CFG80211_H */
diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h
index 0c2778a836db..cd95711b12b8 100644
--- a/include/net/cfg802154.h
+++ b/include/net/cfg802154.h
@@ -20,6 +20,7 @@ struct wpan_phy;
struct wpan_phy_cca;
struct cfg802154_scan_request;
struct cfg802154_beacon_request;
+struct ieee802154_addr;
#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
struct ieee802154_llsec_device_key;
@@ -77,6 +78,12 @@ struct cfg802154_ops {
struct cfg802154_beacon_request *request);
int (*stop_beacons)(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev);
+ int (*associate)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev,
+ struct ieee802154_addr *coord);
+ int (*disassociate)(struct wpan_phy *wpan_phy,
+ struct wpan_dev *wpan_dev,
+ struct ieee802154_addr *target);
#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
void (*get_llsec_table)(struct wpan_phy *wpan_phy,
struct wpan_dev *wpan_dev,
@@ -170,7 +177,8 @@ wpan_phy_cca_cmp(const struct wpan_phy_cca *a, const struct wpan_phy_cca *b)
}
/**
- * @WPAN_PHY_FLAG_TRANSMIT_POWER: Indicates that transceiver will support
+ * enum wpan_phy_flags - WPAN PHY state flags
+ * @WPAN_PHY_FLAG_TXPOWER: Indicates that transceiver will support
* transmit power setting.
* @WPAN_PHY_FLAG_CCA_ED_LEVEL: Indicates that transceiver will support cca ed
* level setting.
@@ -178,12 +186,15 @@ wpan_phy_cca_cmp(const struct wpan_phy_cca *a, const struct wpan_phy_cca *b)
* setting.
* @WPAN_PHY_FLAG_STATE_QUEUE_STOPPED: Indicates that the transmit queue was
* temporarily stopped.
+ * @WPAN_PHY_FLAG_DATAGRAMS_ONLY: Indicates that transceiver is only able to
+ * send/receive datagrams.
*/
enum wpan_phy_flags {
WPAN_PHY_FLAG_TXPOWER = BIT(1),
WPAN_PHY_FLAG_CCA_ED_LEVEL = BIT(2),
WPAN_PHY_FLAG_CCA_MODE = BIT(3),
WPAN_PHY_FLAG_STATE_QUEUE_STOPPED = BIT(4),
+ WPAN_PHY_FLAG_DATAGRAMS_ONLY = BIT(5),
};
struct wpan_phy {
@@ -300,6 +311,22 @@ struct ieee802154_coord_desc {
};
/**
+ * struct ieee802154_pan_device - PAN device information
+ * @pan_id: the PAN ID of this device
+ * @mode: the preferred mode to reach the device
+ * @short_addr: the short address of this device
+ * @extended_addr: the extended address of this device
+ * @node: the list node
+ */
+struct ieee802154_pan_device {
+ __le16 pan_id;
+ u8 mode;
+ __le16 short_addr;
+ __le64 extended_addr;
+ struct list_head node;
+};
+
+/**
* struct cfg802154_scan_request - Scan request
*
* @type: type of scan to be performed
@@ -474,6 +501,13 @@ struct wpan_dev {
/* fallback for acknowledgment bit setting */
bool ackreq;
+
+ /* Associations */
+ struct mutex association_lock;
+ struct ieee802154_pan_device *parent;
+ struct list_head children;
+ unsigned int max_associations;
+ unsigned int nchildren;
};
#define to_phy(_dev) container_of(_dev, struct wpan_phy, dev)
@@ -525,4 +559,46 @@ static inline const char *wpan_phy_name(struct wpan_phy *phy)
void ieee802154_configure_durations(struct wpan_phy *phy,
unsigned int page, unsigned int channel);
+/**
+ * cfg802154_device_is_associated - Checks whether we are associated with any device
+ * @wpan_dev: the wpan device
+ * @return: true if we are associated
+ */
+bool cfg802154_device_is_associated(struct wpan_dev *wpan_dev);
+
+/**
+ * cfg802154_device_is_parent - Checks if a device is our coordinator
+ * @wpan_dev: the wpan device
+ * @target: the expected parent
+ * @return: true if @target is our coordinator
+ */
+bool cfg802154_device_is_parent(struct wpan_dev *wpan_dev,
+ struct ieee802154_addr *target);
+
+/**
+ * cfg802154_device_is_child - Checks whether a device is associated with us
+ * @wpan_dev: the wpan device
+ * @target: the expected child
+ * @return: the PAN device
+ */
+struct ieee802154_pan_device *
+cfg802154_device_is_child(struct wpan_dev *wpan_dev,
+ struct ieee802154_addr *target);
+
+/**
+ * cfg802154_set_max_associations - Limit the number of future associations
+ * @wpan_dev: the wpan device
+ * @max: the maximum number of devices we will accept associations from
+ * @return: the old maximum value
+ */
+unsigned int cfg802154_set_max_associations(struct wpan_dev *wpan_dev,
+ unsigned int max);
+
+/**
+ * cfg802154_get_free_short_addr - Get a free address among the known devices
+ * @wpan_dev: the wpan device
+ * @return: a random short address expected to be unused on our PAN
+ */
+__le16 cfg802154_get_free_short_addr(struct wpan_dev *wpan_dev);
+
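A sketch of a coordinator-side association path using the helpers above; holding association_lock around the child-count fields is an assumption based on the struct wpan_dev additions earlier in this patch, and the error code is illustrative:

static int mydrv_accept_assoc(struct wpan_dev *wpan_dev, __le16 *short_addr)
{
	int ret = 0;

	mutex_lock(&wpan_dev->association_lock);	/* assumed to guard children */
	if (wpan_dev->nchildren >= wpan_dev->max_associations)
		ret = -ENOMEM;	/* illustrative error choice */
	else
		*short_addr = cfg802154_get_free_short_addr(wpan_dev);
	mutex_unlock(&wpan_dev->association_lock);

	return ret;
}
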
#endif /* __NET_CFG802154_H */
diff --git a/include/net/codel.h b/include/net/codel.h
index 5fed2f16cb8d..aa80f744826c 100644
--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -145,8 +145,8 @@ struct codel_vars {
* @maxpacket: largest packet we've seen so far
* @drop_count: temp count of dropped packets in dequeue()
* @drop_len: bytes of dropped packets in dequeue()
- * ecn_mark: number of packets we ECN marked instead of dropping
- * ce_mark: number of packets CE marked because sojourn time was above ce_threshold
+ * @ecn_mark: number of packets we ECN marked instead of dropping
+ * @ce_mark: number of packets CE marked because sojourn time was above ce_threshold
*/
struct codel_stats {
u32 maxpacket;
diff --git a/include/net/datalink.h b/include/net/datalink.h
index c837ffc7ebf8..6c529a40e00d 100644
--- a/include/net/datalink.h
+++ b/include/net/datalink.h
@@ -23,6 +23,4 @@ struct datalink_proto {
struct list_head node;
};
-struct datalink_proto *make_EII_client(void);
-void destroy_EII_client(struct datalink_proto *dl);
#endif
diff --git a/include/net/devlink.h b/include/net/devlink.h
index 6a942e70e451..9ac394bdfbe4 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -123,6 +123,7 @@ struct devlink_port {
struct list_head list;
struct list_head region_list;
struct devlink *devlink;
+ const struct devlink_port_ops *ops;
unsigned int index;
spinlock_t type_lock; /* Protects type and type_eth/ib
* structures consistency.
@@ -149,6 +150,7 @@ struct devlink_port {
struct devlink_rate *devlink_rate;
struct devlink_linecard *linecard;
+ u32 rel_index;
};
struct devlink_port_new_attrs {
@@ -220,7 +222,7 @@ struct devlink_dpipe_field {
/**
* struct devlink_dpipe_header - dpipe header object
* @name: header name
- * @id: index, global/local detrmined by global bit
+ * @id: index, global/local determined by global bit
* @fields: fields
* @fields_count: number of fields
* @global: indicates if header is shared like most protocol header
@@ -240,7 +242,7 @@ struct devlink_dpipe_header {
* @header_index: header index (packets can have several headers of same
* type like in case of tunnels)
* @header: header
- * @fieled_id: field index
+ * @field_id: field index
*/
struct devlink_dpipe_match {
enum devlink_dpipe_match_type type;
@@ -255,7 +257,7 @@ struct devlink_dpipe_match {
* @header_index: header index (packets can have several headers of same
* type like in case of tunnels)
* @header: header
- * @fieled_id: field index
+ * @field_id: field index
*/
struct devlink_dpipe_action {
enum devlink_dpipe_action_type type;
@@ -291,7 +293,7 @@ struct devlink_dpipe_value {
* struct devlink_dpipe_entry - table entry object
* @index: index of the entry in the table
* @match_values: match values
- * @matche_values_count: count of matches tuples
+ * @match_values_count: count of matches tuples
* @action_values: actions values
* @action_values_count: count of actions values
* @counter: value of counter
@@ -341,7 +343,9 @@ struct devlink_dpipe_table_ops;
*/
struct devlink_dpipe_table {
void *priv;
+ /* private: */
struct list_head list;
+ /* public: */
const char *name;
bool counters_enabled;
bool counter_control_extern;
@@ -354,13 +358,13 @@ struct devlink_dpipe_table {
/**
* struct devlink_dpipe_table_ops - dpipe_table ops
- * @actions_dump - dumps all tables actions
- * @matches_dump - dumps all tables matches
- * @entries_dump - dumps all active entries in the table
- * @counters_set_update - when changing the counter status hardware sync
+ * @actions_dump: dumps all tables actions
+ * @matches_dump: dumps all tables matches
+ * @entries_dump: dumps all active entries in the table
+ * @counters_set_update: when changing the counter status hardware sync
* maybe needed to allocate/free counter related
* resources
- * @size_get - get size
+ * @size_get: get size
*/
struct devlink_dpipe_table_ops {
int (*actions_dump)(void *priv, struct sk_buff *skb);
@@ -373,8 +377,8 @@ struct devlink_dpipe_table_ops {
/**
* struct devlink_dpipe_headers - dpipe headers
- * @headers - header array can be shared (global bit) or driver specific
- * @headers_count - count of headers
+ * @headers: header array can be shared (global bit) or driver specific
+ * @headers_count: count of headers
*/
struct devlink_dpipe_headers {
struct devlink_dpipe_header **headers;
@@ -386,7 +390,7 @@ struct devlink_dpipe_headers {
* @size_min: minimum size which can be set
* @size_max: maximum size which can be set
* @size_granularity: size granularity
- * @size_unit: resource's basic unit
+ * @unit: resource's basic unit
*/
struct devlink_resource_size_params {
u64 size_min;
@@ -456,6 +460,7 @@ struct devlink_flash_notify {
/**
* struct devlink_param - devlink configuration parameter data
+ * @id: devlink parameter id number
* @name: name of the parameter
* @generic: indicates if the parameter is generic or driver specific
* @type: parameter type
@@ -631,6 +636,7 @@ enum devlink_param_generic_id {
* struct devlink_flash_update_params - Flash Update parameters
* @fw: pointer to the firmware data to update from
* @component: the flash component to update
+ * @overwrite_mask: which types of flash update are supported (may be %0)
*
* With the exception of fw, drivers must opt-in to parameters by
* setting the appropriate bit in the supported_flash_update_params field in
@@ -1261,7 +1267,7 @@ struct devlink_ops {
/**
* @supported_flash_update_params:
* mask of parameters supported by the driver's .flash_update
- * implemementation.
+ * implementation.
*/
u32 supported_flash_update_params;
unsigned long reload_actions;
@@ -1273,12 +1279,6 @@ struct devlink_ops {
int (*reload_up)(struct devlink *devlink, enum devlink_reload_action action,
enum devlink_reload_limit limit, u32 *actions_performed,
struct netlink_ext_ack *extack);
- int (*port_type_set)(struct devlink_port *devlink_port,
- enum devlink_port_type port_type);
- int (*port_split)(struct devlink *devlink, struct devlink_port *port,
- unsigned int count, struct netlink_ext_ack *extack);
- int (*port_unsplit)(struct devlink *devlink, struct devlink_port *port,
- struct netlink_ext_ack *extack);
int (*sb_pool_get)(struct devlink *devlink, unsigned int sb_index,
u16 pool_index,
struct devlink_sb_pool_info *pool_info);
@@ -1435,80 +1435,17 @@ struct devlink_ops {
const struct devlink_trap_policer *policer,
u64 *p_drops);
/**
- * @port_function_hw_addr_get: Port function's hardware address get function.
- *
- * Should be used by device drivers to report the hardware address of a function managed
- * by the devlink port. Driver should return -EOPNOTSUPP if it doesn't support port
- * function handling for a particular port.
- *
- * Note: @extack can be NULL when port notifier queries the port function.
- */
- int (*port_function_hw_addr_get)(struct devlink_port *port, u8 *hw_addr,
- int *hw_addr_len,
- struct netlink_ext_ack *extack);
- /**
- * @port_function_hw_addr_set: Port function's hardware address set function.
- *
- * Should be used by device drivers to set the hardware address of a function managed
- * by the devlink port. Driver should return -EOPNOTSUPP if it doesn't support port
- * function handling for a particular port.
- */
- int (*port_function_hw_addr_set)(struct devlink_port *port,
- const u8 *hw_addr, int hw_addr_len,
- struct netlink_ext_ack *extack);
- /**
- * @port_fn_roce_get: Port function's roce get function.
- *
- * Query RoCE state of a function managed by the devlink port.
- * Return -EOPNOTSUPP if port function RoCE handling is not supported.
- */
- int (*port_fn_roce_get)(struct devlink_port *devlink_port,
- bool *is_enable,
- struct netlink_ext_ack *extack);
- /**
- * @port_fn_roce_set: Port function's roce set function.
- *
- * Enable/Disable the RoCE state of a function managed by the devlink
- * port.
- * Return -EOPNOTSUPP if port function RoCE handling is not supported.
- */
- int (*port_fn_roce_set)(struct devlink_port *devlink_port,
- bool enable, struct netlink_ext_ack *extack);
- /**
- * @port_fn_migratable_get: Port function's migratable get function.
- *
- * Query migratable state of a function managed by the devlink port.
- * Return -EOPNOTSUPP if port function migratable handling is not
- * supported.
- */
- int (*port_fn_migratable_get)(struct devlink_port *devlink_port,
- bool *is_enable,
- struct netlink_ext_ack *extack);
- /**
- * @port_fn_migratable_set: Port function's migratable set function.
- *
- * Enable/Disable migratable state of a function managed by the devlink
- * port.
- * Return -EOPNOTSUPP if port function migratable handling is not
- * supported.
- */
- int (*port_fn_migratable_set)(struct devlink_port *devlink_port,
- bool enable,
- struct netlink_ext_ack *extack);
- /**
* port_new() - Add a new port function of a specified flavor
* @devlink: Devlink instance
* @attrs: attributes of the new port
* @extack: extack for reporting error messages
- * @new_port_index: index of the new port
+ * @devlink_port: pointer to store new devlink port pointer
*
* Devlink core will call this device driver function upon user request
* to create a new port function of a specified flavor and optional
* attributes
*
* Notes:
- * - Called without devlink instance lock being held. Drivers must
- * implement own means of synchronization
* - On success, drivers must register a port with devlink core
*
* Return: 0 on success, negative value otherwise.
@@ -1516,56 +1453,7 @@ struct devlink_ops {
int (*port_new)(struct devlink *devlink,
const struct devlink_port_new_attrs *attrs,
struct netlink_ext_ack *extack,
- unsigned int *new_port_index);
- /**
- * port_del() - Delete a port function
- * @devlink: Devlink instance
- * @port_index: port function index to delete
- * @extack: extack for reporting error messages
- *
- * Devlink core will call this device driver function upon user request
- * to delete a previously created port function
- *
- * Notes:
- * - Called without devlink instance lock being held. Drivers must
- * implement own means of synchronization
- * - On success, drivers must unregister the corresponding devlink
- * port
- *
- * Return: 0 on success, negative value otherwise.
- */
- int (*port_del)(struct devlink *devlink, unsigned int port_index,
- struct netlink_ext_ack *extack);
- /**
- * port_fn_state_get() - Get the state of a port function
- * @devlink: Devlink instance
- * @port: The devlink port
- * @state: Admin configured state
- * @opstate: Current operational state
- * @extack: extack for reporting error messages
- *
- * Reports the admin and operational state of a devlink port function
- *
- * Return: 0 on success, negative value otherwise.
- */
- int (*port_fn_state_get)(struct devlink_port *port,
- enum devlink_port_fn_state *state,
- enum devlink_port_fn_opstate *opstate,
- struct netlink_ext_ack *extack);
- /**
- * port_fn_state_set() - Set the admin state of a port function
- * @devlink: Devlink instance
- * @port: The devlink port
- * @state: Admin state
- * @extack: extack for reporting error messages
- *
- * Set the admin state of a devlink port function
- *
- * Return: 0 on success, negative value otherwise.
- */
- int (*port_fn_state_set)(struct devlink_port *port,
- enum devlink_port_fn_state state,
- struct netlink_ext_ack *extack);
+ struct devlink_port **devlink_port);
/**
* Rate control callbacks.
@@ -1655,15 +1543,146 @@ void devl_unregister(struct devlink *devlink);
void devlink_register(struct devlink *devlink);
void devlink_unregister(struct devlink *devlink);
void devlink_free(struct devlink *devlink);
+
+/**
+ * struct devlink_port_ops - Port operations
+ * @port_split: Callback used to split the port into multiple ones.
+ * @port_unsplit: Callback used to unsplit the port group back into
+ * a single port.
+ * @port_type_set: Callback used to set a type of a port.
+ * @port_del: Callback used to delete selected port along with related function.
+ * Devlink core calls this upon user request to delete
+ * a port previously created by devlink_ops->port_new().
+ * @port_fn_hw_addr_get: Callback used to get port function's hardware address.
+ * Should be used by device drivers to report
+ * the hardware address of a function managed
+ * by the devlink port.
+ * @port_fn_hw_addr_set: Callback used to set port function's hardware address.
+ * Should be used by device drivers to set the hardware
+ * address of a function managed by the devlink port.
+ * @port_fn_roce_get: Callback used to get port function's RoCE capability.
+ * Should be used by device drivers to report
+ * the current state of RoCE capability of a function
+ * managed by the devlink port.
+ * @port_fn_roce_set: Callback used to set port function's RoCE capability.
+ * Should be used by device drivers to enable/disable
+ * RoCE capability of a function managed
+ * by the devlink port.
+ * @port_fn_migratable_get: Callback used to get port function's migratable
+ * capability. Should be used by device drivers
+ * to report the current state of migratable capability
+ * of a function managed by the devlink port.
+ * @port_fn_migratable_set: Callback used to set port function's migratable
+ * capability. Should be used by device drivers
+ * to enable/disable migratable capability of
+ * a function managed by the devlink port.
+ * @port_fn_state_get: Callback used to get port function's state.
+ * Should be used by device drivers to report
+ * the current admin and operational state of a
+ * function managed by the devlink port.
+ * @port_fn_state_set: Callback used to set port function's state.
+ * Should be used by device drivers to set
+ * the admin state of a function managed
+ * by the devlink port.
+ * @port_fn_ipsec_crypto_get: Callback used to get port function's ipsec_crypto
+ * capability. Should be used by device drivers
+ * to report the current state of ipsec_crypto
+ * capability of a function managed by the devlink
+ * port.
+ * @port_fn_ipsec_crypto_set: Callback used to set port function's ipsec_crypto
+ * capability. Should be used by device drivers to
+ * enable/disable ipsec_crypto capability of a
+ * function managed by the devlink port.
+ * @port_fn_ipsec_packet_get: Callback used to get port function's ipsec_packet
+ * capability. Should be used by device drivers
+ * to report the current state of ipsec_packet
+ * capability of a function managed by the devlink
+ * port.
+ * @port_fn_ipsec_packet_set: Callback used to set port function's ipsec_packet
+ * capability. Should be used by device drivers to
+ * enable/disable ipsec_packet capability of a
+ * function managed by the devlink port.
+ *
+ * Note: Driver should return -EOPNOTSUPP if it doesn't support
+ * port function (@port_fn_*) handling for a particular port.
+ */
+struct devlink_port_ops {
+ int (*port_split)(struct devlink *devlink, struct devlink_port *port,
+ unsigned int count, struct netlink_ext_ack *extack);
+ int (*port_unsplit)(struct devlink *devlink, struct devlink_port *port,
+ struct netlink_ext_ack *extack);
+ int (*port_type_set)(struct devlink_port *devlink_port,
+ enum devlink_port_type port_type);
+ int (*port_del)(struct devlink *devlink, struct devlink_port *port,
+ struct netlink_ext_ack *extack);
+ int (*port_fn_hw_addr_get)(struct devlink_port *port, u8 *hw_addr,
+ int *hw_addr_len,
+ struct netlink_ext_ack *extack);
+ int (*port_fn_hw_addr_set)(struct devlink_port *port,
+ const u8 *hw_addr, int hw_addr_len,
+ struct netlink_ext_ack *extack);
+ int (*port_fn_roce_get)(struct devlink_port *devlink_port,
+ bool *is_enable,
+ struct netlink_ext_ack *extack);
+ int (*port_fn_roce_set)(struct devlink_port *devlink_port,
+ bool enable, struct netlink_ext_ack *extack);
+ int (*port_fn_migratable_get)(struct devlink_port *devlink_port,
+ bool *is_enable,
+ struct netlink_ext_ack *extack);
+ int (*port_fn_migratable_set)(struct devlink_port *devlink_port,
+ bool enable,
+ struct netlink_ext_ack *extack);
+ int (*port_fn_state_get)(struct devlink_port *port,
+ enum devlink_port_fn_state *state,
+ enum devlink_port_fn_opstate *opstate,
+ struct netlink_ext_ack *extack);
+ int (*port_fn_state_set)(struct devlink_port *port,
+ enum devlink_port_fn_state state,
+ struct netlink_ext_ack *extack);
+ int (*port_fn_ipsec_crypto_get)(struct devlink_port *devlink_port,
+ bool *is_enable,
+ struct netlink_ext_ack *extack);
+ int (*port_fn_ipsec_crypto_set)(struct devlink_port *devlink_port,
+ bool enable,
+ struct netlink_ext_ack *extack);
+ int (*port_fn_ipsec_packet_get)(struct devlink_port *devlink_port,
+ bool *is_enable,
+ struct netlink_ext_ack *extack);
+ int (*port_fn_ipsec_packet_set)(struct devlink_port *devlink_port,
+ bool enable,
+ struct netlink_ext_ack *extack);
+};
+
void devlink_port_init(struct devlink *devlink,
struct devlink_port *devlink_port);
void devlink_port_fini(struct devlink_port *devlink_port);
-int devl_port_register(struct devlink *devlink,
- struct devlink_port *devlink_port,
- unsigned int port_index);
-int devlink_port_register(struct devlink *devlink,
- struct devlink_port *devlink_port,
- unsigned int port_index);
+
+int devl_port_register_with_ops(struct devlink *devlink,
+ struct devlink_port *devlink_port,
+ unsigned int port_index,
+ const struct devlink_port_ops *ops);
+
+static inline int devl_port_register(struct devlink *devlink,
+ struct devlink_port *devlink_port,
+ unsigned int port_index)
+{
+ return devl_port_register_with_ops(devlink, devlink_port,
+ port_index, NULL);
+}
+
+int devlink_port_register_with_ops(struct devlink *devlink,
+ struct devlink_port *devlink_port,
+ unsigned int port_index,
+ const struct devlink_port_ops *ops);
+
+static inline int devlink_port_register(struct devlink *devlink,
+ struct devlink_port *devlink_port,
+ unsigned int port_index)
+{
+ return devlink_port_register_with_ops(devlink, devlink_port,
+ port_index, NULL);
+}
+
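A sketch of the new registration flow: the split/unsplit and port-function callbacks move from devlink_ops into per-port ops passed at registration time (mydrv_* names and the error message are hypothetical):

static int mydrv_port_split(struct devlink *devlink, struct devlink_port *port,
			    unsigned int count, struct netlink_ext_ack *extack)
{
	NL_SET_ERR_MSG(extack, "split not supported");	/* illustrative */
	return -EOPNOTSUPP;
}

static const struct devlink_port_ops mydrv_port_ops = {
	.port_split = mydrv_port_split,
};

static int mydrv_register_port(struct devlink *devlink,
			       struct devlink_port *port, unsigned int index)
{
	return devl_port_register_with_ops(devlink, port, index,
					   &mydrv_port_ops);
}
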
void devl_port_unregister(struct devlink_port *devlink_port);
void devlink_port_unregister(struct devlink_port *devlink_port);
void devlink_port_type_eth_set(struct devlink_port *devlink_port);
@@ -1679,6 +1698,8 @@ void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port, u32 contro
void devlink_port_attrs_pci_sf_set(struct devlink_port *devlink_port,
u32 controller, u16 pf, u32 sf,
bool external);
+int devl_port_fn_devlink_set(struct devlink_port *devlink_port,
+ struct devlink *fn_devlink);
struct devlink_rate *
devl_rate_node_create(struct devlink *devlink, void *priv, char *node_name,
struct devlink_rate *parent);
@@ -1699,8 +1720,8 @@ void devlink_linecard_provision_clear(struct devlink_linecard *linecard);
void devlink_linecard_provision_fail(struct devlink_linecard *linecard);
void devlink_linecard_activate(struct devlink_linecard *linecard);
void devlink_linecard_deactivate(struct devlink_linecard *linecard);
-void devlink_linecard_nested_dl_set(struct devlink_linecard *linecard,
- struct devlink *nested_devlink);
+int devlink_linecard_nested_dl_set(struct devlink_linecard *linecard,
+ struct devlink *nested_devlink);
int devl_sb_register(struct devlink *devlink, unsigned int sb_index,
u32 size, u16 ingress_pools_count,
u16 egress_pools_count, u16 ingress_tc_count,
@@ -1755,9 +1776,6 @@ int devl_resource_size_get(struct devlink *devlink,
int devl_dpipe_table_resource_set(struct devlink *devlink,
const char *table_name, u64 resource_id,
u64 resource_units);
-int devlink_dpipe_table_resource_set(struct devlink *devlink,
- const char *table_name, u64 resource_id,
- u64 resource_units);
void devl_resource_occ_get_register(struct devlink *devlink,
u64 resource_id,
devlink_resource_occ_get_t *occ_get,
@@ -1802,8 +1820,6 @@ devlink_port_region_create(struct devlink_port *port,
u32 region_max_snapshots, u64 region_size);
void devl_region_destroy(struct devlink_region *region);
void devlink_region_destroy(struct devlink_region *region);
-void devlink_port_region_destroy(struct devlink_region *region);
-
int devlink_region_snapshot_id_get(struct devlink *devlink, u32 *id);
void devlink_region_snapshot_id_put(struct devlink *devlink, u32 id);
int devlink_region_snapshot_create(struct devlink_region *region,
@@ -1838,36 +1854,36 @@ int devlink_info_version_running_put_ext(struct devlink_info_req *req,
const char *version_value,
enum devlink_info_version_type version_type);
-int devlink_fmsg_obj_nest_start(struct devlink_fmsg *fmsg);
-int devlink_fmsg_obj_nest_end(struct devlink_fmsg *fmsg);
-
-int devlink_fmsg_pair_nest_start(struct devlink_fmsg *fmsg, const char *name);
-int devlink_fmsg_pair_nest_end(struct devlink_fmsg *fmsg);
-
-int devlink_fmsg_arr_pair_nest_start(struct devlink_fmsg *fmsg,
- const char *name);
-int devlink_fmsg_arr_pair_nest_end(struct devlink_fmsg *fmsg);
-int devlink_fmsg_binary_pair_nest_start(struct devlink_fmsg *fmsg,
- const char *name);
-int devlink_fmsg_binary_pair_nest_end(struct devlink_fmsg *fmsg);
-
-int devlink_fmsg_u32_put(struct devlink_fmsg *fmsg, u32 value);
-int devlink_fmsg_string_put(struct devlink_fmsg *fmsg, const char *value);
-int devlink_fmsg_binary_put(struct devlink_fmsg *fmsg, const void *value,
- u16 value_len);
-
-int devlink_fmsg_bool_pair_put(struct devlink_fmsg *fmsg, const char *name,
- bool value);
-int devlink_fmsg_u8_pair_put(struct devlink_fmsg *fmsg, const char *name,
- u8 value);
-int devlink_fmsg_u32_pair_put(struct devlink_fmsg *fmsg, const char *name,
- u32 value);
-int devlink_fmsg_u64_pair_put(struct devlink_fmsg *fmsg, const char *name,
- u64 value);
-int devlink_fmsg_string_pair_put(struct devlink_fmsg *fmsg, const char *name,
- const char *value);
-int devlink_fmsg_binary_pair_put(struct devlink_fmsg *fmsg, const char *name,
- const void *value, u32 value_len);
+void devlink_fmsg_obj_nest_start(struct devlink_fmsg *fmsg);
+void devlink_fmsg_obj_nest_end(struct devlink_fmsg *fmsg);
+
+void devlink_fmsg_pair_nest_start(struct devlink_fmsg *fmsg, const char *name);
+void devlink_fmsg_pair_nest_end(struct devlink_fmsg *fmsg);
+
+void devlink_fmsg_arr_pair_nest_start(struct devlink_fmsg *fmsg,
+ const char *name);
+void devlink_fmsg_arr_pair_nest_end(struct devlink_fmsg *fmsg);
+void devlink_fmsg_binary_pair_nest_start(struct devlink_fmsg *fmsg,
+ const char *name);
+void devlink_fmsg_binary_pair_nest_end(struct devlink_fmsg *fmsg);
+
+void devlink_fmsg_u32_put(struct devlink_fmsg *fmsg, u32 value);
+void devlink_fmsg_string_put(struct devlink_fmsg *fmsg, const char *value);
+void devlink_fmsg_binary_put(struct devlink_fmsg *fmsg, const void *value,
+ u16 value_len);
+
+void devlink_fmsg_bool_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ bool value);
+void devlink_fmsg_u8_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ u8 value);
+void devlink_fmsg_u32_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ u32 value);
+void devlink_fmsg_u64_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ u64 value);
+void devlink_fmsg_string_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ const char *value);
+void devlink_fmsg_binary_pair_put(struct devlink_fmsg *fmsg, const char *name,
+ const void *value, u32 value_len);
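
With the helpers now returning void, errors are latched inside the fmsg and surface when the message is consumed, so callbacks lose their per-call error plumbing. A sketch of a health diagnose callback with illustrative key/value pairs:

static int mydrv_diagnose(struct devlink_health_reporter *reporter,
			  struct devlink_fmsg *fmsg,
			  struct netlink_ext_ack *extack)
{
	devlink_fmsg_obj_nest_start(fmsg);
	devlink_fmsg_u32_pair_put(fmsg, "resets", 3);	/* illustrative */
	devlink_fmsg_bool_pair_put(fmsg, "healthy", true);
	devlink_fmsg_obj_nest_end(fmsg);
	return 0;
}
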
struct devlink_health_reporter *
devl_port_health_reporter_create(struct devlink_port *port,
@@ -1905,6 +1921,8 @@ devlink_health_reporter_state_update(struct devlink_health_reporter *reporter,
void
devlink_health_reporter_recovery_done(struct devlink_health_reporter *reporter);
+int devl_nested_devlink_set(struct devlink *devlink,
+ struct devlink *nested_devlink);
bool devlink_is_reload_failed(const struct devlink *devlink);
void devlink_remote_reload_actions_performed(struct devlink *devlink,
enum devlink_reload_limit limit,
diff --git a/include/net/dropreason-core.h b/include/net/dropreason-core.h
index a2b953b57689..6d3a20163260 100644
--- a/include/net/dropreason-core.h
+++ b/include/net/dropreason-core.h
@@ -20,9 +20,14 @@
FN(IP_NOPROTO) \
FN(SOCKET_RCVBUFF) \
FN(PROTO_MEM) \
+ FN(TCP_AUTH_HDR) \
FN(TCP_MD5NOTFOUND) \
FN(TCP_MD5UNEXPECTED) \
FN(TCP_MD5FAILURE) \
+ FN(TCP_AONOTFOUND) \
+ FN(TCP_AOUNEXPECTED) \
+ FN(TCP_AOKEYNOTFOUND) \
+ FN(TCP_AOFAILURE) \
FN(SOCKET_BACKLOG) \
FN(TCP_FLAGS) \
FN(TCP_ZEROWINDOW) \
@@ -30,6 +35,7 @@
FN(TCP_OVERWINDOW) \
FN(TCP_OFOMERGE) \
FN(TCP_RFC7323_PAWS) \
+ FN(TCP_OLD_SEQUENCE) \
FN(TCP_INVALID_SEQUENCE) \
FN(TCP_RESET) \
FN(TCP_INVALID_SYN) \
@@ -78,6 +84,11 @@
FN(IPV6_NDISC_BAD_CODE) \
FN(IPV6_NDISC_BAD_OPTIONS) \
FN(IPV6_NDISC_NS_OTHERHOST) \
+ FN(QUEUE_PURGE) \
+ FN(TC_COOKIE_ERROR) \
+ FN(PACKET_SOCK_ERROR) \
+ FN(TC_CHAIN_NOTFOUND) \
+ FN(TC_RECLASSIFY_LOOP) \
FNe(MAX)
/**
@@ -140,6 +151,11 @@ enum skb_drop_reason {
*/
SKB_DROP_REASON_PROTO_MEM,
/**
+ * @SKB_DROP_REASON_TCP_AUTH_HDR: TCP-MD5 or TCP-AO hashes appear
+ * twice or are set incorrectly.
+ */
+ SKB_DROP_REASON_TCP_AUTH_HDR,
+ /**
* @SKB_DROP_REASON_TCP_MD5NOTFOUND: no MD5 hash and one expected,
* corresponding to LINUX_MIB_TCPMD5NOTFOUND
*/
@@ -155,6 +171,26 @@ enum skb_drop_reason {
*/
SKB_DROP_REASON_TCP_MD5FAILURE,
/**
+ * @SKB_DROP_REASON_TCP_AONOTFOUND: no TCP-AO hash and one was expected,
+ * corresponding to LINUX_MIB_TCPAOREQUIRED
+ */
+ SKB_DROP_REASON_TCP_AONOTFOUND,
+ /**
+ * @SKB_DROP_REASON_TCP_AOUNEXPECTED: TCP-AO hash is present and it
+ * was not expected, corresponding to LINUX_MIB_TCPAOKEYNOTFOUND
+ */
+ SKB_DROP_REASON_TCP_AOUNEXPECTED,
+ /**
+ * @SKB_DROP_REASON_TCP_AOKEYNOTFOUND: TCP-AO key is unknown,
+ * corresponding to LINUX_MIB_TCPAOKEYNOTFOUND
+ */
+ SKB_DROP_REASON_TCP_AOKEYNOTFOUND,
+ /**
+ * @SKB_DROP_REASON_TCP_AOFAILURE: TCP-AO hash is wrong,
+ * corresponding to LINUX_MIB_TCPAOBAD
+ */
+ SKB_DROP_REASON_TCP_AOFAILURE,
+ /**
* @SKB_DROP_REASON_SOCKET_BACKLOG: failed to add skb to socket backlog (
* see LINUX_MIB_TCPBACKLOGDROP)
*/
@@ -188,6 +224,8 @@ enum skb_drop_reason {
* LINUX_MIB_PAWSESTABREJECTED
*/
SKB_DROP_REASON_TCP_RFC7323_PAWS,
+ /** @SKB_DROP_REASON_TCP_OLD_SEQUENCE: Old SEQ field (duplicate packet) */
+ SKB_DROP_REASON_TCP_OLD_SEQUENCE,
/** @SKB_DROP_REASON_TCP_INVALID_SEQUENCE: Not acceptable SEQ field */
SKB_DROP_REASON_TCP_INVALID_SEQUENCE,
/** @SKB_DROP_REASON_TCP_RESET: Invalid RST packet */
@@ -339,6 +377,25 @@ enum skb_drop_reason {
* for another host.
*/
SKB_DROP_REASON_IPV6_NDISC_NS_OTHERHOST,
+ /** @SKB_DROP_REASON_QUEUE_PURGE: bulk free. */
+ SKB_DROP_REASON_QUEUE_PURGE,
+ /**
+ * @SKB_DROP_REASON_TC_COOKIE_ERROR: An error occurred whilst
+ * processing a tc ext cookie.
+ */
+ SKB_DROP_REASON_TC_COOKIE_ERROR,
+ /**
+ * @SKB_DROP_REASON_PACKET_SOCK_ERROR: generic packet socket errors
+ * raised after the packet socket's filter matches an incoming packet.
+ */
+ SKB_DROP_REASON_PACKET_SOCK_ERROR,
+ /** @SKB_DROP_REASON_TC_CHAIN_NOTFOUND: tc chain lookup failed. */
+ SKB_DROP_REASON_TC_CHAIN_NOTFOUND,
+ /**
+ * @SKB_DROP_REASON_TC_RECLASSIFY_LOOP: tc exceeded max reclassify loop
+ * iterations.
+ */
+ SKB_DROP_REASON_TC_RECLASSIFY_LOOP,
/**
* @SKB_DROP_REASON_MAX: the maximum of core drop reasons, which
* shouldn't be used as a real 'reason' - only for tracing code gen
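
As a usage sketch, a hypothetical validation helper dropping with the
new TCP-AO reasons; the real checks live in the TCP-AO input path, this
shape is only illustrative.

#include <linux/skbuff.h>
#include <net/dropreason-core.h>

static bool example_tcp_ao_hdr_ok(struct sk_buff *skb, bool ao_expected,
				  bool ao_present)
{
	if (ao_expected && !ao_present) {
		/* no TCP-AO hash where one was required */
		kfree_skb_reason(skb, SKB_DROP_REASON_TCP_AONOTFOUND);
		return false;
	}
	if (!ao_expected && ao_present) {
		/* unexpected TCP-AO hash on a non-AO connection */
		kfree_skb_reason(skb, SKB_DROP_REASON_TCP_AOUNEXPECTED);
		return false;
	}
	return true;
}
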
diff --git a/include/net/dropreason.h b/include/net/dropreason.h
index 685fb37df8e8..56cb7be92244 100644
--- a/include/net/dropreason.h
+++ b/include/net/dropreason.h
@@ -23,6 +23,12 @@ enum skb_drop_reason_subsys {
*/
SKB_DROP_REASON_SUBSYS_MAC80211_MONITOR,
+ /**
+ * @SKB_DROP_REASON_SUBSYS_OPENVSWITCH: openvswitch drop reasons,
+ * see net/openvswitch/drop.h
+ */
+ SKB_DROP_REASON_SUBSYS_OPENVSWITCH,
+
/** @SKB_DROP_REASON_SUBSYS_NUM: number of subsystems defined */
SKB_DROP_REASON_SUBSYS_NUM
};
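
Subsystem drop reasons live in the upper bits of the reason value. A
sketch of how a private openvswitch enum chains off the new subsystem
id, mirroring the pattern in net/openvswitch/drop.h; the member names
here are illustrative.

#include <net/dropreason.h>

enum example_ovs_drop_reason {
	/* first private reason starts at the subsystem's value space */
	EXAMPLE_OVS_DROP_FIRST = SKB_DROP_REASON_SUBSYS_OPENVSWITCH <<
				 SKB_DROP_REASON_SUBSYS_SHIFT,
	EXAMPLE_OVS_DROP_LAST_ACTION,
};
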
diff --git a/include/net/dsa.h b/include/net/dsa.h
index ab0f0a5b0860..82135fbdb1e6 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -102,11 +102,11 @@ struct dsa_device_ops {
const char *name;
enum dsa_tag_protocol proto;
/* Some tagging protocols either mangle or shift the destination MAC
- * address, in which case the DSA master would drop packets on ingress
+ * address, in which case the DSA conduit would drop packets on ingress
* if what it understands out of the destination MAC address is not in
* its RX filter.
*/
- bool promisc_on_master;
+ bool promisc_on_conduit;
};
struct dsa_lag {
@@ -236,12 +236,12 @@ struct dsa_bridge {
};
struct dsa_port {
- /* A CPU port is physically connected to a master device.
- * A user port exposed to userspace has a slave device.
+ /* A CPU port is physically connected to a conduit device. A user port
+ * exposes a network device to user-space, called 'user' here.
*/
union {
- struct net_device *master;
- struct net_device *slave;
+ struct net_device *conduit;
+ struct net_device *user;
};
/* Copy of the tagging protocol operations, for quicker access
@@ -249,7 +249,7 @@ struct dsa_port {
*/
const struct dsa_device_ops *tag_ops;
- /* Copies for faster access in master receive hot path */
+ /* Copies for faster access in conduit receive hot path */
struct dsa_switch_tree *dst;
struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev);
@@ -281,9 +281,9 @@ struct dsa_port {
u8 lag_tx_enabled:1;
- /* Master state bits, valid only on CPU ports */
- u8 master_admin_up:1;
- u8 master_oper_up:1;
+ /* conduit state bits, valid only on CPU ports */
+ u8 conduit_admin_up:1;
+ u8 conduit_oper_up:1;
/* Valid only on user ports */
u8 cpu_port_in_lag:1;
@@ -303,7 +303,7 @@ struct dsa_port {
struct list_head list;
/*
- * Original copy of the master netdev ethtool_ops
+ * Original copy of the conduit netdev ethtool_ops
*/
const struct ethtool_ops *orig_ethtool_ops;
@@ -314,9 +314,17 @@ struct dsa_port {
struct list_head fdbs;
struct list_head mdbs;
- /* List of VLANs that CPU and DSA ports are members of. */
struct mutex vlans_lock;
- struct list_head vlans;
+ union {
+ /* List of VLANs that CPU and DSA ports are members of.
+ * Access to this is serialized by the sleepable @vlans_lock.
+ */
+ struct list_head vlans;
+ /* List of VLANs that user ports are members of.
+ * Access to this is serialized by netif_addr_lock_bh().
+ */
+ struct list_head user_vlans;
+ };
};
/* TODO: ideally DSA ports would have a single dp->link_dp member,
@@ -444,10 +452,10 @@ struct dsa_switch {
const struct dsa_switch_ops *ops;
/*
- * Slave mii_bus and devices for the individual ports.
+ * User mii_bus and devices for the individual ports.
*/
u32 phys_mii_mask;
- struct mii_bus *slave_mii_bus;
+ struct mii_bus *user_mii_bus;
/* Ageing Time limits in msecs */
unsigned int ageing_time_min;
@@ -512,10 +520,10 @@ static inline bool dsa_port_is_unused(struct dsa_port *dp)
return dp->type == DSA_PORT_TYPE_UNUSED;
}
-static inline bool dsa_port_master_is_operational(struct dsa_port *dp)
+static inline bool dsa_port_conduit_is_operational(struct dsa_port *dp)
{
- return dsa_port_is_cpu(dp) && dp->master_admin_up &&
- dp->master_oper_up;
+ return dsa_port_is_cpu(dp) && dp->conduit_admin_up &&
+ dp->conduit_oper_up;
}
static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p)
@@ -705,12 +713,12 @@ static inline bool dsa_port_offloads_lag(struct dsa_port *dp,
return dsa_port_lag_dev_get(dp) == lag->dev;
}
-static inline struct net_device *dsa_port_to_master(const struct dsa_port *dp)
+static inline struct net_device *dsa_port_to_conduit(const struct dsa_port *dp)
{
if (dp->cpu_port_in_lag)
return dsa_port_lag_dev_get(dp->cpu_dp);
- return dp->cpu_dp->master;
+ return dp->cpu_dp->conduit;
}
static inline
@@ -724,7 +732,7 @@ struct net_device *dsa_port_to_bridge_port(const struct dsa_port *dp)
else if (dp->hsr_dev)
return dp->hsr_dev;
- return dp->slave;
+ return dp->user;
}
static inline struct net_device *
@@ -826,9 +834,9 @@ struct dsa_switch_ops {
int (*connect_tag_protocol)(struct dsa_switch *ds,
enum dsa_tag_protocol proto);
- int (*port_change_master)(struct dsa_switch *ds, int port,
- struct net_device *master,
- struct netlink_ext_ack *extack);
+ int (*port_change_conduit)(struct dsa_switch *ds, int port,
+ struct net_device *conduit,
+ struct netlink_ext_ack *extack);
/* Optional switch-wide initialization and destruction methods */
int (*setup)(struct dsa_switch *ds);
@@ -865,12 +873,15 @@ struct dsa_switch_ops {
struct phylink_pcs *(*phylink_mac_select_pcs)(struct dsa_switch *ds,
int port,
phy_interface_t iface);
- int (*phylink_mac_link_state)(struct dsa_switch *ds, int port,
- struct phylink_link_state *state);
+ int (*phylink_mac_prepare)(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface);
void (*phylink_mac_config)(struct dsa_switch *ds, int port,
unsigned int mode,
const struct phylink_link_state *state);
- void (*phylink_mac_an_restart)(struct dsa_switch *ds, int port);
+ int (*phylink_mac_finish)(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface);
void (*phylink_mac_link_down)(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface);
@@ -958,6 +969,16 @@ struct dsa_switch_ops {
struct phy_device *phy);
void (*port_disable)(struct dsa_switch *ds, int port);
+
+ /*
+ * Notification for MAC address changes on user ports. Drivers can
+ * currently only veto operations. They should not use the method to
+ * program the hardware, since the operation is not rolled back in case
+ * of other errors.
+ */
+ int (*port_set_mac_address)(struct dsa_switch *ds, int port,
+ const unsigned char *addr);
+
/*
* Compatibility between device trees defining multiple CPU ports and
* drivers which are not OK to use by default the numerically smallest
@@ -1187,7 +1208,8 @@ struct dsa_switch_ops {
* HSR integration
*/
int (*port_hsr_join)(struct dsa_switch *ds, int port,
- struct net_device *hsr);
+ struct net_device *hsr,
+ struct netlink_ext_ack *extack);
int (*port_hsr_leave)(struct dsa_switch *ds, int port,
struct net_device *hsr);
@@ -1211,11 +1233,11 @@ struct dsa_switch_ops {
int (*tag_8021q_vlan_del)(struct dsa_switch *ds, int port, u16 vid);
/*
- * DSA master tracking operations
+ * DSA conduit tracking operations
*/
- void (*master_state_change)(struct dsa_switch *ds,
- const struct net_device *master,
- bool operational);
+ void (*conduit_state_change)(struct dsa_switch *ds,
+ const struct net_device *conduit,
+ bool operational);
};
#define DSA_DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes) \
@@ -1352,9 +1374,9 @@ static inline int dsa_switch_resume(struct dsa_switch *ds)
#endif /* CONFIG_PM_SLEEP */
#if IS_ENABLED(CONFIG_NET_DSA)
-bool dsa_slave_dev_check(const struct net_device *dev);
+bool dsa_user_dev_check(const struct net_device *dev);
#else
-static inline bool dsa_slave_dev_check(const struct net_device *dev)
+static inline bool dsa_user_dev_check(const struct net_device *dev)
{
return false;
}
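
DSA's master/slave terminology is renamed to conduit/user throughout
this header. A sketch of a tagging driver's transmit hook using the
renamed accessor; the function itself is illustrative, not from this
patch.

#include <net/dsa.h>

static struct sk_buff *example_tag_xmit(struct sk_buff *skb,
					struct dsa_port *dp)
{
	/* queue the tagged frame on the conduit (formerly "master"),
	 * i.e. the host NIC the switch hangs off
	 */
	skb->dev = dsa_port_to_conduit(dp);
	return skb;
}
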
diff --git a/include/net/dsa_stubs.h b/include/net/dsa_stubs.h
index 361811750a54..6f384897f287 100644
--- a/include/net/dsa_stubs.h
+++ b/include/net/dsa_stubs.h
@@ -13,14 +13,14 @@
extern const struct dsa_stubs *dsa_stubs;
struct dsa_stubs {
- int (*master_hwtstamp_validate)(struct net_device *dev,
- const struct kernel_hwtstamp_config *config,
- struct netlink_ext_ack *extack);
+ int (*conduit_hwtstamp_validate)(struct net_device *dev,
+ const struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
};
-static inline int dsa_master_hwtstamp_validate(struct net_device *dev,
- const struct kernel_hwtstamp_config *config,
- struct netlink_ext_ack *extack)
+static inline int dsa_conduit_hwtstamp_validate(struct net_device *dev,
+ const struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
if (!netdev_uses_dsa(dev))
return 0;
@@ -29,18 +29,18 @@ static inline int dsa_master_hwtstamp_validate(struct net_device *dev,
* netdev_uses_dsa() returns true, the dsa_core module is still
* registered, and so, dsa_unregister_stubs() couldn't have run.
* For netdev_uses_dsa() to start returning false, it would imply that
- * dsa_master_teardown() has executed, which requires rtnl_lock().
+ * dsa_conduit_teardown() has executed, which requires rtnl_lock().
*/
ASSERT_RTNL();
- return dsa_stubs->master_hwtstamp_validate(dev, config, extack);
+ return dsa_stubs->conduit_hwtstamp_validate(dev, config, extack);
}
#else
-static inline int dsa_master_hwtstamp_validate(struct net_device *dev,
- const struct kernel_hwtstamp_config *config,
- struct netlink_ext_ack *extack)
+static inline int dsa_conduit_hwtstamp_validate(struct net_device *dev,
+ const struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
return 0;
}
diff --git a/include/net/dst.h b/include/net/dst.h
index 78884429deed..f5dfc8fb7b37 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -222,13 +222,6 @@ static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metr
return msecs_to_jiffies(dst_metric(dst, metric));
}
-static inline u32
-dst_allfrag(const struct dst_entry *dst)
-{
- int ret = dst_feature(dst, RTAX_FEATURE_ALLFRAG);
- return ret;
-}
-
static inline int
dst_metric_locked(const struct dst_entry *dst, int metric)
{
@@ -392,10 +385,10 @@ static inline int dst_discard(struct sk_buff *skb)
{
return dst_discard_out(&init_net, skb->sk, skb);
}
-void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref,
+void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
int initial_obsolete, unsigned short flags);
void dst_init(struct dst_entry *dst, struct dst_ops *ops,
- struct net_device *dev, int initial_ref, int initial_obsolete,
+ struct net_device *dev, int initial_obsolete,
unsigned short flags);
struct dst_entry *dst_destroy(struct dst_entry *dst);
void dst_dev_put(struct dst_entry *dst);
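
dst_alloc() and dst_init() lose their initial_ref argument; every new
dst now starts with a single reference. A sketch of how a hypothetical
caller adapts:

#include <net/dst.h>
#include <net/route.h>

static struct rtable *example_rt_alloc(struct dst_ops *ops,
				       struct net_device *dev)
{
	/* previously: dst_alloc(ops, dev, 1, DST_OBSOLETE_FORCE_CHK, 0);
	 * only the refcount argument is gone, the rest is unchanged
	 */
	return dst_alloc(ops, dev, DST_OBSOLETE_FORCE_CHK, 0);
}
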
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index 632086b2f644..6d1c8541183d 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -23,7 +23,7 @@ struct dst_ops {
u32 * (*cow_metrics)(struct dst_entry *, unsigned long);
void (*destroy)(struct dst_entry *);
void (*ifdown)(struct dst_entry *,
- struct net_device *dev, int how);
+ struct net_device *dev);
struct dst_entry * (*negative_advice)(struct dst_entry *);
void (*link_failure)(struct sk_buff *);
void (*update_pmtu)(struct dst_entry *dst, struct sock *sk,
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index 82da359bca03..d17855c52ef9 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -172,8 +172,7 @@ void fib_rules_unregister(struct fib_rules_ops *);
int fib_rules_lookup(struct fib_rules_ops *, struct flowi *, int flags,
struct fib_lookup_arg *);
-int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table,
- u32 flags);
+int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table);
bool fib_rule_matchall(const struct fib_rule *rule);
int fib_rules_dump(struct net *net, struct notifier_block *nb, int family,
struct netlink_ext_ack *extack);
diff --git a/include/net/flow.h b/include/net/flow.h
index bb8651a6eaa7..335bbc52171c 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -40,8 +40,8 @@ struct flowi_common {
#define FLOWI_FLAG_KNOWN_NH 0x02
__u32 flowic_secid;
kuid_t flowic_uid;
- struct flowi_tunnel flowic_tun_key;
__u32 flowic_multipath_hash;
+ struct flowi_tunnel flowic_tun_key;
};
union flowi_uli {
@@ -116,11 +116,10 @@ static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
}
/* Reset some input parameters after previous lookup */
-static inline void flowi4_update_output(struct flowi4 *fl4, int oif, __u8 tos,
+static inline void flowi4_update_output(struct flowi4 *fl4, int oif,
__be32 daddr, __be32 saddr)
{
fl4->flowi4_oif = oif;
- fl4->flowi4_tos = tos;
fl4->daddr = daddr;
fl4->saddr = saddr;
}
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index 85b2281576ed..1a7131d6cb0e 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -243,10 +243,12 @@ struct flow_dissector_key_ip {
* struct flow_dissector_key_meta:
* @ingress_ifindex: ingress ifindex
* @ingress_iftype: ingress interface type
+ * @l2_miss: packet did not match an L2 entry during forwarding
*/
struct flow_dissector_key_meta {
int ingress_ifindex;
u16 ingress_iftype;
+ u8 l2_miss;
};
/**
@@ -299,6 +301,34 @@ struct flow_dissector_key_l2tpv3 {
__be32 session_id;
};
+/**
+ * struct flow_dissector_key_ipsec:
+ * @spi: identifier for a ipsec connection
+ */
+struct flow_dissector_key_ipsec {
+ __be32 spi;
+};
+
+/**
+ * struct flow_dissector_key_cfm
+ * @mdl_ver: maintenance domain level (mdl) and cfm protocol version
+ * @opcode: code specifying a type of cfm protocol packet
+ *
+ * See 802.1ag, ITU-T G.8013/Y.1731
+ * 1 2
+ * |7 6 5 4 3 2 1 0|7 6 5 4 3 2 1 0|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | mdl | version | opcode |
+ * +-----+---------+-+-+-+-+-+-+-+-+
+ */
+struct flow_dissector_key_cfm {
+ u8 mdl_ver;
+ u8 opcode;
+};
+
+#define FLOW_DIS_CFM_MDL_MASK GENMASK(7, 5)
+#define FLOW_DIS_CFM_MDL_MAX 7
+
enum flow_dissector_key_id {
FLOW_DISSECTOR_KEY_CONTROL, /* struct flow_dissector_key_control */
FLOW_DISSECTOR_KEY_BASIC, /* struct flow_dissector_key_basic */
@@ -331,6 +361,8 @@ enum flow_dissector_key_id {
FLOW_DISSECTOR_KEY_NUM_OF_VLANS, /* struct flow_dissector_key_num_of_vlans */
FLOW_DISSECTOR_KEY_PPPOE, /* struct flow_dissector_key_pppoe */
FLOW_DISSECTOR_KEY_L2TPV3, /* struct flow_dissector_key_l2tpv3 */
+ FLOW_DISSECTOR_KEY_CFM, /* struct flow_dissector_key_cfm */
+ FLOW_DISSECTOR_KEY_IPSEC, /* struct flow_dissector_key_ipsec */
FLOW_DISSECTOR_KEY_MAX,
};
@@ -347,7 +379,8 @@ struct flow_dissector_key {
};
struct flow_dissector {
- unsigned int used_keys; /* each bit repesents presence of one key id */
+ unsigned long long used_keys;
+ /* each bit represents presence of one key id */
unsigned short int offset[FLOW_DISSECTOR_KEY_MAX];
};
@@ -407,7 +440,7 @@ void skb_flow_get_icmp_tci(const struct sk_buff *skb,
static inline bool dissector_uses_key(const struct flow_dissector *flow_dissector,
enum flow_dissector_key_id key_id)
{
- return flow_dissector->used_keys & (1 << key_id);
+ return flow_dissector->used_keys & (1ULL << key_id);
}
static inline void *skb_flow_dissector_target(struct flow_dissector *flow_dissector,
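
used_keys grows to 64 bits because the new CFM and IPSEC keys push the
key id space past 32 entries. A sketch of the check a classifier would
perform; an open-coded (1 << key_id) would silently overflow for the
high ids, which is what the 1ULL fix above addresses.

#include <net/flow_dissector.h>

static bool example_wants_cfm(const struct flow_dissector *d)
{
	/* dissector_uses_key() masks with 1ULL, so ids past bit 31
	 * test correctly against the 64-bit used_keys bitmap
	 */
	return dissector_uses_key(d, FLOW_DISSECTOR_KEY_CFM);
}
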
diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
index 118082eae48c..314087a5e181 100644
--- a/include/net/flow_offload.h
+++ b/include/net/flow_offload.h
@@ -64,6 +64,10 @@ struct flow_match_tcp {
struct flow_dissector_key_tcp *key, *mask;
};
+struct flow_match_ipsec {
+ struct flow_dissector_key_ipsec *key, *mask;
+};
+
struct flow_match_mpls {
struct flow_dissector_key_mpls *key, *mask;
};
@@ -116,6 +120,8 @@ void flow_rule_match_ports_range(const struct flow_rule *rule,
struct flow_match_ports_range *out);
void flow_rule_match_tcp(const struct flow_rule *rule,
struct flow_match_tcp *out);
+void flow_rule_match_ipsec(const struct flow_rule *rule,
+ struct flow_match_ipsec *out);
void flow_rule_match_icmp(const struct flow_rule *rule,
struct flow_match_icmp *out);
void flow_rule_match_mpls(const struct flow_rule *rule,
@@ -327,7 +333,7 @@ struct flow_action_entry {
struct flow_action {
unsigned int num_entries;
- struct flow_action_entry entries[];
+ struct flow_action_entry entries[] __counted_by(num_entries);
};
static inline bool flow_action_has_entries(const struct flow_action *action)
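
entries[] gains a __counted_by annotation, letting the compiler and
runtime checkers bound array accesses by num_entries. Allocation is
unchanged; a sketch, noting that the counter must be assigned before
the array is indexed:

#include <linux/slab.h>
#include <net/flow_offload.h>

static struct flow_action *example_action_alloc(unsigned int n)
{
	struct flow_action *act;

	act = kzalloc(struct_size(act, entries, n), GFP_KERNEL);
	if (act)
		act->num_entries = n;	/* the bound used by __counted_by */
	return act;
}
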
diff --git a/include/net/fq.h b/include/net/fq.h
index 07b5aff6ec58..99fbe4127b95 100644
--- a/include/net/fq.h
+++ b/include/net/fq.h
@@ -98,9 +98,4 @@ typedef bool fq_skb_filter_t(struct fq *,
struct sk_buff *,
void *);
-typedef struct fq_flow *fq_flow_get_default_t(struct fq *,
- struct fq_tin *,
- int idx,
- struct sk_buff *);
-
#endif
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index ed4622dd4828..e61469129402 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -8,10 +8,15 @@
#define GENLMSG_DEFAULT_SIZE (NLMSG_DEFAULT_SIZE - GENL_HDRLEN)
+/* Binding to multicast group requires %CAP_NET_ADMIN */
+#define GENL_MCAST_CAP_NET_ADMIN BIT(0)
+/* Binding to multicast group requires %CAP_SYS_ADMIN */
+#define GENL_MCAST_CAP_SYS_ADMIN BIT(1)
+
/**
* struct genl_multicast_group - generic netlink multicast group
* @name: name of the multicast group, names are per-family
- * @flags: GENL_* flags (%GENL_ADMIN_PERM or %GENL_UNS_ADMIN_PERM)
+ * @flags: GENL_MCAST_* flags
*/
struct genl_multicast_group {
char name[GENL_NAMSIZ];
@@ -49,6 +54,9 @@ struct genl_info;
* @split_ops: the split do/dump form of operation definition
 * @n_split_ops: number of entries in @split_ops; note that with split do/dump
 * ops the number of entries is not the same as the number of commands
+ * @sock_priv_size: the size of per-socket private memory
+ * @sock_priv_init: the per-socket private memory initializer
+ * @sock_priv_destroy: the per-socket private memory destructor
*
* Attribute policies (the combination of @policy and @maxattr fields)
* can be attached at the family level or at the operation level.
@@ -82,20 +90,26 @@ struct genl_family {
const struct genl_multicast_group *mcgrps;
struct module *module;
+ size_t sock_priv_size;
+ void (*sock_priv_init)(void *priv);
+ void (*sock_priv_destroy)(void *priv);
+
/* private: internal use only */
/* protocol family identifier */
int id;
/* starting number of multicast group IDs in this family */
unsigned int mcgrp_offset;
+ /* list of per-socket privs */
+ struct xarray *sock_privs;
};
/**
* struct genl_info - receiving information
* @snd_seq: sending sequence number
* @snd_portid: netlink portid of sender
+ * @family: generic netlink family
* @nlhdr: netlink message header
* @genlhdr: generic netlink message header
- * @userhdr: user specific header
* @attrs: netlink attributes
* @_net: network namespace
* @user_ptr: user pointers
@@ -104,16 +118,16 @@ struct genl_family {
struct genl_info {
u32 snd_seq;
u32 snd_portid;
- struct nlmsghdr * nlhdr;
+ const struct genl_family *family;
+ const struct nlmsghdr * nlhdr;
struct genlmsghdr * genlhdr;
- void * userhdr;
struct nlattr ** attrs;
possible_net_t _net;
void * user_ptr[2];
struct netlink_ext_ack *extack;
};
-static inline struct net *genl_info_net(struct genl_info *info)
+static inline struct net *genl_info_net(const struct genl_info *info)
{
return read_pnet(&info->_net);
}
@@ -123,6 +137,11 @@ static inline void genl_info_net_set(struct genl_info *info, struct net *net)
write_pnet(&info->_net, net);
}
+static inline void *genl_info_userhdr(const struct genl_info *info)
+{
+ return (u8 *)info->genlhdr + GENL_HDRLEN;
+}
+
#define GENL_SET_ERR_MSG(info, msg) NL_SET_ERR_MSG((info)->extack, msg)
#define GENL_SET_ERR_MSG_FMT(info, msg, args...) \
@@ -244,14 +263,13 @@ struct genl_split_ops {
/**
* struct genl_dumpit_info - info that is available during dumpit op call
- * @family: generic netlink family - for internal genl code usage
* @op: generic netlink ops - for internal genl code usage
* @attrs: netlink attributes
+ * @info: struct genl_info describing the request
*/
struct genl_dumpit_info {
- const struct genl_family *family;
struct genl_split_ops op;
- struct nlattr **attrs;
+ struct genl_info info;
};
static inline const struct genl_dumpit_info *
@@ -260,6 +278,40 @@ genl_dumpit_info(struct netlink_callback *cb)
return cb->data;
}
+static inline const struct genl_info *
+genl_info_dump(struct netlink_callback *cb)
+{
+ return &genl_dumpit_info(cb)->info;
+}
+
+/**
+ * genl_info_init_ntf() - initialize genl_info for notifications
+ * @info: genl_info struct to set up
+ * @family: pointer to the genetlink family
+ * @cmd: command to be used in the notification
+ *
+ * Initialize a locally declared struct genl_info to pass to various APIs.
+ * Intended to be used when creating notifications.
+ */
+static inline void
+genl_info_init_ntf(struct genl_info *info, const struct genl_family *family,
+ u8 cmd)
+{
+ struct genlmsghdr *hdr = (void *) &info->user_ptr[0];
+
+ memset(info, 0, sizeof(*info));
+ info->family = family;
+ info->genlhdr = hdr;
+ hdr->cmd = cmd;
+}
+
+static inline bool genl_info_is_ntf(const struct genl_info *info)
+{
+ return !info->nlhdr;
+}
+
+void *__genl_sk_priv_get(struct genl_family *family, struct sock *sk);
+void *genl_sk_priv_get(struct genl_family *family, struct sock *sk);
int genl_register_family(struct genl_family *family);
int genl_unregister_family(const struct genl_family *family);
void genl_notify(const struct genl_family *family, struct sk_buff *skb,
@@ -268,6 +320,32 @@ void genl_notify(const struct genl_family *family, struct sk_buff *skb,
void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
const struct genl_family *family, int flags, u8 cmd);
+static inline void *
+__genlmsg_iput(struct sk_buff *skb, const struct genl_info *info, int flags)
+{
+ return genlmsg_put(skb, info->snd_portid, info->snd_seq, info->family,
+ flags, info->genlhdr->cmd);
+}
+
+/**
+ * genlmsg_iput - start genetlink message based on genl_info
+ * @skb: skb in which message header will be placed
+ * @info: genl_info as provided to do/dump handlers
+ *
+ * Convenience wrapper which starts a genetlink message based on
+ * information in user request. @info should be either the struct passed
+ * by genetlink core to do/dump handlers (when constructing replies to
+ * such requests) or a struct initialized by genl_info_init_ntf()
+ * when constructing notifications.
+ *
+ * Returns pointer to new genetlink header.
+ */
+static inline void *
+genlmsg_iput(struct sk_buff *skb, const struct genl_info *info)
+{
+ return __genlmsg_iput(skb, info, 0);
+}
+
/**
* genlmsg_nlhdr - Obtain netlink header from user specified header
* @user_hdr: user header as returned from genlmsg_put()
@@ -374,6 +452,35 @@ static inline void genlmsg_cancel(struct sk_buff *skb, void *hdr)
}
/**
+ * genlmsg_multicast_netns_filtered - multicast a netlink message
+ * to a specific netns with a filter function
+ * @family: the generic netlink family
+ * @net: the net namespace
+ * @skb: netlink message as socket buffer
+ * @portid: own netlink portid to avoid sending to yourself
+ * @group: offset of multicast group in groups array
+ * @flags: allocation flags
+ * @filter: filter function
+ * @filter_data: filter function private data
+ *
+ * Return: 0 on success, negative error code for failure.
+ */
+static inline int
+genlmsg_multicast_netns_filtered(const struct genl_family *family,
+ struct net *net, struct sk_buff *skb,
+ u32 portid, unsigned int group, gfp_t flags,
+ netlink_filter_fn filter,
+ void *filter_data)
+{
+ if (WARN_ON_ONCE(group >= family->n_mcgrps))
+ return -EINVAL;
+ group = family->mcgrp_offset + group;
+ return nlmsg_multicast_filtered(net->genl_sock, skb, portid, group,
+ flags, filter, filter_data);
+}
+
+/**
* genlmsg_multicast_netns - multicast a netlink message to a specific netns
* @family: the generic netlink family
* @net: the net namespace
@@ -386,10 +493,8 @@ static inline int genlmsg_multicast_netns(const struct genl_family *family,
struct net *net, struct sk_buff *skb,
u32 portid, unsigned int group, gfp_t flags)
{
- if (WARN_ON_ONCE(group >= family->n_mcgrps))
- return -EINVAL;
- group = family->mcgrp_offset + group;
- return nlmsg_multicast(net->genl_sock, skb, portid, group, flags);
+ return genlmsg_multicast_netns_filtered(family, net, skb, portid,
+ group, flags, NULL, NULL);
}
/**
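
struct genl_info now records the family and can be synthesized for
notifications, so reply and notify paths share one message-building
helper. A sketch of a notifier built on the new plumbing; the family
and command are hypothetical, and multicast group 0 is assumed to
exist.

#include <net/genetlink.h>

static int example_notify(const struct genl_family *family,
			  struct net *net, u8 cmd)
{
	struct genl_info info;
	struct sk_buff *skb;
	void *hdr;

	skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	genl_info_init_ntf(&info, family, cmd);
	hdr = genlmsg_iput(skb, &info);
	if (!hdr) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	/* nla_put() attributes would go here */

	genlmsg_end(skb, hdr);
	return genlmsg_multicast_netns(family, net, skb, 0, 0, GFP_KERNEL);
}
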
diff --git a/include/net/gro.h b/include/net/gro.h
index a4fab706240d..b435f0ddbf64 100644
--- a/include/net/gro.h
+++ b/include/net/gro.h
@@ -11,11 +11,23 @@
#include <net/udp.h>
struct napi_gro_cb {
- /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
- void *frag0;
+ union {
+ struct {
+ /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
+ void *frag0;
- /* Length of frag0. */
- unsigned int frag0_len;
+ /* Length of frag0. */
+ unsigned int frag0_len;
+ };
+
+ struct {
+ /* used in skb_gro_receive() slow path */
+ struct sk_buff *last;
+
+ /* jiffies when first packet was created/queued */
+ unsigned long age;
+ };
+ };
/* This indicates where we are processing relative to skb->data. */
int data_offset;
@@ -29,12 +41,9 @@ struct napi_gro_cb {
/* Number of segments aggregated. */
u16 count;
- /* Used in ipv6_gro_receive() and foo-over-udp */
+ /* Used in ipv6_gro_receive() and foo-over-udp and esp-in-udp */
u16 proto;
- /* jiffies when first packet was created/queued */
- unsigned long age;
-
/* Used in napi_gro_cb::free */
#define NAPI_GRO_FREE 1
#define NAPI_GRO_FREE_STOLEN_HEAD 2
@@ -77,9 +86,6 @@ struct napi_gro_cb {
/* used to support CHECKSUM_COMPLETE for tunneling protocols */
__wsum csum;
-
- /* used in skb_gro_receive() slow path */
- struct sk_buff *last;
};
#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
@@ -446,5 +452,49 @@ static inline void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb,
gro_normal_list(napi);
}
+/* This function is an alternative to the 'inet_iif' and 'inet_sdif'
+ * helpers for cases where we cannot rely on the fields of IPCB.
+ *
+ * The caller must verify skb_valid_dst(skb) is false and skb->dev is initialized.
+ * The caller must hold the RCU read lock.
+ */
+static inline void inet_get_iif_sdif(const struct sk_buff *skb, int *iif, int *sdif)
+{
+ *iif = inet_iif(skb) ?: skb->dev->ifindex;
+ *sdif = 0;
+
+#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
+ if (netif_is_l3_slave(skb->dev)) {
+ struct net_device *master = netdev_master_upper_dev_get_rcu(skb->dev);
+
+ *sdif = *iif;
+ *iif = master ? master->ifindex : 0;
+ }
+#endif
+}
+
+/* This function is an alternative to the 'inet6_iif' and 'inet6_sdif'
+ * helpers for cases where we cannot rely on the fields of IP6CB.
+ *
+ * The caller must verify skb_valid_dst(skb) is false and skb->dev is initialized.
+ * The caller must hold the RCU read lock.
+ */
+static inline void inet6_get_iif_sdif(const struct sk_buff *skb, int *iif, int *sdif)
+{
+ /* using skb->dev->ifindex because skb_dst(skb) is not initialized */
+ *iif = skb->dev->ifindex;
+ *sdif = 0;
+
+#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
+ if (netif_is_l3_slave(skb->dev)) {
+ struct net_device *master = netdev_master_upper_dev_get_rcu(skb->dev);
+
+ *sdif = *iif;
+ *iif = master ? master->ifindex : 0;
+ }
+#endif
+}
+
+extern struct list_head offload_base;
#endif /* _NET_IPV6_GRO_H */
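
A sketch of calling the helpers above from a hypothetical lookup path;
note the documented preconditions: no valid dst attached to the skb,
skb->dev initialized, and the RCU read lock held.

#include <net/gro.h>

static void example_resolve_iif_sdif(const struct sk_buff *skb,
				     int *iif, int *sdif)
{
	rcu_read_lock();
	inet_get_iif_sdif(skb, iif, sdif);	/* IPv4 flavour */
	rcu_read_unlock();
}
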
diff --git a/include/net/gso.h b/include/net/gso.h
new file mode 100644
index 000000000000..29975440cad5
--- /dev/null
+++ b/include/net/gso.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _NET_GSO_H
+#define _NET_GSO_H
+
+#include <linux/skbuff.h>
+
+/* Keeps track of mac header offset relative to skb->head.
+ * It is useful for TSO of tunneling protocols, e.g. GRE.
+ * For non-tunnel skb it points to skb_mac_header() and for
+ * tunnel skb it points to outer mac header.
+ * Keeps track of level of encapsulation of network headers.
+ */
+struct skb_gso_cb {
+ union {
+ int mac_offset;
+ int data_offset;
+ };
+ int encap_level;
+ __wsum csum;
+ __u16 csum_start;
+};
+#define SKB_GSO_CB_OFFSET 32
+#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_GSO_CB_OFFSET))
+
+static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
+{
+ return (skb_mac_header(inner_skb) - inner_skb->head) -
+ SKB_GSO_CB(inner_skb)->mac_offset;
+}
+
+static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
+{
+ int new_headroom, headroom;
+ int ret;
+
+ headroom = skb_headroom(skb);
+ ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
+ if (ret)
+ return ret;
+
+ new_headroom = skb_headroom(skb);
+ SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
+ return 0;
+}
+
+static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
+{
+ /* Do not update partial checksums if remote checksum is enabled. */
+ if (skb->remcsum_offload)
+ return;
+
+ SKB_GSO_CB(skb)->csum = res;
+ SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
+}
+
+/* Compute the checksum for a gso segment. First compute the checksum value
+ * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and
+ * then add in skb->csum (checksum from csum_start to end of packet).
+ * skb->csum and csum_start are then updated to reflect the checksum of the
+ * resultant packet starting from the transport header-- the resultant checksum
+ * is in the res argument (i.e. normally zero or ~ of the checksum of a
+ * pseudo header).
+ */
+static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
+{
+ unsigned char *csum_start = skb_transport_header(skb);
+ int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
+ __wsum partial = SKB_GSO_CB(skb)->csum;
+
+ SKB_GSO_CB(skb)->csum = res;
+ SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;
+
+ return csum_fold(csum_partial(csum_start, plen, partial));
+}
+
+struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
+ netdev_features_t features, bool tx_path);
+
+static inline struct sk_buff *skb_gso_segment(struct sk_buff *skb,
+ netdev_features_t features)
+{
+ return __skb_gso_segment(skb, features, true);
+}
+
+struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb,
+ netdev_features_t features, __be16 type);
+
+struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
+ netdev_features_t features);
+
+bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);
+
+bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
+
+static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
+ int pulled_hlen, u16 mac_offset,
+ int mac_len)
+{
+ skb->protocol = protocol;
+ skb->encapsulation = 1;
+ skb_push(skb, pulled_hlen);
+ skb_reset_transport_header(skb);
+ skb->mac_header = mac_offset;
+ skb->network_header = skb->mac_header + mac_len;
+ skb->mac_len = mac_len;
+}
+
+#endif /* _NET_GSO_H */
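
These GSO helpers move out of the generic networking headers into this
dedicated file. A sketch of the canonical consumer loop; the transmit
step and error policy are illustrative.

#include <net/gso.h>

static int example_segment_and_send(struct sk_buff *skb,
				    netdev_features_t features)
{
	struct sk_buff *segs, *seg, *next;

	segs = skb_gso_segment(skb, features);
	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (!segs)
		return 0;	/* no segmentation needed, send skb as-is */

	consume_skb(skb);	/* the original superpacket is done */
	skb_list_walk_safe(segs, seg, next) {
		skb_mark_not_on_list(seg);
		/* hand seg to the transmit path here */
	}
	return 0;
}
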
diff --git a/include/net/handshake.h b/include/net/handshake.h
index 2e26e436e85f..8ebd4f9ed26e 100644
--- a/include/net/handshake.h
+++ b/include/net/handshake.h
@@ -40,5 +40,10 @@ int tls_server_hello_x509(const struct tls_handshake_args *args, gfp_t flags);
int tls_server_hello_psk(const struct tls_handshake_args *args, gfp_t flags);
bool tls_handshake_cancel(struct sock *sk);
+void tls_handshake_close(struct socket *sock);
+
+u8 tls_get_record_type(const struct sock *sk, const struct cmsghdr *msg);
+void tls_alert_recv(const struct sock *sk, const struct msghdr *msg,
+ u8 *level, u8 *description);
#endif /* _NET_HANDSHAKE_H */
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
index f980a72f2ce6..925bac726a92 100644
--- a/include/net/ieee80211_radiotap.h
+++ b/include/net/ieee80211_radiotap.h
@@ -21,7 +21,7 @@
#include <asm/unaligned.h>
/**
- * struct ieee82011_radiotap_header - base radiotap header
+ * struct ieee80211_radiotap_header - base radiotap header
*/
struct ieee80211_radiotap_header {
/**
@@ -535,8 +535,16 @@ enum ieee80211_radiotap_eht_usig_common {
IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR_KNOWN = 0x00000008,
IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP_KNOWN = 0x00000010,
IEEE80211_RADIOTAP_EHT_USIG_COMMON_BAD_USIG_CRC = 0x00000020,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_VALIDATE_BITS_CHECKED = 0x00000040,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_VALIDATE_BITS_OK = 0x00000080,
IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER = 0x00007000,
IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW = 0x00038000,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_20MHZ = 0,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_40MHZ = 1,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_80MHZ = 2,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_160MHZ = 3,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_320MHZ_1 = 4,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_320MHZ_2 = 5,
IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL = 0x00040000,
IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR = 0x01f80000,
IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP = 0xfe000000,
@@ -573,6 +581,7 @@ enum ieee80211_radiotap_eht_usig_tb {
/**
* ieee80211_get_radiotap_len - get radiotap header length
+ * @data: pointer to the header
*/
static inline u16 ieee80211_get_radiotap_len(const char *data)
{
diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h
index da8a3e648c7a..4de858f9929e 100644
--- a/include/net/ieee802154_netdev.h
+++ b/include/net/ieee802154_netdev.h
@@ -74,6 +74,10 @@ struct ieee802154_beacon_hdr {
#endif
} __packed;
+struct ieee802154_mac_cmd_pl {
+ u8 cmd_id;
+} __packed;
+
struct ieee802154_sechdr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 level:3,
@@ -121,6 +125,35 @@ struct ieee802154_hdr_fc {
#endif
};
+struct ieee802154_assoc_req_pl {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 reserved1:1,
+ device_type:1,
+ power_source:1,
+ rx_on_when_idle:1,
+ assoc_type:1,
+ reserved2:1,
+ security_cap:1,
+ alloc_addr:1;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u8 alloc_addr:1,
+ security_cap:1,
+ reserved2:1,
+ assoc_type:1,
+ rx_on_when_idle:1,
+ power_source:1,
+ device_type:1,
+ reserved1:1;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+} __packed;
+
+struct ieee802154_assoc_resp_pl {
+ __le16 short_addr;
+ u8 status;
+} __packed;
+
enum ieee802154_frame_version {
IEEE802154_2003_STD,
IEEE802154_2006_STD,
@@ -136,6 +169,19 @@ enum ieee802154_addressing_mode {
IEEE802154_EXTENDED_ADDRESSING,
};
+enum ieee802154_association_status {
+ IEEE802154_ASSOCIATION_SUCCESSFUL = 0x00,
+ IEEE802154_PAN_AT_CAPACITY = 0x01,
+ IEEE802154_PAN_ACCESS_DENIED = 0x02,
+ IEEE802154_HOPPING_SEQUENCE_OFFSET_DUP = 0x03,
+ IEEE802154_FAST_ASSOCIATION_SUCCESSFUL = 0x80,
+};
+
+enum ieee802154_disassociation_reason {
+ IEEE802154_COORD_WISHES_DEVICE_TO_LEAVE = 0x1,
+ IEEE802154_DEVICE_WISHES_TO_LEAVE = 0x2,
+};
+
struct ieee802154_hdr {
struct ieee802154_hdr_fc fc;
u8 seq;
@@ -149,6 +195,34 @@ struct ieee802154_beacon_frame {
struct ieee802154_beacon_hdr mac_pl;
};
+struct ieee802154_mac_cmd_frame {
+ struct ieee802154_hdr mhr;
+ struct ieee802154_mac_cmd_pl mac_pl;
+};
+
+struct ieee802154_beacon_req_frame {
+ struct ieee802154_hdr mhr;
+ struct ieee802154_mac_cmd_pl mac_pl;
+};
+
+struct ieee802154_association_req_frame {
+ struct ieee802154_hdr mhr;
+ struct ieee802154_mac_cmd_pl mac_pl;
+ struct ieee802154_assoc_req_pl assoc_req_pl;
+};
+
+struct ieee802154_association_resp_frame {
+ struct ieee802154_hdr mhr;
+ struct ieee802154_mac_cmd_pl mac_pl;
+ struct ieee802154_assoc_resp_pl assoc_resp_pl;
+};
+
+struct ieee802154_disassociation_notif_frame {
+ struct ieee802154_hdr mhr;
+ struct ieee802154_mac_cmd_pl mac_pl;
+ u8 disassoc_pl;
+};
+
/* pushes hdr onto the skb. fields of hdr->fc that can be calculated from
* the contents of hdr will be, and the actual value of those bits in
* hdr->fc will be ignored. this includes the INTRA_PAN bit and the frame
@@ -174,9 +248,13 @@ int ieee802154_hdr_peek_addrs(const struct sk_buff *skb,
*/
int ieee802154_hdr_peek(const struct sk_buff *skb, struct ieee802154_hdr *hdr);
-/* pushes a beacon frame into an skb */
+/* pushes/pulls various frame types into/from an skb */
int ieee802154_beacon_push(struct sk_buff *skb,
struct ieee802154_beacon_frame *beacon);
+int ieee802154_mac_cmd_push(struct sk_buff *skb, void *frame,
+ const void *pl, unsigned int pl_len);
+int ieee802154_mac_cmd_pl_pull(struct sk_buff *skb,
+ struct ieee802154_mac_cmd_pl *mac_pl);
int ieee802154_max_payload(const struct ieee802154_hdr *hdr);
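
The new push/pull helpers generalize beacon handling to arbitrary MAC
command frames. A sketch of building an association request with them;
the frame-control setup is abbreviated compared to the real MLME code,
and the payload flags are illustrative.

#include <linux/ieee802154.h>
#include <net/ieee802154_netdev.h>

static int example_push_assoc_req(struct sk_buff *skb)
{
	struct ieee802154_association_req_frame frame = {};
	struct ieee802154_assoc_req_pl pl = {
		.rx_on_when_idle = 1,
		.alloc_addr = 1,	/* ask the coordinator for a short address */
	};

	frame.mhr.fc.type = IEEE802154_FC_TYPE_MAC_CMD;
	frame.mac_pl.cmd_id = IEEE802154_CMD_ASSOCIATION_REQ;

	return ieee802154_mac_cmd_push(skb, &frame, &pl, sizeof(pl));
}
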
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index c8490729b4ae..f07642264c1e 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -22,10 +22,6 @@
#define IF_RS_SENT 0x10
#define IF_READY 0x80000000
-/* prefix flags */
-#define IF_PREFIX_ONLINK 0x01
-#define IF_PREFIX_AUTOCONF 0x02
-
enum {
INET6_IFADDR_STATE_PREDAD,
INET6_IFADDR_STATE_DAD,
@@ -89,7 +85,7 @@ struct ip6_sf_socklist {
unsigned int sl_max;
unsigned int sl_count;
struct rcu_head rcu;
- struct in6_addr sl_addr[];
+ struct in6_addr sl_addr[] __counted_by(sl_max);
};
#define IP6_SFBLOCK 10 /* allocate this many at once */
diff --git a/include/net/ila.h b/include/net/ila.h
deleted file mode 100644
index 73ebe5eab272..000000000000
--- a/include/net/ila.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * ILA kernel interface
- *
- * Copyright (c) 2015 Tom Herbert <tom@herbertland.com>
- */
-
-#ifndef _NET_ILA_H
-#define _NET_ILA_H
-
-struct sk_buff;
-
-int ila_xlat_outgoing(struct sk_buff *skb);
-int ila_xlat_incoming(struct sk_buff *skb);
-
-#endif /* _NET_ILA_H */
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
index 56f1286583d3..533a7337865a 100644
--- a/include/net/inet6_hashtables.h
+++ b/include/net/inet6_hashtables.h
@@ -48,6 +48,22 @@ struct sock *__inet6_lookup_established(struct net *net,
const u16 hnum, const int dif,
const int sdif);
+typedef u32 (inet6_ehashfn_t)(const struct net *net,
+ const struct in6_addr *laddr, const u16 lport,
+ const struct in6_addr *faddr, const __be16 fport);
+
+inet6_ehashfn_t inet6_ehashfn;
+
+INDIRECT_CALLABLE_DECLARE(inet6_ehashfn_t udp6_ehashfn);
+
+struct sock *inet6_lookup_reuseport(struct net *net, struct sock *sk,
+ struct sk_buff *skb, int doff,
+ const struct in6_addr *saddr,
+ __be16 sport,
+ const struct in6_addr *daddr,
+ unsigned short hnum,
+ inet6_ehashfn_t *ehashfn);
+
struct sock *inet6_lookup_listener(struct net *net,
struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
@@ -57,6 +73,15 @@ struct sock *inet6_lookup_listener(struct net *net,
const unsigned short hnum,
const int dif, const int sdif);
+struct sock *inet6_lookup_run_sk_lookup(struct net *net,
+ int protocol,
+ struct sk_buff *skb, int doff,
+ const struct in6_addr *saddr,
+ const __be16 sport,
+ const struct in6_addr *daddr,
+ const u16 hnum, const int dif,
+ inet6_ehashfn_t *ehashfn);
+
static inline struct sock *__inet6_lookup(struct net *net,
struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
@@ -78,6 +103,46 @@ static inline struct sock *__inet6_lookup(struct net *net,
daddr, hnum, dif, sdif);
}
+static inline
+struct sock *inet6_steal_sock(struct net *net, struct sk_buff *skb, int doff,
+ const struct in6_addr *saddr, const __be16 sport,
+ const struct in6_addr *daddr, const __be16 dport,
+ bool *refcounted, inet6_ehashfn_t *ehashfn)
+{
+ struct sock *sk, *reuse_sk;
+ bool prefetched;
+
+ sk = skb_steal_sock(skb, refcounted, &prefetched);
+ if (!sk)
+ return NULL;
+
+ if (!prefetched || !sk_fullsock(sk))
+ return sk;
+
+ if (sk->sk_protocol == IPPROTO_TCP) {
+ if (sk->sk_state != TCP_LISTEN)
+ return sk;
+ } else if (sk->sk_protocol == IPPROTO_UDP) {
+ if (sk->sk_state != TCP_CLOSE)
+ return sk;
+ } else {
+ return sk;
+ }
+
+ reuse_sk = inet6_lookup_reuseport(net, sk, skb, doff,
+ saddr, sport, daddr, ntohs(dport),
+ ehashfn);
+ if (!reuse_sk)
+ return sk;
+
+ /* We've chosen a new reuseport sock which is never refcounted. This
+ * implies that sk also isn't refcounted.
+ */
+ WARN_ON_ONCE(*refcounted);
+
+ return reuse_sk;
+}
+
static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
const __be16 sport,
@@ -85,14 +150,20 @@ static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo,
int iif, int sdif,
bool *refcounted)
{
- struct sock *sk = skb_steal_sock(skb, refcounted);
-
+ struct net *net = dev_net(skb_dst(skb)->dev);
+ const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ struct sock *sk;
+
+ sk = inet6_steal_sock(net, skb, doff, &ip6h->saddr, sport, &ip6h->daddr, dport,
+ refcounted, inet6_ehashfn);
+ if (IS_ERR(sk))
+ return NULL;
if (sk)
return sk;
- return __inet6_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb,
- doff, &ipv6_hdr(skb)->saddr, sport,
- &ipv6_hdr(skb)->daddr, ntohs(dport),
+ return __inet6_lookup(net, hashinfo, skb,
+ doff, &ip6h->saddr, sport,
+ &ip6h->daddr, ntohs(dport),
iif, sdif, refcounted);
}
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
index cec453c18f1d..f50a644d87a9 100644
--- a/include/net/inet_common.h
+++ b/include/net/inet_common.h
@@ -31,16 +31,19 @@ int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags);
int inet_accept(struct socket *sock, struct socket *newsock, int flags,
bool kern);
+void __inet_accept(struct socket *sock, struct socket *newsock,
+ struct sock *newsk);
int inet_send_prepare(struct sock *sk);
int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size);
-ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
- size_t size, int flags);
+void inet_splice_eof(struct socket *sock);
int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
int flags);
int inet_shutdown(struct socket *sock, int how);
int inet_listen(struct socket *sock, int backlog);
+int __inet_listen_sk(struct sock *sk, int backlog);
void inet_sock_destruct(struct sock *sk);
int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
+int inet_bind_sk(struct sock *sk, struct sockaddr *uaddr, int addr_len);
/* Don't allocate port at this moment, defer to connect. */
#define BIND_FORCE_ADDRESS_NO_PORT (1 << 0)
/* Grab and release socket lock. */
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index c2b15f7e5516..9ab4bf704e86 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -44,7 +44,6 @@ struct inet_connection_sock_af_ops {
struct request_sock *req_unhash,
bool *own_req);
u16 net_header_len;
- u16 net_frag_header_len;
u16 sockaddr_len;
int (*setsockopt)(struct sock *sk, int level, int optname,
sockptr_t optval, unsigned int optlen);
@@ -114,7 +113,10 @@ struct inet_connection_sock {
__u8 quick; /* Scheduled number of quick acks */
__u8 pingpong; /* The session is interactive */
__u8 retry; /* Number of attempts */
- __u32 ato; /* Predicted tick of soft clock */
+ #define ATO_BITS 8
+ __u32 ato:ATO_BITS, /* Predicted tick of soft clock */
+ lrcv_flowlabel:20, /* last received ipv6 flowlabel */
+ unused:4;
unsigned long timeout; /* Currently scheduled timeout */
__u32 lrcvtime; /* timestamp of last received data packet */
__u16 last_seg_size; /* Size of last incoming segment */
@@ -164,7 +166,8 @@ enum inet_csk_ack_state_t {
ICSK_ACK_TIMER = 2,
ICSK_ACK_PUSHED = 4,
ICSK_ACK_PUSHED2 = 8,
- ICSK_ACK_NOW = 16 /* Send the next ACK immediately (once) */
+ ICSK_ACK_NOW = 16, /* Send the next ACK immediately (once) */
+ ICSK_ACK_NOMEM = 32,
};
void inet_csk_init_xmit_timers(struct sock *sk,
@@ -324,11 +327,10 @@ void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);
-#define TCP_PINGPONG_THRESH 1
-
static inline void inet_csk_enter_pingpong_mode(struct sock *sk)
{
- inet_csk(sk)->icsk_ack.pingpong = TCP_PINGPONG_THRESH;
+ inet_csk(sk)->icsk_ack.pingpong =
+ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_pingpong_thresh);
}
static inline void inet_csk_exit_pingpong_mode(struct sock *sk)
@@ -338,12 +340,29 @@ static inline void inet_csk_exit_pingpong_mode(struct sock *sk)
static inline bool inet_csk_in_pingpong_mode(struct sock *sk)
{
- return inet_csk(sk)->icsk_ack.pingpong >= TCP_PINGPONG_THRESH;
+ return inet_csk(sk)->icsk_ack.pingpong >=
+ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_pingpong_thresh);
+}
+
+static inline void inet_csk_inc_pingpong_cnt(struct sock *sk)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+
+ if (icsk->icsk_ack.pingpong < U8_MAX)
+ icsk->icsk_ack.pingpong++;
}
-static inline bool inet_csk_has_ulp(struct sock *sk)
+static inline bool inet_csk_has_ulp(const struct sock *sk)
{
- return inet_sk(sk)->is_icsk && !!inet_csk(sk)->icsk_ulp_ops;
+ return inet_test_bit(IS_ICSK, sk) && !!inet_csk(sk)->icsk_ulp_ops;
+}
+
+static inline void inet_init_csk_locks(struct sock *sk)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+
+ spin_lock_init(&icsk->icsk_accept_queue.rskq_lock);
+ spin_lock_init(&icsk->icsk_accept_queue.fastopenq.lock);
}
#endif /* _INET_CONNECTION_SOCK_H */
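
Interactivity detection becomes tunable: pingpong is now a saturating
counter compared against the tcp_pingpong_thresh sysctl rather than a
hard-coded threshold of 1. A sketch of the receive-side pattern this
enables; illustrative, not the exact tcp_input.c code.

#include <net/inet_connection_sock.h>

static void example_on_interactive_data(struct sock *sk)
{
	/* each data exchange nudges the counter, saturating at U8_MAX */
	inet_csk_inc_pingpong_cnt(sk);

	/* interactive once the sysctl threshold is reached */
	if (inet_csk_in_pingpong_mode(sk)) {
		/* e.g. prefer delayed ACKs */
	}
}
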
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 325ad893f624..153960663ce4 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -29,7 +29,7 @@ struct fqdir {
};
/**
- * fragment queue flags
+ * enum: fragment queue flags
*
* @INET_FRAG_FIRST_IN: first fragment has arrived
* @INET_FRAG_LAST_IN: final fragment has arrived
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 99bd823e97f6..7f1b38458743 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -88,7 +88,7 @@ struct inet_bind_bucket {
unsigned short fast_sk_family;
bool fast_ipv6_only;
struct hlist_node node;
- struct hlist_head owners;
+ struct hlist_head bhash2;
};
struct inet_bind2_bucket {
@@ -96,22 +96,17 @@ struct inet_bind2_bucket {
int l3mdev;
unsigned short port;
#if IS_ENABLED(CONFIG_IPV6)
- unsigned short family;
-#endif
- union {
-#if IS_ENABLED(CONFIG_IPV6)
- struct in6_addr v6_rcv_saddr;
+ unsigned short addr_type;
+ struct in6_addr v6_rcv_saddr;
+#define rcv_saddr v6_rcv_saddr.s6_addr32[3]
+#else
+ __be32 rcv_saddr;
#endif
- __be32 rcv_saddr;
- };
/* Node in the bhash2 inet_bind_hashbucket chain */
struct hlist_node node;
+ struct hlist_node bhash_node;
/* List of sockets hashed to this bucket */
struct hlist_head owners;
- /* bhash has twsk in owners, but bhash2 has twsk in
- * deathrow not to add a member in struct sock_common.
- */
- struct hlist_head deathrow;
};
static inline struct net *ib_net(const struct inet_bind_bucket *ib)
@@ -177,7 +172,7 @@ struct inet_hashinfo {
struct inet_listen_hashbucket *lhash2;
bool pernet;
-};
+} ____cacheline_aligned_in_smp;
static inline struct inet_hashinfo *tcp_or_dccp_get_hashinfo(const struct sock *sk)
{
@@ -241,7 +236,7 @@ bool inet_bind_bucket_match(const struct inet_bind_bucket *tb,
struct inet_bind2_bucket *
inet_bind2_bucket_create(struct kmem_cache *cachep, struct net *net,
struct inet_bind_hashbucket *head,
- unsigned short port, int l3mdev,
+ struct inet_bind_bucket *tb,
const struct sock *sk);
void inet_bind2_bucket_destroy(struct kmem_cache *cachep,
@@ -379,6 +374,27 @@ struct sock *__inet_lookup_established(struct net *net,
const __be32 daddr, const u16 hnum,
const int dif, const int sdif);
+typedef u32 (inet_ehashfn_t)(const struct net *net,
+ const __be32 laddr, const __u16 lport,
+ const __be32 faddr, const __be16 fport);
+
+inet_ehashfn_t inet_ehashfn;
+
+INDIRECT_CALLABLE_DECLARE(inet_ehashfn_t udp_ehashfn);
+
+struct sock *inet_lookup_reuseport(struct net *net, struct sock *sk,
+ struct sk_buff *skb, int doff,
+ __be32 saddr, __be16 sport,
+ __be32 daddr, unsigned short hnum,
+ inet_ehashfn_t *ehashfn);
+
+struct sock *inet_lookup_run_sk_lookup(struct net *net,
+ int protocol,
+ struct sk_buff *skb, int doff,
+ __be32 saddr, __be16 sport,
+ __be32 daddr, u16 hnum, const int dif,
+ inet_ehashfn_t *ehashfn);
+
static inline struct sock *
inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo,
const __be32 saddr, const __be16 sport,
@@ -428,6 +444,46 @@ static inline struct sock *inet_lookup(struct net *net,
return sk;
}
+static inline
+struct sock *inet_steal_sock(struct net *net, struct sk_buff *skb, int doff,
+ const __be32 saddr, const __be16 sport,
+ const __be32 daddr, const __be16 dport,
+ bool *refcounted, inet_ehashfn_t *ehashfn)
+{
+ struct sock *sk, *reuse_sk;
+ bool prefetched;
+
+ sk = skb_steal_sock(skb, refcounted, &prefetched);
+ if (!sk)
+ return NULL;
+
+ if (!prefetched || !sk_fullsock(sk))
+ return sk;
+
+ if (sk->sk_protocol == IPPROTO_TCP) {
+ if (sk->sk_state != TCP_LISTEN)
+ return sk;
+ } else if (sk->sk_protocol == IPPROTO_UDP) {
+ if (sk->sk_state != TCP_CLOSE)
+ return sk;
+ } else {
+ return sk;
+ }
+
+ reuse_sk = inet_lookup_reuseport(net, sk, skb, doff,
+ saddr, sport, daddr, ntohs(dport),
+ ehashfn);
+ if (!reuse_sk)
+ return sk;
+
+ /* We've chosen a new reuseport sock which is never refcounted. This
+ * implies that sk also isn't refcounted.
+ */
+ WARN_ON_ONCE(*refcounted);
+
+ return reuse_sk;
+}
+
static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
struct sk_buff *skb,
int doff,
@@ -436,22 +492,23 @@ static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
const int sdif,
bool *refcounted)
{
- struct sock *sk = skb_steal_sock(skb, refcounted);
+ struct net *net = dev_net(skb_dst(skb)->dev);
const struct iphdr *iph = ip_hdr(skb);
+ struct sock *sk;
+ sk = inet_steal_sock(net, skb, doff, iph->saddr, sport, iph->daddr, dport,
+ refcounted, inet_ehashfn);
+ if (IS_ERR(sk))
+ return NULL;
if (sk)
return sk;
- return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb,
+ return __inet_lookup(net, hashinfo, skb,
doff, iph->saddr, sport,
iph->daddr, dport, inet_iif(skb), sdif,
refcounted);
}
-u32 inet6_ehashfn(const struct net *net,
- const struct in6_addr *laddr, const u16 lport,
- const struct in6_addr *faddr, const __be16 fport);
-
static inline void sk_daddr_set(struct sock *sk, __be32 addr)
{
sk->sk_daddr = addr; /* alias of inet_daddr */
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index caa20a905531..d94c242eb3ed 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -107,11 +107,12 @@ static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
static inline u32 inet_request_mark(const struct sock *sk, struct sk_buff *skb)
{
- if (!sk->sk_mark &&
- READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept))
+ u32 mark = READ_ONCE(sk->sk_mark);
+
+ if (!mark && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept))
return skb->mark;
- return sk->sk_mark;
+ return mark;
}
static inline int inet_request_bound_dev_if(const struct sock *sk,
@@ -193,13 +194,13 @@ struct rtable;
* @inet_rcv_saddr - Bound local IPv4 addr
* @inet_dport - Destination port
* @inet_num - Local port
+ * @inet_flags - various atomic flags
* @inet_saddr - Sending source
* @uc_ttl - Unicast TTL
* @inet_sport - Source port
* @inet_id - ID counter for DF pkts
* @tos - TOS
* @mc_ttl - Multicasting TTL
- * @is_icsk - is this an inet_connection_sock?
* @uc_index - Unicast outgoing device index
* @mc_index - Multicast device index
* @mc_list - Group array
@@ -217,63 +218,95 @@ struct inet_sock {
#define inet_dport sk.__sk_common.skc_dport
#define inet_num sk.__sk_common.skc_num
+ unsigned long inet_flags;
__be32 inet_saddr;
__s16 uc_ttl;
- __u16 cmsg_flags;
- struct ip_options_rcu __rcu *inet_opt;
__be16 inet_sport;
- __u16 inet_id;
+ struct ip_options_rcu __rcu *inet_opt;
+ atomic_t inet_id;
__u8 tos;
__u8 min_ttl;
__u8 mc_ttl;
__u8 pmtudisc;
- __u8 recverr:1,
- is_icsk:1,
- freebind:1,
- hdrincl:1,
- mc_loop:1,
- transparent:1,
- mc_all:1,
- nodefrag:1;
- __u8 bind_address_no_port:1,
- recverr_rfc4884:1,
- defer_connect:1; /* Indicates that fastopen_connect is set
- * and cookie exists so we defer connect
- * until first data frame is written
- */
__u8 rcv_tos;
__u8 convert_csum;
int uc_index;
int mc_index;
__be32 mc_addr;
+ u32 local_port_range; /* high << 16 | low */
+
struct ip_mc_socklist __rcu *mc_list;
struct inet_cork_full cork;
- struct {
- __u16 lo;
- __u16 hi;
- } local_port_range;
};
#define IPCORK_OPT 1 /* ip-options has been held in ipcork.opt */
-#define IPCORK_ALLFRAG 2 /* always fragment (for ipv6 for now) */
+
+enum {
+ INET_FLAGS_PKTINFO = 0,
+ INET_FLAGS_TTL = 1,
+ INET_FLAGS_TOS = 2,
+ INET_FLAGS_RECVOPTS = 3,
+ INET_FLAGS_RETOPTS = 4,
+ INET_FLAGS_PASSSEC = 5,
+ INET_FLAGS_ORIGDSTADDR = 6,
+ INET_FLAGS_CHECKSUM = 7,
+ INET_FLAGS_RECVFRAGSIZE = 8,
+
+ INET_FLAGS_RECVERR = 9,
+ INET_FLAGS_RECVERR_RFC4884 = 10,
+ INET_FLAGS_FREEBIND = 11,
+ INET_FLAGS_HDRINCL = 12,
+ INET_FLAGS_MC_LOOP = 13,
+ INET_FLAGS_MC_ALL = 14,
+ INET_FLAGS_TRANSPARENT = 15,
+ INET_FLAGS_IS_ICSK = 16,
+ INET_FLAGS_NODEFRAG = 17,
+ INET_FLAGS_BIND_ADDRESS_NO_PORT = 18,
+ INET_FLAGS_DEFER_CONNECT = 19,
+ INET_FLAGS_MC6_LOOP = 20,
+ INET_FLAGS_RECVERR6_RFC4884 = 21,
+ INET_FLAGS_MC6_ALL = 22,
+ INET_FLAGS_AUTOFLOWLABEL_SET = 23,
+ INET_FLAGS_AUTOFLOWLABEL = 24,
+ INET_FLAGS_DONTFRAG = 25,
+ INET_FLAGS_RECVERR6 = 26,
+ INET_FLAGS_REPFLOW = 27,
+ INET_FLAGS_RTALERT_ISOLATE = 28,
+ INET_FLAGS_SNDFLOW = 29,
+};
/* cmsg flags for inet */
-#define IP_CMSG_PKTINFO BIT(0)
-#define IP_CMSG_TTL BIT(1)
-#define IP_CMSG_TOS BIT(2)
-#define IP_CMSG_RECVOPTS BIT(3)
-#define IP_CMSG_RETOPTS BIT(4)
-#define IP_CMSG_PASSSEC BIT(5)
-#define IP_CMSG_ORIGDSTADDR BIT(6)
-#define IP_CMSG_CHECKSUM BIT(7)
-#define IP_CMSG_RECVFRAGSIZE BIT(8)
-
-static inline bool sk_is_inet(struct sock *sk)
+#define IP_CMSG_PKTINFO BIT(INET_FLAGS_PKTINFO)
+#define IP_CMSG_TTL BIT(INET_FLAGS_TTL)
+#define IP_CMSG_TOS BIT(INET_FLAGS_TOS)
+#define IP_CMSG_RECVOPTS BIT(INET_FLAGS_RECVOPTS)
+#define IP_CMSG_RETOPTS BIT(INET_FLAGS_RETOPTS)
+#define IP_CMSG_PASSSEC BIT(INET_FLAGS_PASSSEC)
+#define IP_CMSG_ORIGDSTADDR BIT(INET_FLAGS_ORIGDSTADDR)
+#define IP_CMSG_CHECKSUM BIT(INET_FLAGS_CHECKSUM)
+#define IP_CMSG_RECVFRAGSIZE BIT(INET_FLAGS_RECVFRAGSIZE)
+
+#define IP_CMSG_ALL (IP_CMSG_PKTINFO | IP_CMSG_TTL | \
+ IP_CMSG_TOS | IP_CMSG_RECVOPTS | \
+ IP_CMSG_RETOPTS | IP_CMSG_PASSSEC | \
+ IP_CMSG_ORIGDSTADDR | IP_CMSG_CHECKSUM | \
+ IP_CMSG_RECVFRAGSIZE)
+
+static inline unsigned long inet_cmsg_flags(const struct inet_sock *inet)
{
- return sk->sk_family == AF_INET || sk->sk_family == AF_INET6;
+ return READ_ONCE(inet->inet_flags) & IP_CMSG_ALL;
}
+#define inet_test_bit(nr, sk) \
+ test_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags)
+#define inet_set_bit(nr, sk) \
+ set_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags)
+#define inet_clear_bit(nr, sk) \
+ clear_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags)
+#define inet_assign_bit(nr, sk, val) \
+ assign_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags, val)
+
/**
* sk_to_full_sk - Access to a full socket
* @sk: pointer to a socket
@@ -362,7 +395,7 @@ static inline __u8 inet_sk_flowi_flags(const struct sock *sk)
{
__u8 flags = 0;
- if (inet_sk(sk)->transparent || inet_sk(sk)->hdrincl)
+ if (inet_test_bit(TRANSPARENT, sk) || inet_test_bit(HDRINCL, sk))
flags |= FLOWI_FLAG_ANYSRC;
return flags;
}
@@ -388,7 +421,8 @@ static inline bool inet_can_nonlocal_bind(struct net *net,
struct inet_sock *inet)
{
return READ_ONCE(net->ipv4.sysctl_ip_nonlocal_bind) ||
- inet->freebind || inet->transparent;
+ test_bit(INET_FLAGS_FREEBIND, &inet->inet_flags) ||
+ test_bit(INET_FLAGS_TRANSPARENT, &inet->inet_flags);
}
static inline bool inet_addr_valid_or_nonlocal(struct net *net,
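
The packed bitfields of struct inet_sock become atomic flag bits so
they can be read and written locklessly. Option code switches to the
inet_*_bit() accessors; a sketch of the new pattern, with hypothetical
wrappers.

#include <net/inet_sock.h>

static void example_set_transparent(struct sock *sk, bool on)
{
	/* atomic bitop; the socket lock is not needed just for this */
	inet_assign_bit(TRANSPARENT, sk, on);
}

static bool example_anysrc_allowed(struct sock *sk)
{
	return inet_test_bit(TRANSPARENT, sk) ||
	       inet_test_bit(HDRINCL, sk);
}
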
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 4a8e578405cb..f28da08a37b4 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -67,20 +67,17 @@ struct inet_timewait_sock {
/* And these are ours. */
unsigned int tw_transparent : 1,
tw_flowlabel : 20,
- tw_pad : 3, /* 3 bits hole */
+ tw_usec_ts : 1,
+ tw_pad : 2, /* 2 bits hole */
tw_tos : 8;
u32 tw_txhash;
u32 tw_priority;
struct timer_list tw_timer;
struct inet_bind_bucket *tw_tb;
struct inet_bind2_bucket *tw_tb2;
- struct hlist_node tw_bind2_node;
};
#define tw_tclass tw_tos
-#define twsk_for_each_bound_bhash2(__tw, list) \
- hlist_for_each_entry(__tw, list, tw_bind2_node)
-
static inline struct inet_timewait_sock *inet_twsk(const struct sock *sk)
{
return (struct inet_timewait_sock *)sk;
diff --git a/include/net/ip.h b/include/net/ip.h
index acec504c469a..25cb688bdc62 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -57,6 +57,7 @@ struct inet_skb_parm {
#define IPSKB_FRAG_PMTU BIT(6)
#define IPSKB_L3SLAVE BIT(7)
#define IPSKB_NOPOLICY BIT(8)
+#define IPSKB_MULTIPATH BIT(9)
u16 frag_max_size;
};
@@ -93,8 +94,8 @@ static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
{
ipcm_init(ipcm);
- ipcm->sockc.mark = inet->sk.sk_mark;
- ipcm->sockc.tsflags = inet->sk.sk_tsflags;
+ ipcm->sockc.mark = READ_ONCE(inet->sk.sk_mark);
+ ipcm->sockc.tsflags = READ_ONCE(inet->sk.sk_tsflags);
ipcm->oif = READ_ONCE(inet->sk.sk_bound_dev_if);
ipcm->addr = inet->inet_saddr;
ipcm->protocol = inet->inet_num;
@@ -222,8 +223,6 @@ int ip_append_data(struct sock *sk, struct flowi4 *fl4,
unsigned int flags);
int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd,
struct sk_buff *skb);
-ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
- int offset, size_t size, int flags);
struct sk_buff *__ip_make_skb(struct sock *sk, struct flowi4 *fl4,
struct sk_buff_head *queue,
struct inet_cork *cork);
@@ -244,14 +243,22 @@ static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}
-static inline __u8 get_rttos(struct ipcm_cookie* ipc, struct inet_sock *inet)
+/* Get the route scope that should be used when sending a packet. */
+static inline u8 ip_sendmsg_scope(const struct inet_sock *inet,
+ const struct ipcm_cookie *ipc,
+ const struct msghdr *msg)
{
- return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(inet->tos);
+ if (sock_flag(&inet->sk, SOCK_LOCALROUTE) ||
+ msg->msg_flags & MSG_DONTROUTE ||
+ (ipc->opt && ipc->opt->opt.is_strictroute))
+ return RT_SCOPE_LINK;
+
+ return RT_SCOPE_UNIVERSE;
}
-static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk)
+static inline __u8 get_rttos(struct ipcm_cookie* ipc, struct inet_sock *inet)
{
- return (ipc->tos != -1) ? RT_CONN_FLAGS_TOS(sk, ipc->tos) : RT_CONN_FLAGS(sk);
+ return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(READ_ONCE(inet->tos));
}
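A sketch of how an assumed sendmsg-style caller would combine the two helpers when building the flow key; flowi4_init_output() is the existing initializer from include/net/flow.h, while example_flow_setup() and the zero ports are illustrative:

static void example_flow_setup(struct flowi4 *fl4, struct sock *sk,
			       struct ipcm_cookie *ipc, struct msghdr *msg,
			       __be32 daddr, __be32 saddr)
{
	struct inet_sock *inet = inet_sk(sk);

	/* TOS and route scope both come from the helpers above */
	flowi4_init_output(fl4, ipc->oif, ipc->sockc.mark,
			   get_rttos(ipc, inet),
			   ip_sendmsg_scope(inet, ipc, msg),
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   daddr, saddr, 0, 0, sk->sk_uid);
}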
/* datagram.c */
@@ -282,7 +289,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
const struct ip_options *sopt,
__be32 daddr, __be32 saddr,
const struct ip_reply_arg *arg,
- unsigned int len, u64 transmit_time);
+ unsigned int len, u64 transmit_time, u32 txhash);
#define IP_INC_STATS(net, field) SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define __IP_INC_STATS(net, field) __SNMP_INC_STATS64((net)->mib.ip_statistics, field)
@@ -342,8 +349,14 @@ static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_o
} \
}
-void inet_get_local_port_range(const struct net *net, int *low, int *high);
-void inet_sk_get_local_port_range(const struct sock *sk, int *low, int *high);
+static inline void inet_get_local_port_range(const struct net *net, int *low, int *high)
+{
+ u32 range = READ_ONCE(net->ipv4.ip_local_ports.range);
+
+ *low = range & 0xffff;
+ *high = range >> 16;
+}
+bool inet_sk_get_local_port_range(const struct sock *sk, int *low, int *high);
#ifdef CONFIG_SYSCTL
static inline bool inet_is_local_reserved_port(struct net *net, unsigned short port)
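Inlining the reader works because the writer side is assumed to pack both bounds into the single u32 (low in bits 0-15, high in bits 16-31), so one READ_ONCE() can never observe a torn low/high pair. A hypothetical consumer:

static bool example_port_in_range(const struct net *net, unsigned short port)
{
	int low, high;

	/* both bounds come from one atomic load of the packed range */
	inet_get_local_port_range(net, &low, &high);
	return port >= low && port <= high;
}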
@@ -427,19 +440,22 @@ int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
static inline bool ip_sk_accept_pmtu(const struct sock *sk)
{
- return inet_sk(sk)->pmtudisc != IP_PMTUDISC_INTERFACE &&
- inet_sk(sk)->pmtudisc != IP_PMTUDISC_OMIT;
+ u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);
+
+ return pmtudisc != IP_PMTUDISC_INTERFACE &&
+ pmtudisc != IP_PMTUDISC_OMIT;
}
static inline bool ip_sk_use_pmtu(const struct sock *sk)
{
- return inet_sk(sk)->pmtudisc < IP_PMTUDISC_PROBE;
+ return READ_ONCE(inet_sk(sk)->pmtudisc) < IP_PMTUDISC_PROBE;
}
static inline bool ip_sk_ignore_df(const struct sock *sk)
{
- return inet_sk(sk)->pmtudisc < IP_PMTUDISC_DO ||
- inet_sk(sk)->pmtudisc == IP_PMTUDISC_OMIT;
+ u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);
+
+ return pmtudisc < IP_PMTUDISC_DO || pmtudisc == IP_PMTUDISC_OMIT;
}
static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
@@ -532,8 +548,19 @@ static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
* generator as much as we can.
*/
if (sk && inet_sk(sk)->inet_daddr) {
- iph->id = htons(inet_sk(sk)->inet_id);
- inet_sk(sk)->inet_id += segs;
+ int val;
+
+ /* avoid atomic operations for TCP,
+ * as we hold socket lock at this point.
+ */
+ if (sk_is_tcp(sk)) {
+ sock_owned_by_me(sk);
+ val = atomic_read(&inet_sk(sk)->inet_id);
+ atomic_set(&inet_sk(sk)->inet_id, val + segs);
+ } else {
+ val = atomic_add_return(segs, &inet_sk(sk)->inet_id);
+ }
+ iph->id = htons(val);
return;
}
if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
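The two branches above differ only in atomicity, not meaning: the locked TCP path stamps the pre-add value while atomic_add_return() yields the post-add value, and either works since only per-run uniqueness of the 16-bit ID matters. The same logic as a standalone sketch (example_next_ip_id() is illustrative and assumes inet_id has become an atomic_t, as this series makes it):

static u16 example_next_ip_id(struct sock *sk, int segs)
{
	int val;

	if (sk_is_tcp(sk)) {
		/* socket lock held: plain load/store on the atomic suffice */
		val = atomic_read(&inet_sk(sk)->inet_id);
		atomic_set(&inet_sk(sk)->inet_id, val + segs);
	} else {
		/* lockless senders need a real atomic RMW */
		val = atomic_add_return(segs, &inet_sk(sk)->inet_id);
	}
	return (u16)val;
}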
@@ -740,7 +767,7 @@ int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);
* Functions provided by ip_sockglue.c
*/
-void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
+void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb, bool drop_dst);
void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb, int tlen, int offset);
int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 05e6f756feaf..9ba6413fd2e3 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -604,7 +604,10 @@ static inline bool fib6_rules_early_flow_dissect(struct net *net,
if (!net->ipv6.fib6_rules_require_fldissect)
return false;
- skb_flow_dissect_flow_keys(skb, flkeys, flag);
+ memset(flkeys, 0, sizeof(*flkeys));
+ __skb_flow_dissect(net, skb, &flow_keys_dissector,
+ flkeys, NULL, 0, 0, 0, flag);
+
fl6->fl6_sport = flkeys->ports.src;
fl6->fl6_dport = flkeys->ports.dst;
fl6->flowi6_proto = flkeys->basic.ip_proto;
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 3556595ce59a..28b065790261 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -53,13 +53,12 @@ struct route_info {
*/
static inline int rt6_srcprefs2flags(unsigned int srcprefs)
{
- /* No need to bitmask because srcprefs have only 3 bits. */
- return srcprefs << 3;
+ return (srcprefs & IPV6_PREFER_SRC_MASK) << 3;
}
static inline unsigned int rt6_flags2srcprefs(int flags)
{
- return (flags >> 3) & 7;
+ return (flags >> 3) & IPV6_PREFER_SRC_MASK;
}
static inline bool rt6_need_strict(const struct in6_addr *daddr)
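Masking matters now that srcprefs can be written locklessly elsewhere: stray high bits must not leak into the RT6_LOOKUP_F_* flag space. A quick round-trip sketch (the example_* name is illustrative):

static void example_srcprefs_roundtrip(void)
{
	unsigned int prefs = IPV6_PREFER_SRC_TMP | IPV6_PREFER_SRC_COA;
	int flags = rt6_srcprefs2flags(prefs);

	/* the 3-bit preference value survives the shift into flag space */
	WARN_ON(rt6_flags2srcprefs(flags) != prefs);
}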
@@ -156,7 +155,7 @@ void fib6_force_start_gc(struct net *net);
struct fib6_info *addrconf_f6i_alloc(struct net *net, struct inet6_dev *idev,
const struct in6_addr *addr, bool anycast,
- gfp_t gfp_flags);
+ gfp_t gfp_flags, struct netlink_ext_ack *extack);
struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
int flags);
@@ -266,7 +265,7 @@ static inline unsigned int ip6_skb_dst_mtu(const struct sk_buff *skb)
const struct dst_entry *dst = skb_dst(skb);
unsigned int mtu;
- if (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) {
+ if (np && READ_ONCE(np->pmtudisc) >= IPV6_PMTUDISC_PROBE) {
mtu = READ_ONCE(dst->dev->mtu);
mtu -= lwtunnel_headroom(dst->lwtstate, mtu);
} else {
@@ -277,14 +276,18 @@ static inline unsigned int ip6_skb_dst_mtu(const struct sk_buff *skb)
static inline bool ip6_sk_accept_pmtu(const struct sock *sk)
{
- return inet6_sk(sk)->pmtudisc != IPV6_PMTUDISC_INTERFACE &&
- inet6_sk(sk)->pmtudisc != IPV6_PMTUDISC_OMIT;
+ u8 pmtudisc = READ_ONCE(inet6_sk(sk)->pmtudisc);
+
+ return pmtudisc != IPV6_PMTUDISC_INTERFACE &&
+ pmtudisc != IPV6_PMTUDISC_OMIT;
}
static inline bool ip6_sk_ignore_df(const struct sock *sk)
{
- return inet6_sk(sk)->pmtudisc < IPV6_PMTUDISC_DO ||
- inet6_sk(sk)->pmtudisc == IPV6_PMTUDISC_OMIT;
+ u8 pmtudisc = READ_ONCE(inet6_sk(sk)->pmtudisc);
+
+ return pmtudisc < IPV6_PMTUDISC_DO ||
+ pmtudisc == IPV6_PMTUDISC_OMIT;
}
static inline const struct in6_addr *rt6_nexthop(const struct rt6_info *rt,
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index a378eff827c7..d4667b7797e3 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -154,9 +154,10 @@ struct fib_info {
int fib_nhs;
bool fib_nh_is_v6;
bool nh_updated;
+ bool pfsrc_removed;
struct nexthop *nh;
struct rcu_head rcu;
- struct fib_nh fib_nh[];
+ struct fib_nh fib_nh[] __counted_by(fib_nhs);
};
@@ -418,7 +419,10 @@ static inline bool fib4_rules_early_flow_dissect(struct net *net,
if (!net->ipv4.fib_rules_require_fldissect)
return false;
- skb_flow_dissect_flow_keys(skb, flkeys, flag);
+ memset(flkeys, 0, sizeof(*flkeys));
+ __skb_flow_dissect(net, skb, &flow_keys_dissector,
+ flkeys, NULL, 0, 0, 0, flag);
+
fl4->fl4_sport = flkeys->ports.src;
fl4->fl4_dport = flkeys->ports.dst;
fl4->flowi4_proto = flkeys->basic.ip_proto;
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index ed4b6ad3fcac..2d746f4c9a0a 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -52,6 +52,7 @@ struct ip_tunnel_key {
u8 tos; /* TOS for IPv4, TC for IPv6 */
u8 ttl; /* TTL for IPv4, HL for IPv6 */
__be32 label; /* Flow Label for IPv6 */
+ u32 nhid;
__be16 tp_src;
__be16 tp_dst;
__u8 flow_flags;
@@ -415,6 +416,17 @@ static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
return 0;
}
+static inline __be32 ip_tunnel_get_flowlabel(const struct iphdr *iph,
+ const struct sk_buff *skb)
+{
+ __be16 payload_protocol = skb_protocol(skb, true);
+
+ if (payload_protocol == htons(ETH_P_IPV6))
+ return ip6_flowlabel((const struct ipv6hdr *)iph);
+ else
+ return 0;
+}
+
static inline u8 ip_tunnel_get_ttl(const struct iphdr *iph,
const struct sk_buff *skb)
{
@@ -482,15 +494,14 @@ static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len)
u64_stats_inc(&tstats->tx_packets);
u64_stats_update_end(&tstats->syncp);
put_cpu_ptr(tstats);
+ return;
+ }
+
+ if (pkt_len < 0) {
+ DEV_STATS_INC(dev, tx_errors);
+ DEV_STATS_INC(dev, tx_aborted_errors);
} else {
- struct net_device_stats *err_stats = &dev->stats;
-
- if (pkt_len < 0) {
- err_stats->tx_errors++;
- err_stats->tx_aborted_errors++;
- } else {
- err_stats->tx_dropped++;
- }
+ DEV_STATS_INC(dev, tx_dropped);
}
}
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 7332296eca44..cf25ea21d770 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -373,12 +373,12 @@ static inline void ipcm6_init(struct ipcm6_cookie *ipc6)
}
static inline void ipcm6_init_sk(struct ipcm6_cookie *ipc6,
- const struct ipv6_pinfo *np)
+ const struct sock *sk)
{
*ipc6 = (struct ipcm6_cookie) {
.hlimit = -1,
- .tclass = np->tclass,
- .dontfrag = np->dontfrag,
+ .tclass = inet6_sk(sk)->tclass,
+ .dontfrag = inet6_test_bit(DONTFRAG, sk),
};
}
@@ -428,7 +428,7 @@ int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
int flags);
int ip6_flowlabel_init(void);
void ip6_flowlabel_cleanup(void);
-bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np);
+bool ip6_autoflowlabel(struct net *net, const struct sock *sk);
static inline void fl6_sock_release(struct ip6_flowlabel *fl)
{
@@ -752,12 +752,8 @@ static inline u32 ipv6_addr_hash(const struct in6_addr *a)
/* more secured version of ipv6_addr_hash() */
static inline u32 __ipv6_addr_jhash(const struct in6_addr *a, const u32 initval)
{
- u32 v = (__force u32)a->s6_addr32[0] ^ (__force u32)a->s6_addr32[1];
-
- return jhash_3words(v,
- (__force u32)a->s6_addr32[2],
- (__force u32)a->s6_addr32[3],
- initval);
+ return jhash2((__force const u32 *)a->s6_addr32,
+ ARRAY_SIZE(a->s6_addr32), initval);
}
static inline bool ipv6_addr_loopback(const struct in6_addr *a)
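jhash2() hashes the four 32-bit words of the address directly, so words 0 and 1 now feed the hash independently instead of being pre-folded by XOR. A hypothetical consumer picking a hash bucket with a boot-time random seed:

static u32 example_addr_bucket(const struct in6_addr *a, u32 seed,
			       u32 nr_buckets)
{
	return __ipv6_addr_jhash(a, seed) % nr_buckets;
}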
@@ -913,9 +909,9 @@ static inline int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np, struct flowi6 *fl6,
int hlimit;
if (ipv6_addr_is_multicast(&fl6->daddr))
- hlimit = np->mcast_hops;
+ hlimit = READ_ONCE(np->mcast_hops);
else
- hlimit = np->hop_limit;
+ hlimit = READ_ONCE(np->hop_limit);
if (hlimit < 0)
hlimit = ip6_dst_hoplimit(dst);
return hlimit;
@@ -941,7 +937,8 @@ static inline bool ipv6_can_nonlocal_bind(struct net *net,
struct inet_sock *inet)
{
return net->ipv6.sysctl.ip_nonlocal_bind ||
- inet->freebind || inet->transparent;
+ test_bit(INET_FLAGS_FREEBIND, &inet->inet_flags) ||
+ test_bit(INET_FLAGS_TRANSPARENT, &inet->inet_flags);
}
/* Sysctl settings for net ipv6.auto_flowlabels */
@@ -1131,12 +1128,6 @@ struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, st
struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
const struct in6_addr *final_dst,
bool connected);
-struct dst_entry *ip6_dst_lookup_tunnel(struct sk_buff *skb,
- struct net_device *dev,
- struct net *net, struct socket *sock,
- struct in6_addr *saddr,
- const struct ip_tunnel_info *info,
- u8 protocol, bool use_cache);
struct dst_entry *ip6_blackhole_route(struct net *net,
struct dst_entry *orig_dst);
@@ -1220,6 +1211,7 @@ void inet6_cleanup_sock(struct sock *sk);
void inet6_sock_destruct(struct sock *sk);
int inet6_release(struct socket *sock);
int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
+int inet6_bind_sk(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
int peer);
int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
@@ -1274,7 +1266,9 @@ static inline int snmp6_unregister_dev(struct inet6_dev *idev) { return 0; }
#ifdef CONFIG_SYSCTL
struct ctl_table *ipv6_icmp_sysctl_init(struct net *net);
+size_t ipv6_icmp_sysctl_table_size(void);
struct ctl_table *ipv6_route_sysctl_init(struct net *net);
+size_t ipv6_route_sysctl_table_size(struct net *net);
int ipv6_sysctl_register(void);
void ipv6_sysctl_unregister(void);
#endif
@@ -1298,15 +1292,16 @@ static inline int ip6_sock_set_v6only(struct sock *sk)
static inline void ip6_sock_set_recverr(struct sock *sk)
{
- lock_sock(sk);
- inet6_sk(sk)->recverr = true;
- release_sock(sk);
+ inet6_set_bit(RECVERR6, sk);
}
-static inline int __ip6_sock_set_addr_preferences(struct sock *sk, int val)
+#define IPV6_PREFER_SRC_MASK (IPV6_PREFER_SRC_TMP | IPV6_PREFER_SRC_PUBLIC | \
+ IPV6_PREFER_SRC_COA)
+
+static inline int ip6_sock_set_addr_preferences(struct sock *sk, int val)
{
+ unsigned int prefmask = ~IPV6_PREFER_SRC_MASK;
unsigned int pref = 0;
- unsigned int prefmask = ~0;
/* check PUBLIC/TMP/PUBTMP_DEFAULT conflicts */
switch (val & (IPV6_PREFER_SRC_PUBLIC |
@@ -1356,20 +1351,11 @@ static inline int __ip6_sock_set_addr_preferences(struct sock *sk, int val)
return -EINVAL;
}
- inet6_sk(sk)->srcprefs = (inet6_sk(sk)->srcprefs & prefmask) | pref;
+ WRITE_ONCE(inet6_sk(sk)->srcprefs,
+ (READ_ONCE(inet6_sk(sk)->srcprefs) & prefmask) | pref);
return 0;
}
-static inline int ip6_sock_set_addr_preferences(struct sock *sk, bool val)
-{
- int ret;
-
- lock_sock(sk);
- ret = __ip6_sock_set_addr_preferences(sk, val);
- release_sock(sk);
- return ret;
-}
-
static inline void ip6_sock_set_recvpktinfo(struct sock *sk)
{
lock_sock(sk);
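With srcprefs now updated via WRITE_ONCE(), ip6_sock_set_addr_preferences() no longer needs the lock_sock()/release_sock() wrapper and the __-prefixed variant is folded away. A sketch of an assumed in-kernel caller:

static int example_prefer_public_src(struct sock *sk)
{
	/* safe without the socket lock now */
	return ip6_sock_set_addr_preferences(sk, IPV6_PREFER_SRC_PUBLIC);
}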
diff --git a/include/net/ipv6_stubs.h b/include/net/ipv6_stubs.h
index c48186bf4737..485c39a89866 100644
--- a/include/net/ipv6_stubs.h
+++ b/include/net/ipv6_stubs.h
@@ -60,6 +60,9 @@ struct ipv6_stub {
#if IS_ENABLED(CONFIG_XFRM)
void (*xfrm6_local_rxpmtu)(struct sk_buff *skb, u32 mtu);
int (*xfrm6_udp_encap_rcv)(struct sock *sk, struct sk_buff *skb);
+ struct sk_buff *(*xfrm6_gro_udp_encap_rcv)(struct sock *sk,
+ struct list_head *head,
+ struct sk_buff *skb);
int (*xfrm6_rcv_encap)(struct sk_buff *skb, int nexthdr, __be32 spi,
int encap_type);
#endif
@@ -85,6 +88,11 @@ struct ipv6_bpf_stub {
sockptr_t optval, unsigned int optlen);
int (*ipv6_getsockopt)(struct sock *sk, int level, int optname,
sockptr_t optval, sockptr_t optlen);
+ int (*ipv6_dev_get_saddr)(struct net *net,
+ const struct net_device *dst_dev,
+ const struct in6_addr *daddr,
+ unsigned int prefs,
+ struct in6_addr *saddr);
};
extern const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly;
diff --git a/include/net/iucv/iucv.h b/include/net/iucv/iucv.h
index f9e88401d7da..8b2055d64a6b 100644
--- a/include/net/iucv/iucv.h
+++ b/include/net/iucv/iucv.h
@@ -80,7 +80,7 @@ struct iucv_array {
u32 length;
} __attribute__ ((aligned (8)));
-extern struct bus_type iucv_bus;
+extern const struct bus_type iucv_bus;
extern struct device *iucv_root;
/*
@@ -489,7 +489,7 @@ struct iucv_interface {
int (*path_sever)(struct iucv_path *path, u8 userdata[16]);
int (*iucv_register)(struct iucv_handler *handler, int smp);
void (*iucv_unregister)(struct iucv_handler *handler, int smp);
- struct bus_type *bus;
+ const struct bus_type *bus;
struct device *root;
};
diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h
index d2ea5863eedc..b2cf243ebe44 100644
--- a/include/net/iw_handler.h
+++ b/include/net/iw_handler.h
@@ -426,17 +426,10 @@ struct iw_public_data {
/**************************** PROTOTYPES ****************************/
/*
- * Functions part of the Wireless Extensions (defined in net/core/wireless.c).
- * Those may be called only within the kernel.
+ * Functions part of the Wireless Extensions (defined in net/wireless/wext-core.c).
+ * Those may be called by driver modules.
*/
-/* First : function strictly used inside the kernel */
-
-/* Handle /proc/net/wireless, called in net/code/dev.c */
-int dev_get_wireless_info(char *buffer, char **start, off_t offset, int length);
-
-/* Second : functions that may be called by driver modules */
-
/* Send a single event to user space */
void wireless_send_event(struct net_device *dev, unsigned int cmd,
union iwreq_data *wrqu, const char *extra);
diff --git a/include/net/kcm.h b/include/net/kcm.h
index 2d704f8f4905..90279e5e09a5 100644
--- a/include/net/kcm.h
+++ b/include/net/kcm.h
@@ -47,9 +47,9 @@ struct kcm_stats {
struct kcm_tx_msg {
unsigned int sent;
- unsigned int fragidx;
unsigned int frag_offset;
unsigned int msg_flags;
+ bool started_tx;
struct sk_buff *frag_skb;
struct sk_buff *last_skb;
};
diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
index 3e1f76786d7b..7620a9196922 100644
--- a/include/net/llc_c_ac.h
+++ b/include/net/llc_c_ac.h
@@ -175,7 +175,6 @@ int llc_conn_ac_send_ack_if_needed(struct sock *sk, struct sk_buff *skb);
int llc_conn_ac_adjust_npta_by_rr(struct sock *sk, struct sk_buff *skb);
int llc_conn_ac_adjust_npta_by_rnr(struct sock *sk, struct sk_buff *skb);
int llc_conn_ac_rst_sendack_flag(struct sock *sk, struct sk_buff *skb);
-int llc_conn_ac_send_i_rsp_as_ack(struct sock *sk, struct sk_buff *skb);
int llc_conn_ac_send_i_as_ack(struct sock *sk, struct sk_buff *skb);
void llc_conn_busy_tmr_cb(struct timer_list *t);
diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
index 3948cf111dd0..241889955157 100644
--- a/include/net/llc_c_ev.h
+++ b/include/net/llc_c_ev.h
@@ -158,7 +158,6 @@ int llc_conn_ev_p_tmr_exp(struct sock *sk, struct sk_buff *skb);
int llc_conn_ev_ack_tmr_exp(struct sock *sk, struct sk_buff *skb);
int llc_conn_ev_rej_tmr_exp(struct sock *sk, struct sk_buff *skb);
int llc_conn_ev_busy_tmr_exp(struct sock *sk, struct sk_buff *skb);
-int llc_conn_ev_sendack_tmr_exp(struct sock *sk, struct sk_buff *skb);
/* NOT_USED functions and their variations */
int llc_conn_ev_rx_xxx_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
int llc_conn_ev_rx_xxx_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
index 2c1ea3414640..374411b3066c 100644
--- a/include/net/llc_conn.h
+++ b/include/net/llc_conn.h
@@ -111,7 +111,7 @@ void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit);
void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit);
int llc_conn_remove_acked_pdus(struct sock *conn, u8 nr, u16 *how_many_unacked);
struct sock *llc_lookup_established(struct llc_sap *sap, struct llc_addr *daddr,
- struct llc_addr *laddr);
+ struct llc_addr *laddr, const struct net *net);
void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk);
void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk);
diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h
index 49aa79c7b278..1d55ba7c45be 100644
--- a/include/net/llc_pdu.h
+++ b/include/net/llc_pdu.h
@@ -262,21 +262,19 @@ static inline void llc_pdu_header_init(struct sk_buff *skb, u8 type,
*/
static inline void llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa)
{
- if (skb->protocol == htons(ETH_P_802_2))
- memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN);
+ memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN);
}
/**
* llc_pdu_decode_da - extracts dest address of input frame
* @skb: input skb that destination address must be extracted from it
- * @sa: pointer to destination address (6 byte array).
+ * @da: pointer to destination address (6 byte array).
*
* This function extracts destination address(MAC) of input frame.
*/
static inline void llc_pdu_decode_da(struct sk_buff *skb, u8 *da)
{
- if (skb->protocol == htons(ETH_P_802_2))
- memcpy(da, eth_hdr(skb)->h_dest, ETH_ALEN);
+ memcpy(da, eth_hdr(skb)->h_dest, ETH_ALEN);
}
/**
@@ -321,7 +319,7 @@ static inline void llc_pdu_init_as_ui_cmd(struct sk_buff *skb)
/**
* llc_pdu_init_as_test_cmd - sets PDU as TEST
- * @skb - Address of the skb to build
+ * @skb: Address of the skb to build
*
* Sets a PDU as TEST
*/
@@ -369,6 +367,8 @@ struct llc_xid_info {
/**
* llc_pdu_init_as_xid_cmd - sets bytes 3, 4 & 5 of LLC header as XID
* @skb: input skb that header must be set into it.
+ * @svcs_supported: The class of the LLC (I or II)
+ * @rx_window: The size of the receive window of the LLC
*
* This function sets the third, fourth, fifth and sixth bytes of the LLC
* header as an XID PDU.
diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
index 6f15e6fa154e..53bd2d02a4f0 100644
--- a/include/net/lwtunnel.h
+++ b/include/net/lwtunnel.h
@@ -16,9 +16,12 @@
#define LWTUNNEL_STATE_INPUT_REDIRECT BIT(1)
#define LWTUNNEL_STATE_XMIT_REDIRECT BIT(2)
+/* LWTUNNEL_XMIT_CONTINUE should be distinguishable from dst_output return
+ * values (NET_XMIT_xxx and NETDEV_TX_xxx in linux/netdevice.h) for safety.
+ */
enum {
LWTUNNEL_XMIT_DONE,
- LWTUNNEL_XMIT_CONTINUE,
+ LWTUNNEL_XMIT_CONTINUE = 0x100,
};
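NET_XMIT_xxx codes are small integers and NETDEV_TX_xxx values stay below 0x100, so placing LWTUNNEL_XMIT_CONTINUE at 0x100 lets callers that funnel lwtunnel_xmit() results into dst_output() return values distinguish "continue" from a real status. A sketch of the assumed caller pattern:

static int example_output(struct sk_buff *skb)
{
	int ret = lwtunnel_xmit(skb);

	if (ret != LWTUNNEL_XMIT_DONE && ret != LWTUNNEL_XMIT_CONTINUE)
		return ret;	/* a genuine NET_XMIT_xxx or NETDEV_TX_xxx code */

	/* on LWTUNNEL_XMIT_CONTINUE, fall through to the normal xmit path */
	return 0;
}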
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index ac0370e76874..d400fe2e8668 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -7,7 +7,7 @@
* Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2022 Intel Corporation
+ * Copyright (C) 2018 - 2023 Intel Corporation
*/
#ifndef MAC80211_H
@@ -79,7 +79,7 @@
* helpers for sanity checking. Drivers must ensure all work added onto the
* mac80211 workqueue should be cancelled on the driver stop() callback.
*
- * mac80211 will flushed the workqueue upon interface removal and during
+ * mac80211 will flush the workqueue upon interface removal and during
* suspend.
*
* All work performed on the mac80211 workqueue must not acquire the RTNL lock.
@@ -138,7 +138,7 @@
* field to the frame RX timestamp and report the ack TX timestamp in the
* ieee80211_rx_status struct.
*
- * Similarly, To report hardware timestamps for Timing Measurement or Fine
+ * Similarly, to report hardware timestamps for Timing Measurement or Fine
* Timing Measurement frame TX, the driver should set the SKB's hwtstamp field
* to the frame TX timestamp and report the ack RX timestamp in the
* ieee80211_tx_status struct.
@@ -341,6 +341,7 @@ struct ieee80211_vif_chanctx_switch {
* @BSS_CHANGED_UNSOL_BCAST_PROBE_RESP: Unsolicited broadcast probe response
* status changed.
* @BSS_CHANGED_EHT_PUNCTURING: The channel puncturing bitmap changed.
+ * @BSS_CHANGED_MLD_VALID_LINKS: MLD valid links status changed.
*/
enum ieee80211_bss_change {
BSS_CHANGED_ASSOC = 1<<0,
@@ -376,6 +377,7 @@ enum ieee80211_bss_change {
BSS_CHANGED_FILS_DISCOVERY = 1<<30,
BSS_CHANGED_UNSOL_BCAST_PROBE_RESP = 1<<31,
BSS_CHANGED_EHT_PUNCTURING = BIT_ULL(32),
+ BSS_CHANGED_MLD_VALID_LINKS = BIT_ULL(33),
/* when adding here, make sure to change ieee80211_reconfig */
};
@@ -474,9 +476,9 @@ struct ieee80211_ba_event {
/**
* struct ieee80211_event - event to be sent to the driver
* @type: The event itself. See &enum ieee80211_event_type.
- * @rssi: relevant if &type is %RSSI_EVENT
- * @mlme: relevant if &type is %AUTH_EVENT
- * @ba: relevant if &type is %BAR_RX_EVENT or %BA_FRAME_TIMEOUT
+ * @u.rssi: relevant if &type is %RSSI_EVENT
+ * @u.mlme: relevant if &type is %AUTH_EVENT
+ * @u.ba: relevant if &type is %BAR_RX_EVENT or %BA_FRAME_TIMEOUT
* @u: union holding the fields above
*/
struct ieee80211_event {
@@ -539,8 +541,6 @@ struct ieee80211_fils_discovery {
* @link_id: link ID, or 0 for non-MLO
* @htc_trig_based_pkt_ext: default PE in 4us units, if BSS supports HE
* @uora_exists: is the UORA element advertised by AP
- * @ack_enabled: indicates support to receive a multi-TID that solicits either
- * ACK, BACK or both
* @uora_ocw_range: UORA element's OCW Range field
* @frame_time_rts_th: HE duration RTS threshold, in units of 32us
* @he_support: does this BSS support HE
@@ -643,9 +643,7 @@ struct ieee80211_fils_discovery {
* @pwr_reduction: power constraint of BSS.
* @eht_support: does this BSS support EHT
* @eht_puncturing: bitmap to indicate which channels are punctured in this BSS
- * @csa_active: marks whether a channel switch is going on. Internally it is
- * write-protected by sdata_lock and local->mtx so holding either is fine
- * for read access.
+ * @csa_active: marks whether a channel switch is going on.
* @csa_punct_bitmap: new puncturing bitmap for channel switch
* @mu_mimo_owner: indicates interface owns MU-MIMO capability
* @chanctx_conf: The channel context this interface is assigned to, or %NULL
@@ -653,9 +651,7 @@ struct ieee80211_fils_discovery {
* path needing to access it; even though the netdev carrier will always
* be off when it is %NULL there can still be races and packets could be
* processed after it switches back to %NULL.
- * @color_change_active: marks whether a color change is ongoing. Internally it is
- * write-protected by sdata_lock and local->mtx so holding either is fine
- * for read access.
+ * @color_change_active: marks whether a color change is ongoing.
* @color_change_color: the bss color that will be used after the change.
* @ht_ldpc: in AP mode, indicates interface has HT LDPC capability.
* @vht_ldpc: in AP mode, indicates interface has VHT LDPC capability.
@@ -1082,6 +1078,11 @@ struct ieee80211_tx_rate {
#define IEEE80211_MAX_TX_RETRY 31
+static inline bool ieee80211_rate_valid(struct ieee80211_tx_rate *rate)
+{
+ return rate->idx >= 0 && rate->count > 0;
+}
+
static inline void ieee80211_rate_set_vht(struct ieee80211_tx_rate *rate,
u8 mcs, u8 nss)
{
@@ -1115,7 +1116,9 @@ ieee80211_rate_get_vht_nss(const struct ieee80211_tx_rate *rate)
* not valid if the interface is an MLD since we won't know which
* link the frame will be transmitted on
* @hw_queue: HW queue to put the frame on, skb_get_queue_mapping() gives the AC
- * @ack_frame_id: internal frame ID for TX status, used internally
+ * @status_data: internal data for TX status handling, assigned privately,
+ * see also &enum ieee80211_status_data for the internal documentation
+ * @status_data_idr: indicates status data is IDR allocated ID for ack frame
* @tx_time_est: TX time estimate in units of 4us, used internally
* @control: union part for control data
* @control.rates: TX rates array to try
@@ -1145,20 +1148,16 @@ ieee80211_rate_get_vht_nss(const struct ieee80211_tx_rate *rate)
* @ack: union part for pure ACK data
* @ack.cookie: cookie for the ACK
* @driver_data: array of driver_data pointers
- * @ampdu_ack_len: number of acked aggregated frames.
- * relevant only if IEEE80211_TX_STAT_AMPDU was set.
- * @ampdu_len: number of aggregated frames.
- * relevant only if IEEE80211_TX_STAT_AMPDU was set.
- * @ack_signal: signal strength of the ACK frame
*/
struct ieee80211_tx_info {
/* common information */
u32 flags;
u32 band:3,
- ack_frame_id:13,
+ status_data_idr:1,
+ status_data:13,
hw_queue:4,
tx_time_est:10;
- /* 2 free bits */
+ /* 1 free bit */
union {
struct {
@@ -1172,7 +1171,11 @@ struct ieee80211_tx_info {
u8 use_cts_prot:1;
u8 short_preamble:1;
u8 skip_table:1;
- /* 2 bytes free */
+
+ /* for injection only (bitmap) */
+ u8 antennas:2;
+
+ /* 14 bits free */
};
/* only needed before rate control */
unsigned long jiffies;
@@ -1192,9 +1195,11 @@ struct ieee80211_tx_info {
u8 ampdu_ack_len;
u8 ampdu_len;
u8 antenna;
+ u8 pad;
u16 tx_time;
u8 flags;
- void *status_driver_data[18 / sizeof(void *)];
+ u8 pad2;
+ void *status_driver_data[16 / sizeof(void *)];
} status;
struct {
struct ieee80211_tx_rate driver_rates[
@@ -1350,6 +1355,9 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* the frame.
* @RX_FLAG_FAILED_PLCP_CRC: Set this flag if the PCLP check failed on
* the frame.
+ * @RX_FLAG_MACTIME: The timestamp passed in the RX status (@mactime
+ * field) is valid if this field is non-zero, and the position
+ * where the timestamp was sampled depends on the value.
* @RX_FLAG_MACTIME_START: The timestamp passed in the RX status (@mactime
* field) is valid and contains the time the first symbol of the MPDU
* was received. This is useful in monitor mode and for proper IBSS
@@ -1359,6 +1367,11 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* (including FCS) was received.
* @RX_FLAG_MACTIME_PLCP_START: The timestamp passed in the RX status (@mactime
* field) is valid and contains the time the SYNC preamble was received.
+ * @RX_FLAG_MACTIME_IS_RTAP_TS64: The timestamp passed in the RX status @mactime
+ * is only for use in the radiotap timestamp header, not otherwise a valid
+ * @mactime value. Note this is a separate flag so that we continue to see
+ * %RX_FLAG_MACTIME as unset. Also note that in this case the timestamp is
+ * reported to be 64 bits wide, not just 32.
* @RX_FLAG_NO_SIGNAL_VAL: The signal strength value is not present.
* Valid only for data frames (mainly A-MPDU)
* @RX_FLAG_AMPDU_DETAILS: A-MPDU details are known, in particular the reference
@@ -1429,12 +1442,12 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
enum mac80211_rx_flags {
RX_FLAG_MMIC_ERROR = BIT(0),
RX_FLAG_DECRYPTED = BIT(1),
- RX_FLAG_MACTIME_PLCP_START = BIT(2),
+ RX_FLAG_ONLY_MONITOR = BIT(2),
RX_FLAG_MMIC_STRIPPED = BIT(3),
RX_FLAG_IV_STRIPPED = BIT(4),
RX_FLAG_FAILED_FCS_CRC = BIT(5),
RX_FLAG_FAILED_PLCP_CRC = BIT(6),
- RX_FLAG_MACTIME_START = BIT(7),
+ RX_FLAG_MACTIME_IS_RTAP_TS64 = BIT(7),
RX_FLAG_NO_SIGNAL_VAL = BIT(8),
RX_FLAG_AMPDU_DETAILS = BIT(9),
RX_FLAG_PN_VALIDATED = BIT(10),
@@ -1443,8 +1456,10 @@ enum mac80211_rx_flags {
RX_FLAG_AMPDU_IS_LAST = BIT(13),
RX_FLAG_AMPDU_DELIM_CRC_ERROR = BIT(14),
RX_FLAG_AMPDU_DELIM_CRC_KNOWN = BIT(15),
- RX_FLAG_MACTIME_END = BIT(16),
- RX_FLAG_ONLY_MONITOR = BIT(17),
+ RX_FLAG_MACTIME = BIT(16) | BIT(17),
+ RX_FLAG_MACTIME_PLCP_START = 1 << 16,
+ RX_FLAG_MACTIME_START = 2 << 16,
+ RX_FLAG_MACTIME_END = 3 << 16,
RX_FLAG_SKIP_MONITOR = BIT(18),
RX_FLAG_AMSDU_MORE = BIT(19),
RX_FLAG_RADIOTAP_TLV_AT_END = BIT(20),
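With RX_FLAG_MACTIME spanning bits 16-17 as a two-bit field, consumers can decode the sampling position with one mask instead of testing three independent flags. A minimal sketch (the function name is illustrative):

static const char *example_mactime_pos(u32 rx_flags)
{
	switch (rx_flags & RX_FLAG_MACTIME) {
	case RX_FLAG_MACTIME_PLCP_START:
		return "SYNC preamble";
	case RX_FLAG_MACTIME_START:
		return "first symbol";
	case RX_FLAG_MACTIME_END:
		return "last symbol";
	default:
		return "mactime not valid";
	}
}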
@@ -1755,12 +1770,15 @@ struct ieee80211_channel_switch {
* @IEEE80211_VIF_GET_NOA_UPDATE: request to handle NOA attributes
* and send P2P_PS notification to the driver if NOA changed, even
* this is not pure P2P vif.
+ * @IEEE80211_VIF_EML_ACTIVE: The driver indicates that EML operation is
+ * enabled for the interface.
*/
enum ieee80211_vif_flags {
IEEE80211_VIF_BEACON_FILTER = BIT(0),
IEEE80211_VIF_SUPPORTS_CQM_RSSI = BIT(1),
IEEE80211_VIF_SUPPORTS_UAPSD = BIT(2),
IEEE80211_VIF_GET_NOA_UPDATE = BIT(3),
+ IEEE80211_VIF_EML_ACTIVE = BIT(4),
};
@@ -1790,6 +1808,9 @@ enum ieee80211_offload_flags {
* @ps: power-save mode (STA only). This flag is NOT affected by
* offchannel/dynamic_ps operations.
* @aid: association ID number, valid only when @assoc is true
+ * @eml_cap: EML capabilities as described in P802.11be_D2.2 Figure 9-1002k.
+ * @eml_med_sync_delay: Medium Synchronization delay as described in
+ * P802.11be_D2.2 Figure 9-1002j.
* @arp_addr_list: List of IPv4 addresses for hardware ARP filtering. The
* hardware may filter ARP queries targeted for other addresses than listed
* here. The driver must allow ARP queries targeted for all addresses listed here
@@ -1812,6 +1833,8 @@ struct ieee80211_vif_cfg {
bool ibss_creator;
bool ps;
u16 aid;
+ u16 eml_cap;
+ u16 eml_med_sync_delay;
__be32 arp_addr_list[IEEE80211_BSS_ARP_ADDR_LIST_LEN];
int arp_addr_cnt;
@@ -1838,6 +1861,8 @@ struct ieee80211_vif_cfg {
* @active_links: The bitmap of active links, or 0 for non-MLO.
* The driver shouldn't change this directly, but use the
* API calls meant for that purpose.
+ * @dormant_links: bitmap of valid but disabled links, or 0 for non-MLO.
+ * Must be a subset of valid_links.
* @addr: address of this interface
* @p2p: indicates whether this AP or STA interface is a p2p
* interface, i.e. a GO or p2p-sta respectively
@@ -1875,7 +1900,7 @@ struct ieee80211_vif {
struct ieee80211_vif_cfg cfg;
struct ieee80211_bss_conf bss_conf;
struct ieee80211_bss_conf __rcu *link_conf[IEEE80211_MLD_MAX_NUM_LINKS];
- u16 valid_links, active_links;
+ u16 valid_links, active_links, dormant_links;
u8 addr[ETH_ALEN] __aligned(2);
bool p2p;
@@ -1901,11 +1926,32 @@ struct ieee80211_vif {
u8 drv_priv[] __aligned(sizeof(void *));
};
+/**
+ * ieee80211_vif_usable_links - Return the usable links for the vif
+ * @vif: the vif for which the usable links are requested
+ * Return: the usable link bitmap
+ */
+static inline u16 ieee80211_vif_usable_links(const struct ieee80211_vif *vif)
+{
+ return vif->valid_links & ~vif->dormant_links;
+}
+
+/**
+ * ieee80211_vif_is_mld - Returns true iff the vif is an MLD one
+ * @vif: the vif
+ * Return: %true if the vif is an MLD, %false otherwise.
+ */
+static inline bool ieee80211_vif_is_mld(const struct ieee80211_vif *vif)
+{
+ /* valid_links != 0 indicates this vif is an MLD */
+ return vif->valid_links != 0;
+}
+
#define for_each_vif_active_link(vif, link, link_id) \
for (link_id = 0; link_id < ARRAY_SIZE((vif)->link_conf); link_id++) \
if ((!(vif)->active_links || \
(vif)->active_links & BIT(link_id)) && \
- (link = rcu_dereference((vif)->link_conf[link_id])))
+ (link = link_conf_dereference_check(vif, link_id)))
static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif)
{
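A sketch of assumed driver code combining the new MLD helpers with the iterator above; it must run under RCU or with the wiphy mutex held for the link_conf dereference:

static void example_walk_active_links(struct ieee80211_vif *vif)
{
	struct ieee80211_bss_conf *link;
	unsigned int link_id;

	if (!ieee80211_vif_is_mld(vif))
		return;	/* non-MLO: only the deflink exists */

	for_each_vif_active_link(vif, link, link_id)
		pr_debug("link %u active, usable mask 0x%x\n",
			 link_id, ieee80211_vif_usable_links(vif));
}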
@@ -1938,22 +1984,18 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
*/
struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif);
-/**
- * lockdep_vif_mutex_held - for lockdep checks on link poiners
- * @vif: the interface to check
- */
-static inline bool lockdep_vif_mutex_held(struct ieee80211_vif *vif)
+static inline bool lockdep_vif_wiphy_mutex_held(struct ieee80211_vif *vif)
{
- return lockdep_is_held(&ieee80211_vif_to_wdev(vif)->mtx);
+ return lockdep_is_held(&ieee80211_vif_to_wdev(vif)->wiphy->mtx);
}
#define link_conf_dereference_protected(vif, link_id) \
rcu_dereference_protected((vif)->link_conf[link_id], \
- lockdep_vif_mutex_held(vif))
+ lockdep_vif_wiphy_mutex_held(vif))
#define link_conf_dereference_check(vif, link_id) \
rcu_dereference_check((vif)->link_conf[link_id], \
- lockdep_vif_mutex_held(vif))
+ lockdep_vif_wiphy_mutex_held(vif))
/**
* enum ieee80211_key_flags - key flags
@@ -2228,6 +2270,7 @@ struct ieee80211_sta_aggregates {
* @he_cap: HE capabilities of this STA
* @he_6ghz_capa: on 6 GHz, holds the HE 6 GHz band capabilities
* @eht_cap: EHT capabilities of this STA
+ * @agg: per-link data for multi-link aggregation
* @bandwidth: current bandwidth the station can receive with
* @rx_nss: in HT/VHT, the maximum number of spatial streams the
* station can receive at the moment, changed by operating mode
@@ -2359,7 +2402,7 @@ static inline bool lockdep_sta_mutex_held(struct ieee80211_sta *pubsta)
for (link_id = 0; link_id < ARRAY_SIZE((sta)->link); link_id++) \
if ((!(vif)->active_links || \
(vif)->active_links & BIT(link_id)) && \
- ((link_sta) = link_sta_dereference_protected(sta, link_id)))
+ ((link_sta) = link_sta_dereference_check(sta, link_id)))
/**
* enum sta_notify_cmd - sta notify command
@@ -2646,6 +2689,9 @@ struct ieee80211_txq {
* @IEEE80211_HW_MLO_MCAST_MULTI_LINK_TX: Hardware/driver handles transmitting
* multicast frames on all links, mac80211 should not do that.
*
+ * @IEEE80211_HW_DISALLOW_PUNCTURING: HW requires disabling puncturing in EHT
+ * and connecting with a lower bandwidth instead
+ *
* @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
*/
enum ieee80211_hw_flags {
@@ -2703,6 +2749,7 @@ enum ieee80211_hw_flags {
IEEE80211_HW_SUPPORTS_CONC_MON_RX_DECAP,
IEEE80211_HW_DETECTS_COLOR_COLLISION,
IEEE80211_HW_MLO_MCAST_MULTI_LINK_TX,
+ IEEE80211_HW_DISALLOW_PUNCTURING,
/* keep last, obviously */
NUM_IEEE80211_HW_FLAGS
@@ -2791,8 +2838,6 @@ enum ieee80211_hw_flags {
* the default is _GI | _BANDWIDTH.
* Use the %IEEE80211_RADIOTAP_VHT_KNOWN_\* values.
*
- * @radiotap_he: HE radiotap validity flags
- *
* @radiotap_timestamp: Information for the radiotap timestamp field; if the
* @units_pos member is set to a non-negative value then the timestamp
* field will be added and populated from the &struct ieee80211_rx_status
@@ -3022,7 +3067,7 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
* The set_key() call for the %SET_KEY command should return 0 if
* the key is now in use, -%EOPNOTSUPP or -%ENOSPC if it couldn't be
* added; if you return 0 then hw_key_idx must be assigned to the
- * hardware key index, you are free to use the full u8 range.
+ * hardware key index. You are free to use the full u8 range.
*
* Note that in the case that the @IEEE80211_HW_SW_CRYPTO_CONTROL flag is
* set, mac80211 will not automatically fall back to software crypto if
@@ -3032,7 +3077,7 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
* When the cmd is %DISABLE_KEY then it must succeed.
*
* Note that it is permissible to not decrypt a frame even if a key
- * for it has been uploaded to hardware, the stack will not make any
+ * for it has been uploaded to hardware. The stack will not make any
* decision based on whether a key has been uploaded or not but rather
* based on the receive flags.
*
@@ -3047,7 +3092,7 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
* The update_tkip_key() call updates the driver with the new phase 1 key.
* This happens every time the iv16 wraps around (every 65536 packets). The
* set_key() call will happen only once for each key (unless the AP did
- * rekeying), it will not include a valid phase 1 key. The valid phase 1 key is
+ * rekeying); it will not include a valid phase 1 key. The valid phase 1 key is
* provided by update_tkip_key only. The trigger that makes mac80211 call this
* handler is software decryption with wrap around of iv16.
*
@@ -3074,7 +3119,7 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
*
* mac80211 has support for various powersave implementations.
*
- * First, it can support hardware that handles all powersaving by itself,
+ * First, it can support hardware that handles all powersaving by itself;
* such hardware should simply set the %IEEE80211_HW_SUPPORTS_PS hardware
* flag. In that case, it will be told about the desired powersave mode
* with the %IEEE80211_CONF_PS flag depending on the association status.
@@ -3099,12 +3144,12 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
* %IEEE80211_HW_PS_NULLFUNC_STACK flags. The hardware is of course still
* required to pass up beacons. The hardware is still required to handle
* waking up for multicast traffic; if it cannot, the driver must handle that
- * as best as it can, mac80211 is too slow to do that.
+ * as best as it can; mac80211 is too slow to do that.
*
* Dynamic powersave is an extension to normal powersave in which the
* hardware stays awake for a user-specified period of time after sending a
* frame so that reply frames need not be buffered and therefore delayed to
- * the next wakeup. It's compromise of getting good enough latency when
+ * the next wakeup. It's a compromise of getting good enough latency when
* there's data traffic and still saving significantly power in idle
* periods.
*
@@ -3169,7 +3214,7 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
* Note that change, for the sake of simplification, also includes information
* elements appearing or disappearing from the beacon.
*
- * Some hardware supports an "ignore list" instead, just make sure nothing
+ * Some hardware supports an "ignore list" instead. Just make sure nothing
* that was requested is on the ignore list, and include commonly changing
* information element IDs in the ignore list, for example 11 (BSS load) and
* the various vendor-assigned IEs with unknown contents (128, 129, 133-136,
@@ -3180,7 +3225,7 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
* In addition to these capabilities, hardware should support notifying the
* host of changes in the beacon RSSI. This is relevant to implement roaming
* when no traffic is flowing (when traffic is flowing we see the RSSI of
- * the received data packets). This can consist in notifying the host when
+ * the received data packets). This can consist of notifying the host when
* the RSSI changes significantly or when it drops below or rises above
* configurable thresholds. In the future these thresholds will also be
* configured by mac80211 (which gets them from userspace) to implement
@@ -3327,8 +3372,8 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
* period starts for any reason, @release_buffered_frames is called
* with the number of frames to be released and which TIDs they are
* to come from. In this case, the driver is responsible for setting
- * the EOSP (for uAPSD) and MORE_DATA bits in the released frames,
- * to help the @more_data parameter is passed to tell the driver if
+ * the EOSP (for uAPSD) and MORE_DATA bits in the released frames.
+ * To help, the @more_data parameter is passed to tell the driver if
* there is more data on other TIDs -- the TIDs to release frames
* from are ignored since mac80211 doesn't know how many frames the
* buffers for those TIDs contain.
@@ -3377,7 +3422,7 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
* Additionally, the driver has to then use these HW queue IDs for the queue
* management functions (ieee80211_stop_queue() et al.)
*
- * The driver is free to set up the queue mappings as needed, multiple virtual
+ * The driver is free to set up the queue mappings as needed; multiple virtual
* interfaces may map to the same hardware queues if needed. The setup has to
* happen during add_interface or change_interface callbacks. For example, a
* driver supporting station+station and station+AP modes might decide to have
@@ -3601,11 +3646,14 @@ enum ieee80211_reconfig_type {
* @success: whether the frame exchange was successful, only
* used with the mgd_complete_tx() method, and then only
* valid for auth and (re)assoc.
+ * @link_id: the link id on which the frame will be TX'ed.
+ * Only used with the mgd_prepare_tx() method.
*/
struct ieee80211_prep_tx_info {
u16 duration;
u16 subtype;
u8 success:1;
+ int link_id;
};
/**
@@ -3829,6 +3877,10 @@ struct ieee80211_prep_tx_info {
* the station. See @sta_pre_rcu_remove if needed.
* This callback can sleep.
*
+ * @vif_add_debugfs: Drivers can use this callback to add a debugfs vif
+ * directory with its files. This callback should be within a
+ * CONFIG_MAC80211_DEBUGFS conditional. This callback can sleep.
+ *
* @link_add_debugfs: Drivers can use this callback to add debugfs files
* when a link is added to a mac80211 vif. This callback should be within
* a CONFIG_MAC80211_DEBUGFS conditional. This callback can sleep.
@@ -3842,7 +3894,7 @@ struct ieee80211_prep_tx_info {
*
* @link_sta_add_debugfs: Drivers can use this callback to add debugfs files
* when a link is added to a mac80211 station. This callback
- * should be within a CPTCFG_MAC80211_DEBUGFS conditional. This
+ * should be within a CONFIG_MAC80211_DEBUGFS conditional. This
* callback can sleep.
* For non-MLO the callback will be called once for the deflink with the
* station's directory rather than a separate subdirectory.
@@ -4033,11 +4085,15 @@ struct ieee80211_prep_tx_info {
* This callback must be atomic.
*
* @get_et_sset_count: Ethtool API to get string-set count.
+ * Note that the wiphy mutex is not held for this callback since it's
+ * expected to return a static value.
*
* @get_et_stats: Ethtool API to get a set of u64 stats.
*
* @get_et_strings: Ethtool API to get a set of strings to describe stats
* and perhaps other supported types of ethtool data-sets.
+ * Note that the wiphy mutex is not held for this callback since it's
+ * expected to return a static value.
*
* @mgd_prepare_tx: Prepare for transmitting a management frame for association
* before associated. In multi-channel scenarios, a virtual interface is
@@ -4216,6 +4272,8 @@ struct ieee80211_prep_tx_info {
* disable background CAC/radar detection.
* @net_fill_forward_path: Called from .ndo_fill_forward_path in order to
* resolve a path for hardware flow offloading
+ * @can_activate_links: Checks if a specific active_links bitmap is
+ * supported by the driver.
* @change_vif_links: Change the valid links on an interface; note that while
* removing, the old link information is still valid (link_conf pointer)
* but may immediately disappear after the function returns. The old or
@@ -4324,6 +4382,8 @@ struct ieee80211_ops {
int (*sta_remove)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
#ifdef CONFIG_MAC80211_DEBUGFS
+ void (*vif_add_debugfs)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
void (*link_add_debugfs)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf,
@@ -4472,7 +4532,8 @@ struct ieee80211_ops {
struct ieee80211_prep_tx_info *info);
void (*mgd_protect_tdls_discover)(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
+ struct ieee80211_vif *vif,
+ unsigned int link_id);
int (*add_chanctx)(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx);
@@ -4510,7 +4571,8 @@ struct ieee80211_ops {
struct ieee80211_channel_switch *ch_switch);
int (*post_channel_switch)(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
void (*abort_channel_switch)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
void (*channel_switch_rx_beacon)(struct ieee80211_hw *hw,
@@ -4592,6 +4654,9 @@ struct ieee80211_ops {
struct ieee80211_sta *sta,
struct net_device_path_ctx *ctx,
struct net_device_path *path);
+ bool (*can_activate_links)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u16 active_links);
int (*change_vif_links)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
u16 old_links, u16 new_links,
@@ -4856,7 +4921,7 @@ void ieee80211_restart_hw(struct ieee80211_hw *hw);
* for a single hardware must be synchronized against each other. Calls to
* this function, ieee80211_rx_ni() and ieee80211_rx_irqsafe() may not be
* mixed for a single hardware. Must not run concurrently with
- * ieee80211_tx_status() or ieee80211_tx_status_ni().
+ * ieee80211_tx_status_skb() or ieee80211_tx_status_ni().
*
* This function must be called with BHs disabled and RCU read lock
*
@@ -4881,7 +4946,7 @@ void ieee80211_rx_list(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
* for a single hardware must be synchronized against each other. Calls to
* this function, ieee80211_rx_ni() and ieee80211_rx_irqsafe() may not be
* mixed for a single hardware. Must not run concurrently with
- * ieee80211_tx_status() or ieee80211_tx_status_ni().
+ * ieee80211_tx_status_skb() or ieee80211_tx_status_ni().
*
* This function must be called with BHs disabled.
*
@@ -4906,7 +4971,7 @@ void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
* for a single hardware must be synchronized against each other. Calls to
* this function, ieee80211_rx_ni() and ieee80211_rx_irqsafe() may not be
* mixed for a single hardware. Must not run concurrently with
- * ieee80211_tx_status() or ieee80211_tx_status_ni().
+ * ieee80211_tx_status_skb() or ieee80211_tx_status_ni().
*
* In process context use instead ieee80211_rx_ni().
*
@@ -4926,7 +4991,7 @@ static inline void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
*
* Calls to this function, ieee80211_rx() or ieee80211_rx_ni() may not
* be mixed for a single hardware.Must not run concurrently with
- * ieee80211_tx_status() or ieee80211_tx_status_ni().
+ * ieee80211_tx_status_skb() or ieee80211_tx_status_ni().
*
* @hw: the hardware this frame came in on
* @skb: the buffer to receive, owned by mac80211 after this call
@@ -4941,7 +5006,7 @@ void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb);
*
* Calls to this function, ieee80211_rx() and ieee80211_rx_irqsafe() may
* not be mixed for a single hardware. Must not run concurrently with
- * ieee80211_tx_status() or ieee80211_tx_status_ni().
+ * ieee80211_tx_status_skb() or ieee80211_tx_status_ni().
*
* @hw: the hardware this frame came in on
* @skb: the buffer to receive, owned by mac80211 after this call
@@ -5117,7 +5182,7 @@ void ieee80211_tx_rate_update(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info);
/**
- * ieee80211_tx_status - transmit status callback
+ * ieee80211_tx_status_skb - transmit status callback
*
* Call this function for all transmitted frames after they have been
* transmitted. It is permissible to not call this function for
@@ -5132,13 +5197,13 @@ void ieee80211_tx_rate_update(struct ieee80211_hw *hw,
* @hw: the hardware the frame was transmitted by
* @skb: the frame that was transmitted, owned by mac80211 after this call
*/
-void ieee80211_tx_status(struct ieee80211_hw *hw,
- struct sk_buff *skb);
+void ieee80211_tx_status_skb(struct ieee80211_hw *hw,
+ struct sk_buff *skb);
/**
* ieee80211_tx_status_ext - extended transmit status callback
*
- * This function can be used as a replacement for ieee80211_tx_status
+ * This function can be used as a replacement for ieee80211_tx_status_skb()
* in drivers that may want to provide extra information that does not
* fit into &struct ieee80211_tx_info.
*
@@ -5155,7 +5220,7 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
/**
* ieee80211_tx_status_noskb - transmit status callback without skb
*
- * This function can be used as a replacement for ieee80211_tx_status
+ * This function can be used as a replacement for ieee80211_tx_status_skb()
* in drivers that cannot reliably map tx status information back to
* specific skbs.
*
@@ -5183,9 +5248,9 @@ static inline void ieee80211_tx_status_noskb(struct ieee80211_hw *hw,
/**
* ieee80211_tx_status_ni - transmit status callback (in process context)
*
- * Like ieee80211_tx_status() but can be called in process context.
+ * Like ieee80211_tx_status_skb() but can be called in process context.
*
- * Calls to this function, ieee80211_tx_status() and
+ * Calls to this function, ieee80211_tx_status_skb() and
* ieee80211_tx_status_irqsafe() may not be mixed
* for a single hardware.
*
@@ -5196,17 +5261,17 @@ static inline void ieee80211_tx_status_ni(struct ieee80211_hw *hw,
struct sk_buff *skb)
{
local_bh_disable();
- ieee80211_tx_status(hw, skb);
+ ieee80211_tx_status_skb(hw, skb);
local_bh_enable();
}
/**
* ieee80211_tx_status_irqsafe - IRQ-safe transmit status callback
*
- * Like ieee80211_tx_status() but can be called in IRQ context
+ * Like ieee80211_tx_status_skb() but can be called in IRQ context
* (internally defers to a tasklet.)
*
- * Calls to this function, ieee80211_tx_status() and
+ * Calls to this function, ieee80211_tx_status_skb() and
* ieee80211_tx_status_ni() may not be mixed for a single hardware.
*
* @hw: the hardware the frame was transmitted by
@@ -5251,7 +5316,8 @@ struct ieee80211_mutable_offsets {
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
* @offs: &struct ieee80211_mutable_offsets pointer to struct that will
* receive the offsets that may be updated by the driver.
- * @link_id: the link id to which the beacon belongs (or 0 for a non-MLD AP)
+ * @link_id: the link id to which the beacon belongs (or 0 for an AP STA
+ * that is not associated with an AP MLD).
*
* If the driver implements beaconing modes, it must use this function to
* obtain the beacon template.
@@ -5348,7 +5414,8 @@ void ieee80211_beacon_free_ema_list(struct ieee80211_ema_beacons *ema_beacons);
* @tim_length: pointer to variable that will receive the TIM IE length,
* (including the ID and length bytes!).
* Set to 0 if invalid (in non-AP modes).
- * @link_id: the link id to which the beacon belongs (or 0 for a non-MLD AP)
+ * @link_id: the link id to which the beacon belongs (or 0 for an AP STA
+ * that is not associated with an AP MLD).
*
* If the driver implements beaconing modes, it must use this function to
* obtain the beacon frame.
@@ -5371,7 +5438,8 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
* ieee80211_beacon_get - beacon generation function
* @hw: pointer obtained from ieee80211_alloc_hw().
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
- * @link_id: the link id to which the beacon belongs (or 0 for a non-MLD AP)
+ * @link_id: the link id to which the beacon belongs (or 0 for an AP STA
+ * that is not associated with an AP MLD).
*
* See ieee80211_beacon_get_tim().
*
@@ -5751,12 +5819,11 @@ void ieee80211_set_key_rx_seq(struct ieee80211_key_conf *keyconf,
* ieee80211_remove_key - remove the given key
* @keyconf: the parameter passed with the set key
*
+ * Context: Must be called with the wiphy mutex held.
+ *
* Remove the given key. If the key was uploaded to the hardware at the
* time this function is called, it is not deleted in the hardware but
* instead assumed to have been removed already.
- *
- * Note that due to locking considerations this function can (currently)
- * only be called during key iteration (ieee80211_iter_keys().)
*/
void ieee80211_remove_key(struct ieee80211_key_conf *keyconf);
@@ -6310,12 +6377,12 @@ ieee80211_txq_airtime_check(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
* @iter: iterator function that will be called for each key
* @iter_data: custom data to pass to the iterator function
*
+ * Context: Must be called with wiphy mutex held; can sleep.
+ *
* This function can be used to iterate all the keys known to
* mac80211, even those that weren't previously programmed into
* the device. This is intended for use in WoWLAN if the device
- * needs reprogramming of the keys during suspend. Note that due
- * to locking reasons, it is also only safe to call this at few
- * spots since it must hold the RTNL and be able to sleep.
+ * needs reprogramming of the keys during suspend.
*
* The order in which the keys are iterated matches the order
* in which they were originally installed and handed to the
@@ -6505,11 +6572,14 @@ void ieee80211_radar_detected(struct ieee80211_hw *hw);
* ieee80211_chswitch_done - Complete channel switch process
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
* @success: make the channel switch successful or not
+ * @link_id: the link_id on which the switch was done. Ignored if success is
+ * false.
*
* Complete the channel switch post-process: set the new operational channel
* and wake up the suspended queues.
*/
-void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success);
+void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success,
+ unsigned int link_id);
/**
* ieee80211_channel_switch_disconnect - disconnect due to channel switch error
@@ -6578,6 +6648,7 @@ void ieee80211_stop_rx_ba_session(struct ieee80211_vif *vif, u16 ba_rx_bitmap,
* marks frames marked in the bitmap as having been filtered. Afterwards, it
* checks if any frames in the window starting from @ssn can now be released
* (in case they were only waiting for frames that were filtered.)
+ * (Only works correctly if @max_rx_aggregation_subframes <= 64 frames)
*/
void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
u16 ssn, u64 filtered,
@@ -6862,6 +6933,48 @@ ieee80211_vif_type_p2p(struct ieee80211_vif *vif)
}
/**
+ * ieee80211_get_he_iftype_cap_vif - return HE capabilities for sband/vif
+ * @sband: the sband to search for the iftype on
+ * @vif: the vif to get the iftype from
+ *
+ * Return: pointer to the struct ieee80211_sta_he_cap, or %NULL if none found
+ */
+static inline const struct ieee80211_sta_he_cap *
+ieee80211_get_he_iftype_cap_vif(const struct ieee80211_supported_band *sband,
+ struct ieee80211_vif *vif)
+{
+ return ieee80211_get_he_iftype_cap(sband, ieee80211_vif_type_p2p(vif));
+}
+
+/**
+ * ieee80211_get_he_6ghz_capa_vif - return HE 6 GHz capabilities
+ * @sband: the sband to search for the STA on
+ * @vif: the vif to get the iftype from
+ *
+ * Return: the 6 GHz capabilities
+ */
+static inline __le16
+ieee80211_get_he_6ghz_capa_vif(const struct ieee80211_supported_band *sband,
+ struct ieee80211_vif *vif)
+{
+ return ieee80211_get_he_6ghz_capa(sband, ieee80211_vif_type_p2p(vif));
+}
+
+/**
+ * ieee80211_get_eht_iftype_cap_vif - return EHT capabilities for sband/vif
+ * @sband: the sband to search for the iftype on
+ * @vif: the vif to get the iftype from
+ *
+ * Return: pointer to the struct ieee80211_sta_eht_cap, or %NULL if none found
+ */
+static inline const struct ieee80211_sta_eht_cap *
+ieee80211_get_eht_iftype_cap_vif(const struct ieee80211_supported_band *sband,
+ struct ieee80211_vif *vif)
+{
+ return ieee80211_get_eht_iftype_cap(sband, ieee80211_vif_type_p2p(vif));
+}
+
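The three vif-based wrappers above just translate the vif into its
P2P-aware interface type. A sketch of typical use, checking whether the
interface type of a hypothetical `vif` has HE support on a band:

	/* Sketch, not from the patch: has_he is the flag inside
	 * struct ieee80211_sta_he_cap.
	 */
	static bool drv_vif_supports_he(const struct ieee80211_supported_band *sband,
					struct ieee80211_vif *vif)
	{
		const struct ieee80211_sta_he_cap *he_cap;

		he_cap = ieee80211_get_he_iftype_cap_vif(sband, vif);
		return he_cap && he_cap->has_he;
	}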
+/**
* ieee80211_update_mu_groups - set the VHT MU-MIMO group data
*
* @vif: the specified virtual interface
@@ -7162,7 +7275,7 @@ ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq,
*
* This function is used to check whether the given txq is allowed to transmit by
* the airtime scheduler, and can be used by drivers to access the airtime
- * fairness accounting without going using the scheduling order enfored by
+ * fairness accounting without using the scheduling order enforced by
* next_txq().
*
* Returns %true if the airtime scheduler thinks the TXQ should be allowed to
@@ -7331,6 +7444,9 @@ static inline bool ieee80211_is_tx_data(struct sk_buff *skb)
* @vif: interface to set active links on
* @active_links: the new active links bitmap
*
+ * Context: Must be called with wiphy mutex held; may sleep; calls
+ * back into the driver.
+ *
* This changes the active links on an interface. The interface
* must be in client mode (in AP mode, all links are always active),
* and @active_links must be a subset of the vif's valid_links.
@@ -7338,6 +7454,7 @@ static inline bool ieee80211_is_tx_data(struct sk_buff *skb)
* If a link is switched off and another is switched on at the same
* time (e.g. active_links going from 0x1 to 0x10) then you will get
* a sequence of calls like
+ *
* - change_vif_links(0x11)
* - unassign_vif_chanctx(link_id=0)
* - change_sta_links(0x11) for each affected STA (the AP)
@@ -7347,10 +7464,6 @@ static inline bool ieee80211_is_tx_data(struct sk_buff *skb)
* - change_sta_links(0x10) for each affected STA (the AP)
* - assign_vif_chanctx(link_id=4)
* - change_vif_links(0x10)
- *
- * Note: This function acquires some mac80211 locks and must not
- * be called with any driver locks held that could cause a
- * lock dependency inversion. Best call it without locks.
*/
int ieee80211_set_active_links(struct ieee80211_vif *vif, u16 active_links);
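A minimal caller-side sketch for the new Context requirement, assuming
a client-mode vif whose valid_links includes link 4; `hw` is the usual
hardware pointer:

	/* Sketch: switch the vif's active set to just link 4 while
	 * holding the wiphy mutex, as required above.
	 */
	static int drv_switch_to_link4(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif)
	{
		int ret;

		wiphy_lock(hw->wiphy);
		ret = ieee80211_set_active_links(vif, BIT(4));
		wiphy_unlock(hw->wiphy);
		return ret;
	}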
diff --git a/include/net/macsec.h b/include/net/macsec.h
index 5b9c61c4d3a6..dbd22180cc5c 100644
--- a/include/net/macsec.h
+++ b/include/net/macsec.h
@@ -8,6 +8,7 @@
#define _NET_MACSEC_H_
#include <linux/u64_stats_sync.h>
+#include <linux/if_vlan.h>
#include <uapi/linux/if_link.h>
#include <uapi/linux/if_macsec.h>
@@ -246,6 +247,23 @@ struct macsec_secy {
/**
* struct macsec_context - MACsec context for hardware offloading
+ * @netdev: a valid pointer to a struct net_device if @offload ==
+ * MACSEC_OFFLOAD_MAC
+ * @phydev: a valid pointer to a struct phy_device if @offload ==
+ * MACSEC_OFFLOAD_PHY
+ * @offload: MACsec offload status
+ * @secy: pointer to a MACsec SecY
+ * @rx_sc: pointer to a RX SC
+ * @update_pn: when updating the SA, update the next PN
+ * @assoc_num: association number of the target SA
+ * @key: key of the target SA
+ * @rx_sa: pointer to an RX SA if a RX SA is added/updated/removed
+ * @tx_sa: pointer to a TX SA if a TX SA is added/updated/removed
+ * @tx_sc_stats: pointer to TX SC stats structure
+ * @tx_sa_stats: pointer to TX SA stats structure
+ * @rx_sc_stats: pointer to RX SC stats structure
+ * @rx_sa_stats: pointer to RX SA stats structure
+ * @dev_stats: pointer to dev stats structure
*/
struct macsec_context {
union {
@@ -257,6 +275,7 @@ struct macsec_context {
struct macsec_secy *secy;
struct macsec_rx_sc *rx_sc;
struct {
+ bool update_pn;
unsigned char assoc_num;
u8 key[MACSEC_MAX_KEY_LEN];
union {
@@ -275,6 +294,33 @@ struct macsec_context {
/**
* struct macsec_ops - MACsec offloading operations
+ * @mdo_dev_open: called when the MACsec interface transitions to the up state
+ * @mdo_dev_stop: called when the MACsec interface transitions to the down
+ * state
+ * @mdo_add_secy: called when a new SecY is added
+ * @mdo_upd_secy: called when the SecY flags are changed or the MAC address of
+ * the MACsec interface is changed
+ * @mdo_del_secy: called when the hw offload is disabled or the MACsec
+ * interface is removed
+ * @mdo_add_rxsc: called when a new RX SC is added
+ * @mdo_upd_rxsc: called when a certain RX SC is updated
+ * @mdo_del_rxsc: called when a certain RX SC is removed
+ * @mdo_add_rxsa: called when a new RX SA is added
+ * @mdo_upd_rxsa: called when a certain RX SA is updated
+ * @mdo_del_rxsa: called when a certain RX SA is removed
+ * @mdo_add_txsa: called when a new TX SA is added
+ * @mdo_upd_txsa: called when a certain TX SA is updated
+ * @mdo_del_txsa: called when a certain TX SA is removed
+ * @mdo_get_dev_stats: called when dev stats are read
+ * @mdo_get_tx_sc_stats: called when TX SC stats are read
+ * @mdo_get_tx_sa_stats: called when TX SA stats are read
+ * @mdo_get_rx_sc_stats: called when RX SC stats are read
+ * @mdo_get_rx_sa_stats: called when RX SA stats are read
+ * @mdo_insert_tx_tag: called to insert the TX tag
+ * @needed_headroom: number of bytes reserved at the beginning of the sk_buff
+ * for the TX tag
+ * @needed_tailroom: number of bytes reserved at the end of the sk_buff for the
+ * TX tag
*/
struct macsec_ops {
/* Device wide */
@@ -301,6 +347,11 @@ struct macsec_ops {
int (*mdo_get_tx_sa_stats)(struct macsec_context *ctx);
int (*mdo_get_rx_sc_stats)(struct macsec_context *ctx);
int (*mdo_get_rx_sa_stats)(struct macsec_context *ctx);
+ /* Offload tag */
+ int (*mdo_insert_tx_tag)(struct phy_device *phydev,
+ struct sk_buff *skb);
+ unsigned int needed_headroom;
+ unsigned int needed_tailroom;
};
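A hypothetical PHY driver wiring for the new offload-tag hooks: the
core reserves @needed_headroom/@needed_tailroom on the skb, then calls
@mdo_insert_tx_tag to fill the tag in. The 8-byte tag size and all
names below are illustrative, not a real device format.

	static int my_phy_insert_tx_tag(struct phy_device *phydev,
					struct sk_buff *skb)
	{
		u8 *tag = skb_push(skb, 8);	/* tag size is an assumption */

		memset(tag, 0, 8);		/* real drivers encode SC/SA info */
		return 0;
	}

	static const struct macsec_ops my_phy_macsec_ops = {
		/* ...mdo_* callbacks elided... */
		.mdo_insert_tx_tag	= my_phy_insert_tx_tag,
		.needed_headroom	= 8,
		.needed_tailroom	= 0,
	};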
void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa);
@@ -311,5 +362,21 @@ static inline bool macsec_send_sci(const struct macsec_secy *secy)
return tx_sc->send_sci ||
(secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
}
+struct net_device *macsec_get_real_dev(const struct net_device *dev);
+bool macsec_netdev_is_offloaded(struct net_device *dev);
+
+static inline void *macsec_netdev_priv(const struct net_device *dev)
+{
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
+ if (is_vlan_dev(dev))
+ return netdev_priv(vlan_dev_priv(dev)->real_dev);
+#endif
+ return netdev_priv(dev);
+}
+
+static inline u64 sci_to_cpu(sci_t sci)
+{
+ return be64_to_cpu((__force __be64)sci);
+}
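A short sketch of the two helpers just added: macsec_netdev_priv()
resolves the private area even when the device handed in is a VLAN
stacked on the MACsec device, and sci_to_cpu() converts the
big-endian SCI for host-order use. `struct my_macsec_priv` is a
placeholder for a driver's private struct.

	struct my_macsec_priv { int dummy; };	/* illustrative only */

	static void my_dump_sci(const struct net_device *dev, sci_t sci)
	{
		struct my_macsec_priv *priv = macsec_netdev_priv(dev);

		pr_debug("%s: priv %p sci %llx\n", netdev_name(dev), priv,
			 (unsigned long long)sci_to_cpu(sci));
	}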
#endif /* _NET_MACSEC_H_ */
diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h
index 96c120160f15..27684135bb4d 100644
--- a/include/net/mana/gdma.h
+++ b/include/net/mana/gdma.h
@@ -33,6 +33,7 @@ enum gdma_request_type {
GDMA_DESTROY_PD = 30,
GDMA_CREATE_MR = 31,
GDMA_DESTROY_MR = 32,
+ GDMA_QUERY_HWC_TIMEOUT = 84, /* 0x54 */
};
#define GDMA_RESOURCE_DOORBELL_PAGE 27
@@ -57,12 +58,15 @@ enum gdma_eqe_type {
GDMA_EQE_HWC_INIT_EQ_ID_DB = 129,
GDMA_EQE_HWC_INIT_DATA = 130,
GDMA_EQE_HWC_INIT_DONE = 131,
+ GDMA_EQE_HWC_SOC_RECONFIG = 132,
+ GDMA_EQE_HWC_SOC_RECONFIG_DATA = 133,
};
enum {
GDMA_DEVICE_NONE = 0,
GDMA_DEVICE_HWC = 1,
GDMA_DEVICE_MANA = 2,
+ GDMA_DEVICE_MANA_IB = 3,
};
struct gdma_resource {
@@ -146,6 +150,7 @@ struct gdma_general_req {
#define GDMA_MESSAGE_V1 1
#define GDMA_MESSAGE_V2 2
+#define GDMA_MESSAGE_V3 3
struct gdma_general_resp {
struct gdma_resp_hdr hdr;
@@ -290,6 +295,7 @@ struct gdma_queue {
u32 head;
u32 tail;
+ struct list_head entry;
/* Extra fields specific to EQ/CQ. */
union {
@@ -325,6 +331,7 @@ struct gdma_queue_spec {
void *context;
unsigned long log2_throttle_limit;
+ unsigned int msix_index;
} eq;
struct {
@@ -341,7 +348,9 @@ struct gdma_queue_spec {
struct gdma_irq_context {
void (*handler)(void *arg);
- void *arg;
+ /* Protect the eq_list */
+ spinlock_t lock;
+ struct list_head eq_list;
char name[MANA_IRQ_NAME_SZ];
};
@@ -352,7 +361,6 @@ struct gdma_context {
unsigned int max_num_queues;
unsigned int max_num_msix;
unsigned int num_msix_usable;
- struct gdma_resource msix_resource;
struct gdma_irq_context *irq_contexts;
/* L2 MTU */
@@ -384,6 +392,9 @@ struct gdma_context {
/* Azure network adapter */
struct gdma_dev mana;
+
+ /* Azure RDMA adapter */
+ struct gdma_dev mana_ib;
};
#define MAX_NUM_GDMA_DEVICES 4
@@ -531,10 +542,12 @@ enum {
* so the driver is able to reliably support features like busy_poll.
*/
#define GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX BIT(2)
+#define GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG BIT(3)
#define GDMA_DRV_CAP_FLAGS1 \
(GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
- GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX)
+ GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \
+ GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG)
#define GDMA_DRV_CAP_FLAGS2 0
@@ -664,6 +677,19 @@ struct gdma_disable_queue_req {
u32 alloc_res_id_on_creation;
}; /* HW DATA */
+/* GDMA_QUERY_HWC_TIMEOUT */
+struct gdma_query_hwc_timeout_req {
+ struct gdma_req_hdr hdr;
+ u32 timeout_ms;
+ u32 reserved;
+};
+
+struct gdma_query_hwc_timeout_resp {
+ struct gdma_resp_hdr hdr;
+ u32 timeout_ms;
+ u32 reserved;
+};
+
enum atb_page_size {
ATB_PAGE_SIZE_4K,
ATB_PAGE_SIZE_8K,
diff --git a/include/net/mana/hw_channel.h b/include/net/mana/hw_channel.h
index 6a757a6e2732..158b125692c2 100644
--- a/include/net/mana/hw_channel.h
+++ b/include/net/mana/hw_channel.h
@@ -23,6 +23,10 @@
#define HWC_INIT_DATA_PF_DEST_RQ_ID 10
#define HWC_INIT_DATA_PF_DEST_CQ_ID 11
+#define HWC_DATA_CFG_HWC_TIMEOUT 1
+
+#define HW_CHANNEL_WAIT_RESOURCE_TIMEOUT_MS 30000
+
/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
* them are naturally aligned and hence don't need __packed.
*/
@@ -117,7 +121,7 @@ struct hwc_dma_buf {
u32 gpa_mkey;
u32 num_reqs;
- struct hwc_work_request reqs[];
+ struct hwc_work_request reqs[] __counted_by(num_reqs);
};
typedef void hwc_rx_event_handler_t(void *ctx, u32 gdma_rxq_id,
@@ -182,6 +186,7 @@ struct hw_channel_context {
u32 pf_dest_vrq_id;
u32 pf_dest_vrcq_id;
+ u32 hwc_timeout;
struct hwc_caller_ctx *caller_ctx;
};
diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h
index 9eef19972845..76147feb0d10 100644
--- a/include/net/mana/mana.h
+++ b/include/net/mana/mana.h
@@ -4,6 +4,8 @@
#ifndef _MANA_H
#define _MANA_H
+#include <net/xdp.h>
+
#include "gdma.h"
#include "hw_channel.h"
@@ -101,9 +103,10 @@ struct mana_txq {
/* skb data and frags dma mappings */
struct mana_skb_head {
- dma_addr_t dma_handle[MAX_SKB_FRAGS + 1];
+ /* GSO pkts may have 2 SGEs for the linear part */
+ dma_addr_t dma_handle[MAX_SKB_FRAGS + 2];
- u32 size[MAX_SKB_FRAGS + 1];
+ u32 size[MAX_SKB_FRAGS + 2];
};
#define MANA_HEADROOM sizeof(struct mana_skb_head)
@@ -280,6 +283,7 @@ struct mana_recv_buf_oob {
struct gdma_wqe_request wqe_req;
void *buf_va;
+ bool from_pool; /* allocated from a page pool */
/* SGL of the buffer going to be sent as part of the work request. */
u32 num_sge;
@@ -330,10 +334,12 @@ struct mana_rxq {
bool xdp_flush;
int xdp_rc; /* XDP redirect return code */
+ struct page_pool *page_pool;
+
/* MUST BE THE LAST MEMBER:
* Each receive buffer has an associated mana_recv_buf_oob.
*/
- struct mana_recv_buf_oob rx_oobs[];
+ struct mana_recv_buf_oob rx_oobs[] __counted_by(num_rx_buf);
};
struct mana_tx_qp {
@@ -347,6 +353,33 @@ struct mana_tx_qp {
struct mana_ethtool_stats {
u64 stop_queue;
u64 wake_queue;
+ u64 hc_rx_discards_no_wqe;
+ u64 hc_rx_err_vport_disabled;
+ u64 hc_rx_bytes;
+ u64 hc_rx_ucast_pkts;
+ u64 hc_rx_ucast_bytes;
+ u64 hc_rx_bcast_pkts;
+ u64 hc_rx_bcast_bytes;
+ u64 hc_rx_mcast_pkts;
+ u64 hc_rx_mcast_bytes;
+ u64 hc_tx_err_gf_disabled;
+ u64 hc_tx_err_vport_disabled;
+ u64 hc_tx_err_inval_vportoffset_pkt;
+ u64 hc_tx_err_vlan_enforcement;
+ u64 hc_tx_err_eth_type_enforcement;
+ u64 hc_tx_err_sa_enforcement;
+ u64 hc_tx_err_sqpdid_enforcement;
+ u64 hc_tx_err_cqpdid_enforcement;
+ u64 hc_tx_err_mtu_violation;
+ u64 hc_tx_err_inval_oob;
+ u64 hc_tx_bytes;
+ u64 hc_tx_ucast_pkts;
+ u64 hc_tx_ucast_bytes;
+ u64 hc_tx_bcast_pkts;
+ u64 hc_tx_bcast_bytes;
+ u64 hc_tx_mcast_pkts;
+ u64 hc_tx_mcast_bytes;
+ u64 hc_tx_err_gdma;
u64 tx_cqe_err;
u64 tx_cqe_unknown_type;
u64 rx_coalesced_err;
@@ -437,6 +470,7 @@ u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
+void mana_query_gf_stats(struct mana_port_context *apc);
extern const struct ethtool_ops mana_ethtool_ops;
@@ -578,8 +612,51 @@ struct mana_fence_rq_resp {
struct gdma_resp_hdr hdr;
}; /* HW DATA */
+/* Query stats RQ */
+struct mana_query_gf_stat_req {
+ struct gdma_req_hdr hdr;
+ u64 req_stats;
+}; /* HW DATA */
+
+struct mana_query_gf_stat_resp {
+ struct gdma_resp_hdr hdr;
+ u64 reported_stats;
+ /* rx errors/discards */
+ u64 rx_discards_nowqe;
+ u64 rx_err_vport_disabled;
+ /* rx bytes/packets */
+ u64 hc_rx_bytes;
+ u64 hc_rx_ucast_pkts;
+ u64 hc_rx_ucast_bytes;
+ u64 hc_rx_bcast_pkts;
+ u64 hc_rx_bcast_bytes;
+ u64 hc_rx_mcast_pkts;
+ u64 hc_rx_mcast_bytes;
+ /* tx errors */
+ u64 tx_err_gf_disabled;
+ u64 tx_err_vport_disabled;
+ u64 tx_err_inval_vport_offset_pkt;
+ u64 tx_err_vlan_enforcement;
+ u64 tx_err_ethtype_enforcement;
+ u64 tx_err_SA_enforcement;
+ u64 tx_err_SQPDID_enforcement;
+ u64 tx_err_CQPDID_enforcement;
+ u64 tx_err_mtu_violation;
+ u64 tx_err_inval_oob;
+ /* tx bytes/packets */
+ u64 hc_tx_bytes;
+ u64 hc_tx_ucast_pkts;
+ u64 hc_tx_ucast_bytes;
+ u64 hc_tx_bcast_pkts;
+ u64 hc_tx_bcast_bytes;
+ u64 hc_tx_mcast_pkts;
+ u64 hc_tx_mcast_bytes;
+ /* tx error */
+ u64 tx_err_gdma;
+}; /* HW DATA */
+
/* Configure vPort Rx Steering */
-struct mana_cfg_rx_steer_req {
+struct mana_cfg_rx_steer_req_v2 {
struct gdma_req_hdr hdr;
mana_handle_t vport;
u16 num_indir_entries;
@@ -592,6 +669,8 @@ struct mana_cfg_rx_steer_req {
u8 reserved;
mana_handle_t default_rxobj;
u8 hashkey[MANA_HASH_KEY_SIZE];
+ u8 cqe_coalescing_enable;
+ u8 reserved2[7];
}; /* HW DATA */
struct mana_cfg_rx_steer_resp {
@@ -655,6 +734,42 @@ struct mana_deregister_filter_resp {
struct gdma_resp_hdr hdr;
}; /* HW DATA */
+/* Requested GF stats Flags */
+/* Rx discards/Errors */
+#define STATISTICS_FLAGS_RX_DISCARDS_NO_WQE 0x0000000000000001
+#define STATISTICS_FLAGS_RX_ERRORS_VPORT_DISABLED 0x0000000000000002
+/* Rx bytes/pkts */
+#define STATISTICS_FLAGS_HC_RX_BYTES 0x0000000000000004
+#define STATISTICS_FLAGS_HC_RX_UCAST_PACKETS 0x0000000000000008
+#define STATISTICS_FLAGS_HC_RX_UCAST_BYTES 0x0000000000000010
+#define STATISTICS_FLAGS_HC_RX_MCAST_PACKETS 0x0000000000000020
+#define STATISTICS_FLAGS_HC_RX_MCAST_BYTES 0x0000000000000040
+#define STATISTICS_FLAGS_HC_RX_BCAST_PACKETS 0x0000000000000080
+#define STATISTICS_FLAGS_HC_RX_BCAST_BYTES 0x0000000000000100
+/* Tx errors */
+#define STATISTICS_FLAGS_TX_ERRORS_GF_DISABLED 0x0000000000000200
+#define STATISTICS_FLAGS_TX_ERRORS_VPORT_DISABLED 0x0000000000000400
+#define STATISTICS_FLAGS_TX_ERRORS_INVAL_VPORT_OFFSET_PACKETS \
+ 0x0000000000000800
+#define STATISTICS_FLAGS_TX_ERRORS_VLAN_ENFORCEMENT 0x0000000000001000
+#define STATISTICS_FLAGS_TX_ERRORS_ETH_TYPE_ENFORCEMENT \
+ 0x0000000000002000
+#define STATISTICS_FLAGS_TX_ERRORS_SA_ENFORCEMENT 0x0000000000004000
+#define STATISTICS_FLAGS_TX_ERRORS_SQPDID_ENFORCEMENT 0x0000000000008000
+#define STATISTICS_FLAGS_TX_ERRORS_CQPDID_ENFORCEMENT 0x0000000000010000
+#define STATISTICS_FLAGS_TX_ERRORS_MTU_VIOLATION 0x0000000000020000
+#define STATISTICS_FLAGS_TX_ERRORS_INVALID_OOB 0x0000000000040000
+/* Tx bytes/pkts */
+#define STATISTICS_FLAGS_HC_TX_BYTES 0x0000000000080000
+#define STATISTICS_FLAGS_HC_TX_UCAST_PACKETS 0x0000000000100000
+#define STATISTICS_FLAGS_HC_TX_UCAST_BYTES 0x0000000000200000
+#define STATISTICS_FLAGS_HC_TX_MCAST_PACKETS 0x0000000000400000
+#define STATISTICS_FLAGS_HC_TX_MCAST_BYTES 0x0000000000800000
+#define STATISTICS_FLAGS_HC_TX_BCAST_PACKETS 0x0000000001000000
+#define STATISTICS_FLAGS_HC_TX_BCAST_BYTES 0x0000000002000000
+/* Tx error */
+#define STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR 0x0000000004000000
+
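These flag bits populate the req_stats bitmask of struct
mana_query_gf_stat_req defined earlier, selecting which counters the
hardware should report. A sketch (sending the request over the HWC is
driver-internal and omitted here):

	/* Request only the host-counter RX/TX byte statistics. */
	static void my_fill_gf_stat_req(struct mana_query_gf_stat_req *req)
	{
		req->req_stats = STATISTICS_FLAGS_HC_RX_BYTES |
				 STATISTICS_FLAGS_HC_TX_BYTES;
	}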
#define MANA_MAX_NUM_QUEUES 64
#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1)
diff --git a/include/net/mctp.h b/include/net/mctp.h
index 82800d521c3d..2bff5f47ce82 100644
--- a/include/net/mctp.h
+++ b/include/net/mctp.h
@@ -234,9 +234,9 @@ struct mctp_flow {
struct mctp_route {
mctp_eid_t min, max;
- struct mctp_dev *dev;
- unsigned int mtu;
unsigned char type;
+ unsigned int mtu;
+ struct mctp_dev *dev;
int (*output)(struct mctp_route *route,
struct sk_buff *skb);
@@ -249,6 +249,7 @@ struct mctp_route {
struct mctp_route *mctp_route_lookup(struct net *net, unsigned int dnet,
mctp_eid_t daddr);
+/* always takes ownership of skb */
int mctp_local_output(struct sock *sk, struct mctp_route *rt,
struct sk_buff *skb, mctp_eid_t daddr, u8 req_tag);
diff --git a/include/net/mptcp.h b/include/net/mptcp.h
index 3c5c68618fcc..fb996124b3d5 100644
--- a/include/net/mptcp.h
+++ b/include/net/mptcp.h
@@ -96,6 +96,27 @@ struct mptcp_out_options {
#endif
};
+#define MPTCP_SCHED_NAME_MAX 16
+#define MPTCP_SUBFLOWS_MAX 8
+
+struct mptcp_sched_data {
+ bool reinject;
+ u8 subflows;
+ struct mptcp_subflow_context *contexts[MPTCP_SUBFLOWS_MAX];
+};
+
+struct mptcp_sched_ops {
+ int (*get_subflow)(struct mptcp_sock *msk,
+ struct mptcp_sched_data *data);
+
+ char name[MPTCP_SCHED_NAME_MAX];
+ struct module *owner;
+ struct list_head list;
+
+ void (*init)(struct mptcp_sock *msk);
+ void (*release)(struct mptcp_sock *msk);
+} ____cacheline_aligned_in_smp;
+
#ifdef CONFIG_MPTCP
void mptcp_init(void);
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 52eae0943433..9bbdf6eaa942 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -488,9 +488,6 @@ void igmp6_event_report(struct sk_buff *skb);
#ifdef CONFIG_SYSCTL
int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos);
-int ndisc_ifinfo_sysctl_strategy(struct ctl_table *ctl,
- void __user *oldval, size_t __user *oldlenp,
- void __user *newval, size_t newlen);
#endif
void inet6_ifinfo_notify(int event, struct inet6_dev *idev);
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index f6a8ecc6b1fa..0d28172193fa 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -162,7 +162,7 @@ struct neighbour {
struct rcu_head rcu;
struct net_device *dev;
netdevice_tracker dev_tracker;
- u8 primary_key[0];
+ u8 primary_key[];
} __randomize_layout;
struct neigh_ops {
@@ -394,8 +394,6 @@ void neigh_for_each(struct neigh_table *tbl,
void __neigh_for_each_release(struct neigh_table *tbl,
int (*cb)(struct neighbour *));
int neigh_xmit(int fam, struct net_device *, const void *, struct sk_buff *);
-void pneigh_for_each(struct neigh_table *tbl,
- void (*cb)(struct pneigh_entry *));
struct neigh_seq_state {
struct seq_net_private p;
@@ -541,7 +539,7 @@ static inline int neigh_output(struct neighbour *n, struct sk_buff *skb,
READ_ONCE(hh->hh_len))
return neigh_hh_output(hh, skb);
- return n->output(n, skb);
+ return READ_ONCE(n->output)(n, skb);
}
static inline struct neighbour *
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 78beaa765c73..13b3a4e29fdb 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -42,6 +42,7 @@
#include <linux/idr.h>
#include <linux/skbuff.h>
#include <linux/notifier.h>
+#include <linux/xarray.h>
struct user_namespace;
struct proc_dir_entry;
@@ -69,7 +70,7 @@ struct net {
atomic_t dev_unreg_count;
unsigned int dev_base_seq; /* protected by rtnl_mutex */
- int ifindex;
+ u32 ifindex;
spinlock_t nsid_lock;
atomic_t fnhe_genid;
@@ -110,6 +111,7 @@ struct net {
struct hlist_head *dev_name_head;
struct hlist_head *dev_index_head;
+ struct xarray dev_by_index;
struct raw_notifier_head netdev_chain;
/* Note that @hash_mix can be read millions times per second,
@@ -366,21 +368,30 @@ static inline void put_net_track(struct net *net, netns_tracker *tracker)
typedef struct {
#ifdef CONFIG_NET_NS
- struct net *net;
+ struct net __rcu *net;
#endif
} possible_net_t;
static inline void write_pnet(possible_net_t *pnet, struct net *net)
{
#ifdef CONFIG_NET_NS
- pnet->net = net;
+ rcu_assign_pointer(pnet->net, net);
#endif
}
static inline struct net *read_pnet(const possible_net_t *pnet)
{
#ifdef CONFIG_NET_NS
- return pnet->net;
+ return rcu_dereference_protected(pnet->net, true);
+#else
+ return &init_net;
+#endif
+}
+
+static inline struct net *read_pnet_rcu(possible_net_t *pnet)
+{
+#ifdef CONFIG_NET_NS
+ return rcu_dereference(pnet->net);
#else
return &init_net;
#endif
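The split into read_pnet() (for writers or otherwise-locked paths) and
read_pnet_rcu() (for lockless readers) follows the usual RCU pattern.
A sketch, with `struct my_obj` standing in for any structure that
embeds a possible_net_t:

	struct my_obj {
		possible_net_t pnet;
	};

	static void my_peek_netns(struct my_obj *obj)
	{
		struct net *net;

		rcu_read_lock();
		net = read_pnet_rcu(&obj->pnet);
		/* ... use net; only stable inside this RCU section ... */
		rcu_read_unlock();
	}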
@@ -469,15 +480,17 @@ void unregister_pernet_device(struct pernet_operations *);
struct ctl_table;
+#define register_net_sysctl(net, path, table) \
+ register_net_sysctl_sz(net, path, table, ARRAY_SIZE(table))
#ifdef CONFIG_SYSCTL
int net_sysctl_init(void);
-struct ctl_table_header *register_net_sysctl(struct net *net, const char *path,
- struct ctl_table *table);
+struct ctl_table_header *register_net_sysctl_sz(struct net *net, const char *path,
+ struct ctl_table *table, size_t table_size);
void unregister_net_sysctl_table(struct ctl_table_header *header);
#else
static inline int net_sysctl_init(void) { return 0; }
-static inline struct ctl_table_header *register_net_sysctl(struct net *net,
- const char *path, struct ctl_table *table)
+static inline struct ctl_table_header *register_net_sysctl_sz(struct net *net,
+ const char *path, struct ctl_table *table, size_t table_size)
{
return NULL;
}
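Since register_net_sysctl is now a macro expanding to
register_net_sysctl_sz() with ARRAY_SIZE(table), existing callers keep
working while the core learns the real table size. A sketch with
illustrative names and path:

	static int my_value;

	static struct ctl_table my_table[] = {
		{
			.procname	= "my_knob",
			.data		= &my_value,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{ }	/* sentinel */
	};

	static struct ctl_table_header *my_register(struct net *net)
	{
		/* table_size is derived as ARRAY_SIZE(my_table) */
		return register_net_sysctl(net, "net/my_subsys", my_table);
	}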
diff --git a/include/net/netdev_queues.h b/include/net/netdev_queues.h
index d68b0a483431..8b8ed4e13d74 100644
--- a/include/net/netdev_queues.h
+++ b/include/net/netdev_queues.h
@@ -128,7 +128,7 @@ netdev_txq_completed_mb(struct netdev_queue *dev_queue,
netdev_txq_completed_mb(txq, pkts, bytes); \
\
_res = -1; \
- if (pkts && likely(get_desc > start_thrs)) { \
+ if (pkts && likely(get_desc >= start_thrs)) { \
_res = 1; \
if (unlikely(netif_tx_queue_stopped(txq)) && \
!(down_cond)) { \
diff --git a/include/net/netdev_rx_queue.h b/include/net/netdev_rx_queue.h
new file mode 100644
index 000000000000..aa1716fb0e53
--- /dev/null
+++ b/include/net/netdev_rx_queue.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_NETDEV_RX_QUEUE_H
+#define _LINUX_NETDEV_RX_QUEUE_H
+
+#include <linux/kobject.h>
+#include <linux/netdevice.h>
+#include <linux/sysfs.h>
+#include <net/xdp.h>
+
+/* This structure contains an instance of an RX queue. */
+struct netdev_rx_queue {
+ struct xdp_rxq_info xdp_rxq;
+#ifdef CONFIG_RPS
+ struct rps_map __rcu *rps_map;
+ struct rps_dev_flow_table __rcu *rps_flow_table;
+#endif
+ struct kobject kobj;
+ struct net_device *dev;
+ netdevice_tracker dev_tracker;
+
+#ifdef CONFIG_XDP_SOCKETS
+ struct xsk_buff_pool *pool;
+#endif
+ /* NAPI instance for the queue
+ * Readers and writers must hold RTNL
+ */
+ struct napi_struct *napi;
+} ____cacheline_aligned_in_smp;
+
+/*
+ * RX queue sysfs structures and functions.
+ */
+struct rx_queue_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct netdev_rx_queue *queue, char *buf);
+ ssize_t (*store)(struct netdev_rx_queue *queue,
+ const char *buf, size_t len);
+};
+
+static inline struct netdev_rx_queue *
+__netif_get_rx_queue(struct net_device *dev, unsigned int rxq)
+{
+ return dev->_rx + rxq;
+}
+
+#ifdef CONFIG_SYSFS
+static inline unsigned int
+get_netdev_rx_queue_index(struct netdev_rx_queue *queue)
+{
+ struct net_device *dev = queue->dev;
+ int index = queue - dev->_rx;
+
+ BUG_ON(index >= dev->num_rx_queues);
+ return index;
+}
+#endif
+#endif
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index a72028dbef0c..cba3ccf03fcc 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -160,10 +160,6 @@ static inline struct net *nf_ct_net(const struct nf_conn *ct)
return read_pnet(&ct->ct_net);
}
-/* Alter reply tuple (maybe alter helper). */
-void nf_conntrack_alter_reply(struct nf_conn *ct,
- const struct nf_conntrack_tuple *newreply);
-
/* Is this tuple taken? (ignoring any belonging to the given
conntrack). */
int nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
@@ -190,10 +186,6 @@ static inline void nf_ct_put(struct nf_conn *ct)
nf_ct_destroy(&ct->ct_general);
}
-/* Protocol module loading */
-int nf_ct_l3proto_try_module_get(unsigned short l3proto);
-void nf_ct_l3proto_module_put(unsigned short l3proto);
-
/* load module; enable/disable conntrack in this namespace */
int nf_ct_netns_get(struct net *net, u8 nfproto);
void nf_ct_netns_put(struct net *net, u8 nfproto);
@@ -288,6 +280,16 @@ static inline bool nf_is_loopback_packet(const struct sk_buff *skb)
return skb->dev && skb->skb_iif && skb->dev->flags & IFF_LOOPBACK;
}
+static inline void nf_conntrack_alter_reply(struct nf_conn *ct,
+ const struct nf_conntrack_tuple *newreply)
+{
+ /* Must be unconfirmed, so not in hash table yet */
+ if (WARN_ON(nf_ct_is_confirmed(ct)))
+ return;
+
+ ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
+}
+
#define nfct_time_stamp ((u32)(jiffies))
/* jiffies until ct expires, 0 if already expired */
diff --git a/include/net/netfilter/nf_conntrack_acct.h b/include/net/netfilter/nf_conntrack_acct.h
index 4b2b7f8914ea..a120685cac93 100644
--- a/include/net/netfilter/nf_conntrack_acct.h
+++ b/include/net/netfilter/nf_conntrack_acct.h
@@ -78,6 +78,4 @@ static inline void nf_ct_acct_update(struct nf_conn *ct, u32 dir,
void nf_conntrack_acct_pernet_init(struct net *net);
-void nf_conntrack_acct_fini(void);
-
#endif /* _NF_CONNTRACK_ACCT_H */
diff --git a/include/net/netfilter/nf_conntrack_act_ct.h b/include/net/netfilter/nf_conntrack_act_ct.h
index 078d3c52c03f..e5f2f0b73a9a 100644
--- a/include/net/netfilter/nf_conntrack_act_ct.h
+++ b/include/net/netfilter/nf_conntrack_act_ct.h
@@ -20,7 +20,22 @@ static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_find(const struct nf
#endif
}
-static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct nf_conn *ct)
+static inline void nf_conn_act_ct_ext_fill(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo)
+{
+#if IS_ENABLED(CONFIG_NET_ACT_CT)
+ struct nf_conn_act_ct_ext *act_ct_ext;
+
+ act_ct_ext = nf_conn_act_ct_ext_find(ct);
+ if (dev_net(skb->dev) == &init_net && act_ct_ext)
+ act_ct_ext->ifindex[CTINFO2DIR(ctinfo)] = skb->dev->ifindex;
+#endif
+}
+
+static inline struct
+nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo)
{
#if IS_ENABLED(CONFIG_NET_ACT_CT)
struct nf_conn_act_ct_ext *act_ct = nf_ct_ext_find(ct, NF_CT_EXT_ACT_CT);
@@ -29,22 +44,11 @@ static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct nf_conn *
return act_ct;
act_ct = nf_ct_ext_add(ct, NF_CT_EXT_ACT_CT, GFP_ATOMIC);
+ nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
return act_ct;
#else
return NULL;
#endif
}
-static inline void nf_conn_act_ct_ext_fill(struct sk_buff *skb, struct nf_conn *ct,
- enum ip_conntrack_info ctinfo)
-{
-#if IS_ENABLED(CONFIG_NET_ACT_CT)
- struct nf_conn_act_ct_ext *act_ct_ext;
-
- act_ct_ext = nf_conn_act_ct_ext_find(ct);
- if (dev_net(skb->dev) == &init_net && act_ct_ext)
- act_ct_ext->ifindex[CTINFO2DIR(ctinfo)] = skb->dev->ifindex;
-#endif
-}
-
#endif /* _NF_CONNTRACK_ACT_CT_H */
diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h
index 0855b60fba17..165e7a03b8e9 100644
--- a/include/net/netfilter/nf_conntrack_expect.h
+++ b/include/net/netfilter/nf_conntrack_expect.h
@@ -26,6 +26,15 @@ struct nf_conntrack_expect {
struct nf_conntrack_tuple tuple;
struct nf_conntrack_tuple_mask mask;
+ /* Usage count. */
+ refcount_t use;
+
+ /* Flags */
+ unsigned int flags;
+
+ /* Expectation class */
+ unsigned int class;
+
/* Function to call after setup and insertion */
void (*expectfn)(struct nf_conn *new,
struct nf_conntrack_expect *this);
@@ -39,15 +48,6 @@ struct nf_conntrack_expect {
/* Timer function; deletes the expectation. */
struct timer_list timeout;
- /* Usage count. */
- refcount_t use;
-
- /* Flags */
- unsigned int flags;
-
- /* Expectation class */
- unsigned int class;
-
#if IS_ENABLED(CONFIG_NF_NAT)
union nf_inet_addr saved_addr;
/* This is the original per-proto part, used to map the
@@ -100,7 +100,7 @@ nf_ct_expect_find_get(struct net *net,
struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net,
const struct nf_conntrack_zone *zone,
- const struct nf_conntrack_tuple *tuple);
+ const struct nf_conntrack_tuple *tuple, bool unlink);
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
u32 portid, int report);
diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h
index f30b1694b690..de2f956abf34 100644
--- a/include/net/netfilter/nf_conntrack_helper.h
+++ b/include/net/netfilter/nf_conntrack_helper.h
@@ -136,8 +136,6 @@ static inline void *nfct_help_data(const struct nf_conn *ct)
return (void *)help->data;
}
-void nf_conntrack_helper_pernet_init(struct net *net);
-
int nf_conntrack_helper_init(void);
void nf_conntrack_helper_fini(void);
@@ -182,5 +180,4 @@ void nf_nat_helper_unregister(struct nf_conntrack_nat_helper *nat);
int nf_nat_helper_try_module_get(const char *name, u16 l3num,
u8 protonum);
void nf_nat_helper_put(struct nf_conntrack_helper *helper);
-void nf_ct_set_auto_assign_helper_warned(struct net *net);
#endif /*_NF_CONNTRACK_HELPER_H*/
diff --git a/include/net/netfilter/nf_conntrack_labels.h b/include/net/netfilter/nf_conntrack_labels.h
index 66bab6c60d12..6903f72bcc15 100644
--- a/include/net/netfilter/nf_conntrack_labels.h
+++ b/include/net/netfilter/nf_conntrack_labels.h
@@ -39,7 +39,7 @@ static inline struct nf_conn_labels *nf_ct_labels_ext_add(struct nf_conn *ct)
#ifdef CONFIG_NF_CONNTRACK_LABELS
struct net *net = nf_ct_net(ct);
- if (net->ct.labels_used == 0)
+ if (atomic_read(&net->ct.labels_used) == 0)
return NULL;
return nf_ct_ext_add(ct, NF_CT_EXT_LABELS, GFP_ATOMIC);
@@ -52,7 +52,6 @@ int nf_connlabels_replace(struct nf_conn *ct,
const u32 *data, const u32 *mask, unsigned int words);
#ifdef CONFIG_NF_CONNTRACK_LABELS
-int nf_conntrack_labels_init(void);
int nf_connlabels_get(struct net *net, unsigned int bit);
void nf_connlabels_put(struct net *net);
#else
diff --git a/include/net/netfilter/nf_conntrack_tuple.h b/include/net/netfilter/nf_conntrack_tuple.h
index 9334371c94e2..f7dd950ff250 100644
--- a/include/net/netfilter/nf_conntrack_tuple.h
+++ b/include/net/netfilter/nf_conntrack_tuple.h
@@ -67,6 +67,9 @@ struct nf_conntrack_tuple {
/* The protocol. */
u_int8_t protonum;
+ /* The direction must be ignored for the tuplehash */
+ struct { } __nfct_hash_offsetend;
+
/* The direction (for tuplehash) */
u_int8_t dir;
} dst;
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index f37f9f34430c..a763dd327c6e 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -53,6 +53,7 @@ struct nf_flowtable_type {
struct list_head list;
int family;
int (*init)(struct nf_flowtable *ft);
+ bool (*gc)(const struct flow_offload *flow);
int (*setup)(struct nf_flowtable *ft,
struct net_device *dev,
enum flow_block_command cmd);
@@ -61,6 +62,8 @@ struct nf_flowtable_type {
enum flow_offload_tuple_dir dir,
struct nf_flow_rule *flow_rule);
void (*free)(struct nf_flowtable *ft);
+ void (*get)(struct nf_flowtable *ft);
+ void (*put)(struct nf_flowtable *ft);
nf_hookfn *hook;
struct module *owner;
};
@@ -71,12 +74,13 @@ enum nf_flowtable_flags {
};
struct nf_flowtable {
- struct list_head list;
- struct rhashtable rhashtable;
- int priority;
+ unsigned int flags; /* readonly in datapath */
+ int priority; /* control path (padding hole) */
+ struct rhashtable rhashtable; /* datapath, read-mostly members come first */
+
+ struct list_head list; /* slowpath parts */
const struct nf_flowtable_type *type;
struct delayed_work gc_work;
- unsigned int flags;
struct flow_block flow_block;
struct rw_semaphore flow_block_lock; /* Guards flow_block */
possible_net_t net;
@@ -239,6 +243,11 @@ nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
}
list_add_tail(&block_cb->list, &block->cb_list);
+ up_write(&flow_table->flow_block_lock);
+
+ if (flow_table->type->get)
+ flow_table->type->get(flow_table);
+ return 0;
unlock:
up_write(&flow_table->flow_block_lock);
@@ -261,10 +270,13 @@ nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
WARN_ON(true);
}
up_write(&flow_table->flow_block_lock);
+
+ if (flow_table->type->put)
+ flow_table->type->put(flow_table);
}
-int flow_offload_route_init(struct flow_offload *flow,
- const struct nf_flow_route *route);
+void flow_offload_route_init(struct flow_offload *flow,
+ struct nf_flow_route *route);
int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow);
void flow_offload_refresh(struct nf_flowtable *flow_table,
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index ee47d7143d99..510244cc0f8f 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -178,9 +178,9 @@ static inline __be32 nft_reg_load_be32(const u32 *sreg)
return *(__force __be32 *)sreg;
}
-static inline void nft_reg_store64(u32 *dreg, u64 val)
+static inline void nft_reg_store64(u64 *dreg, u64 val)
{
- put_unaligned(val, (u64 *)dreg);
+ put_unaligned(val, dreg);
}
static inline u64 nft_reg_load64(const u32 *sreg)
@@ -205,6 +205,7 @@ static inline void nft_data_copy(u32 *dst, const struct nft_data *src,
* @nla: netlink attributes
* @portid: netlink portID of the original message
* @seq: netlink sequence number
+ * @flags: modifiers to new request
* @family: protocol family
* @level: depth of the chains
* @report: notify via unicast netlink message
@@ -274,11 +275,15 @@ struct nft_userdata {
unsigned char data[];
};
+/* placeholder structure for opaque set element backend representation. */
+struct nft_elem_priv { };
+
/**
* struct nft_set_elem - generic representation of set elements
*
* @key: element key
* @key_end: closing element key
+ * @data: element data
* @priv: element private data and extensions
*/
struct nft_set_elem {
@@ -294,9 +299,14 @@ struct nft_set_elem {
u32 buf[NFT_DATA_VALUE_MAXLEN / sizeof(u32)];
struct nft_data val;
} data;
- void *priv;
+ struct nft_elem_priv *priv;
};
+static inline void *nft_elem_priv_cast(const struct nft_elem_priv *priv)
+{
+ return (void *)priv;
+}
+
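The empty nft_elem_priv type gives set backends a typed handle in place
of the old bare void *. A sketch of how a backend might embed it, with
`struct my_elem` purely illustrative:

	struct my_elem {
		struct nft_elem_priv priv;	/* must come first for the cast */
		u32 key;
	};

	static struct my_elem *to_my_elem(const struct nft_elem_priv *priv)
	{
		return nft_elem_priv_cast(priv);	/* drops const, returns void * */
	}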
struct nft_set;
struct nft_set_iter {
u8 genmask;
@@ -306,7 +316,7 @@ struct nft_set_iter {
int (*fn)(const struct nft_ctx *ctx,
struct nft_set *set,
const struct nft_set_iter *iter,
- struct nft_set_elem *elem);
+ struct nft_elem_priv *elem_priv);
};
/**
@@ -317,10 +327,10 @@ struct nft_set_iter {
* @dtype: data type
* @dlen: data length
* @objtype: object type
- * @flags: flags
* @size: number of set elements
* @policy: set policy
* @gc_int: garbage collector interval
+ * @timeout: element timeout
* @field_len: length of each field in concatenation, bytes
* @field_count: number of concatenated fields in element
* @expr: set must support for expressions
@@ -343,9 +353,9 @@ struct nft_set_desc {
/**
* enum nft_set_class - performance class
*
- * @NFT_LOOKUP_O_1: constant, O(1)
- * @NFT_LOOKUP_O_LOG_N: logarithmic, O(log N)
- * @NFT_LOOKUP_O_N: linear, O(N)
+ * @NFT_SET_CLASS_O_1: constant, O(1)
+ * @NFT_SET_CLASS_O_LOG_N: logarithmic, O(log N)
+ * @NFT_SET_CLASS_O_N: linear, O(N)
*/
enum nft_set_class {
NFT_SET_CLASS_O_1,
@@ -414,9 +424,13 @@ struct nft_set_ext;
* @remove: remove element from set
* @walk: iterate over all set elements
* @get: get set elements
+ * @commit: commit set elements
+ * @abort: abort set elements
* @privsize: function to return size of set private data
+ * @estimate: estimate the required memory size and the lookup complexity class
* @init: initialize private data of new set instance
* @destroy: destroy private data of set instance
+ * @gc_init: initialize garbage collection
* @elemsize: element private size
*
* Operations lookup, update and delete have simpler interfaces, are faster
@@ -430,7 +444,8 @@ struct nft_set_ops {
const struct nft_set_ext **ext);
bool (*update)(struct nft_set *set,
const u32 *key,
- void *(*new)(struct nft_set *,
+ struct nft_elem_priv *
+ (*new)(struct nft_set *,
const struct nft_expr *,
struct nft_regs *),
const struct nft_expr *expr,
@@ -442,27 +457,27 @@ struct nft_set_ops {
int (*insert)(const struct net *net,
const struct nft_set *set,
const struct nft_set_elem *elem,
- struct nft_set_ext **ext);
+ struct nft_elem_priv **priv);
void (*activate)(const struct net *net,
const struct nft_set *set,
- const struct nft_set_elem *elem);
- void * (*deactivate)(const struct net *net,
+ struct nft_elem_priv *elem_priv);
+ struct nft_elem_priv * (*deactivate)(const struct net *net,
const struct nft_set *set,
const struct nft_set_elem *elem);
- bool (*flush)(const struct net *net,
+ void (*flush)(const struct net *net,
const struct nft_set *set,
- void *priv);
+ struct nft_elem_priv *priv);
void (*remove)(const struct net *net,
const struct nft_set *set,
- const struct nft_set_elem *elem);
+ struct nft_elem_priv *elem_priv);
void (*walk)(const struct nft_ctx *ctx,
struct nft_set *set,
struct nft_set_iter *iter);
- void * (*get)(const struct net *net,
+ struct nft_elem_priv * (*get)(const struct net *net,
const struct nft_set *set,
const struct nft_set_elem *elem,
unsigned int flags);
- void (*commit)(const struct nft_set *set);
+ void (*commit)(struct nft_set *set);
void (*abort)(const struct nft_set *set);
u64 (*privsize)(const struct nlattr * const nla[],
const struct nft_set_desc *desc);
@@ -512,6 +527,7 @@ struct nft_set_elem_expr {
*
* @list: table set list node
* @bindings: list of set bindings
+ * @refs: internal refcounting for async set destruction
* @table: table this set belongs to
* @net: netnamespace this set belongs to
* @name: name of the set
@@ -530,17 +546,22 @@ struct nft_set_elem_expr {
* @policy: set parameterization (see enum nft_set_policies)
* @udlen: user data length
* @udata: user data
- * @expr: stateful expression
+ * @pending_update: list of pending update set elements
* @ops: set ops
* @flags: set flags
+ * @dead: set will be freed, never cleared
* @genmask: generation mask
* @klen: key length
* @dlen: data length
+ * @num_exprs: number of expressions
+ * @exprs: stateful expressions
+ * @catchall_list: list of catch-all set elements
* @data: private set data
*/
struct nft_set {
struct list_head list;
struct list_head bindings;
+ refcount_t refs;
struct nft_table *table;
possible_net_t net;
char *name;
@@ -562,7 +583,8 @@ struct nft_set {
struct list_head pending_update;
/* runtime data below here */
const struct nft_set_ops *ops ____cacheline_aligned;
- u16 flags:14,
+ u16 flags:13,
+ dead:1,
genmask:2;
u8 klen;
u8 dlen;
@@ -583,6 +605,11 @@ static inline void *nft_set_priv(const struct nft_set *set)
return (void *)set->data;
}
+static inline bool nft_set_gc_is_pending(const struct nft_set *s)
+{
+ return refcount_read(&s->refs) != 1;
+}
+
static inline struct nft_set *nft_set_container_of(const void *priv)
{
return (void *)priv - offsetof(struct nft_set, data);
@@ -596,7 +623,6 @@ struct nft_set *nft_set_lookup_global(const struct net *net,
struct nft_set_ext *nft_set_catchall_lookup(const struct net *net,
const struct nft_set *set);
-void *nft_set_catchall_gc(const struct nft_set *set);
static inline unsigned long nft_set_gc_interval(const struct nft_set *set)
{
@@ -675,6 +701,7 @@ extern const struct nft_set_ext_type nft_set_ext_types[];
*
* @len: length of extension area
* @offset: offsets of individual extension types
+ * @ext_len: length of the expected extension (used for sanity checks)
*/
struct nft_set_ext_tmpl {
u16 len;
@@ -781,16 +808,22 @@ static inline struct nft_set_elem_expr *nft_set_ext_expr(const struct nft_set_ex
return nft_set_ext(ext, NFT_SET_EXT_EXPRESSIONS);
}
-static inline bool nft_set_elem_expired(const struct nft_set_ext *ext)
+static inline bool __nft_set_elem_expired(const struct nft_set_ext *ext,
+ u64 tstamp)
{
return nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION) &&
- time_is_before_eq_jiffies64(*nft_set_ext_expiration(ext));
+ time_after_eq64(tstamp, *nft_set_ext_expiration(ext));
+}
+
+static inline bool nft_set_elem_expired(const struct nft_set_ext *ext)
+{
+ return __nft_set_elem_expired(ext, get_jiffies_64());
}
static inline struct nft_set_ext *nft_set_elem_ext(const struct nft_set *set,
- void *elem)
+ const struct nft_elem_priv *elem_priv)
{
- return elem + set->ops->elemsize;
+ return (void *)elem_priv + set->ops->elemsize;
}
static inline struct nft_object **nft_set_ext_obj(const struct nft_set_ext *ext)
@@ -802,72 +835,19 @@ struct nft_expr *nft_set_elem_expr_alloc(const struct nft_ctx *ctx,
const struct nft_set *set,
const struct nlattr *attr);
-void *nft_set_elem_init(const struct nft_set *set,
- const struct nft_set_ext_tmpl *tmpl,
- const u32 *key, const u32 *key_end, const u32 *data,
- u64 timeout, u64 expiration, gfp_t gfp);
+struct nft_elem_priv *nft_set_elem_init(const struct nft_set *set,
+ const struct nft_set_ext_tmpl *tmpl,
+ const u32 *key, const u32 *key_end,
+ const u32 *data,
+ u64 timeout, u64 expiration, gfp_t gfp);
int nft_set_elem_expr_clone(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_expr *expr_array[]);
-void nft_set_elem_destroy(const struct nft_set *set, void *elem,
+void nft_set_elem_destroy(const struct nft_set *set,
+ const struct nft_elem_priv *elem_priv,
bool destroy_expr);
void nf_tables_set_elem_destroy(const struct nft_ctx *ctx,
- const struct nft_set *set, void *elem);
-
-/**
- * struct nft_set_gc_batch_head - nf_tables set garbage collection batch
- *
- * @rcu: rcu head
- * @set: set the elements belong to
- * @cnt: count of elements
- */
-struct nft_set_gc_batch_head {
- struct rcu_head rcu;
- const struct nft_set *set;
- unsigned int cnt;
-};
-
-#define NFT_SET_GC_BATCH_SIZE ((PAGE_SIZE - \
- sizeof(struct nft_set_gc_batch_head)) / \
- sizeof(void *))
-
-/**
- * struct nft_set_gc_batch - nf_tables set garbage collection batch
- *
- * @head: GC batch head
- * @elems: garbage collection elements
- */
-struct nft_set_gc_batch {
- struct nft_set_gc_batch_head head;
- void *elems[NFT_SET_GC_BATCH_SIZE];
-};
-
-struct nft_set_gc_batch *nft_set_gc_batch_alloc(const struct nft_set *set,
- gfp_t gfp);
-void nft_set_gc_batch_release(struct rcu_head *rcu);
-
-static inline void nft_set_gc_batch_complete(struct nft_set_gc_batch *gcb)
-{
- if (gcb != NULL)
- call_rcu(&gcb->head.rcu, nft_set_gc_batch_release);
-}
-
-static inline struct nft_set_gc_batch *
-nft_set_gc_batch_check(const struct nft_set *set, struct nft_set_gc_batch *gcb,
- gfp_t gfp)
-{
- if (gcb != NULL) {
- if (gcb->head.cnt + 1 < ARRAY_SIZE(gcb->elems))
- return gcb;
- nft_set_gc_batch_complete(gcb);
- }
- return nft_set_gc_batch_alloc(set, gfp);
-}
-
-static inline void nft_set_gc_batch_add(struct nft_set_gc_batch *gcb,
- void *elem)
-{
- gcb->elems[gcb->head.cnt++] = elem;
-}
+ const struct nft_set *set,
+ const struct nft_elem_priv *elem_priv);
struct nft_expr_ops;
/**
@@ -876,6 +856,7 @@ struct nft_expr_ops;
* @select_ops: function to select nft_expr_ops
* @release_ops: release nft_expr_ops
* @ops: default ops, used when no select_ops functions is present
+ * @inner_ops: inner ops, used for inner packet operation
* @list: used internally
* @name: Identifier
* @owner: module reference
@@ -917,14 +898,22 @@ struct nft_offload_ctx;
* struct nft_expr_ops - nf_tables expression operations
*
* @eval: Expression evaluation function
+ * @clone: Expression clone function
* @size: full expression size, including private data size
* @init: initialization function
* @activate: activate expression in the next generation
* @deactivate: deactivate expression in next generation
* @destroy: destruction function, called after synchronize_rcu
+ * @destroy_clone: destruction clone function
* @dump: function to dump parameters
- * @type: expression type
* @validate: validate expression, called during loop detection
+ * @reduce: reduce expression
+ * @gc: garbage collection expression
+ * @offload: hardware offload expression
+ * @offload_action: function to report whether the expression needs a slot
+ * in the flow offload action array
+ * @offload_stats: function to synchronize hardware stats via updating the counter expression
+ * @type: expression type
* @data: extra data to attach to this expression operation
*/
struct nft_expr_ops {
@@ -1077,14 +1066,21 @@ struct nft_rule_blob {
/**
* struct nft_chain - nf_tables chain
*
+ * @blob_gen_0: rule blob pointer to the current generation
+ * @blob_gen_1: rule blob pointer to the future generation
* @rules: list of rules in the chain
* @list: used internally
* @rhlhead: used internally
* @table: table that this chain belongs to
* @handle: chain handle
* @use: number of jump references to this chain
- * @flags: bitmask of enum nft_chain_flags
+ * @flags: bitmask of enum NFTA_CHAIN_FLAGS
+ * @bound: whether the chain is bound
+ * @genmask: generation mask
* @name: name of the chain
+ * @udlen: user data length
+ * @udata: user data in the chain
+ * @blob_next: rule blob pointer to the next in the chain
*/
struct nft_chain {
struct nft_rule_blob __rcu *blob_gen_0;
@@ -1109,7 +1105,7 @@ struct nft_chain {
int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain);
int nft_setelem_validate(const struct nft_ctx *ctx, struct nft_set *set,
const struct nft_set_iter *iter,
- struct nft_set_elem *elem);
+ struct nft_elem_priv *elem_priv);
int nft_set_catchall_validate(const struct nft_ctx *ctx, struct nft_set *set);
int nf_tables_bind_chain(const struct nft_ctx *ctx, struct nft_chain *chain);
void nf_tables_unbind_chain(const struct nft_ctx *ctx, struct nft_chain *chain);
@@ -1182,6 +1178,7 @@ struct nft_hook {
* @hook_list: list of netfilter hooks (for NFPROTO_NETDEV family)
* @type: chain type
* @policy: default policy
+ * @flags: indicates whether the base chain is disabled
* @stats: per-cpu chain stats
* @chain: the chain
* @flow_block: flow block (for hardware offload)
@@ -1211,6 +1208,29 @@ int __nft_release_basechain(struct nft_ctx *ctx);
unsigned int nft_do_chain(struct nft_pktinfo *pkt, void *priv);
+static inline bool nft_use_inc(u32 *use)
+{
+ if (*use == UINT_MAX)
+ return false;
+
+ (*use)++;
+
+ return true;
+}
+
+static inline void nft_use_dec(u32 *use)
+{
+ WARN_ON_ONCE((*use)-- == 0);
+}
+
+/* For error and abort path: restore use counter to previous state. */
+static inline void nft_use_inc_restore(u32 *use)
+{
+ WARN_ON_ONCE(!nft_use_inc(use));
+}
+
+#define nft_use_dec_restore nft_use_dec
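The helpers above replace the old 30-bit `use` bitfields with a bounded
u32 counter. A sketch of the intended pattern (names hypothetical);
`chain->use` is the jump-reference counter documented in struct
nft_chain:

	static int my_bind(struct nft_chain *chain)
	{
		if (!nft_use_inc(&chain->use))
			return -EMFILE;	/* counter saturated */
		return 0;
	}

	static void my_unbind(struct nft_chain *chain)
	{
		nft_use_dec(&chain->use);
	}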
+
/**
* struct nft_table - nf_tables table
*
@@ -1223,10 +1243,13 @@ unsigned int nft_do_chain(struct nft_pktinfo *pkt, void *priv);
* @hgenerator: handle generator state
* @handle: table handle
* @use: number of chain references to this table
+ * @family: address family
* @flags: table flag (see enum nft_table_flags)
* @genmask: generation mask
- * @afinfo: address family info
+ * @nlpid: netlink port ID
* @name: name of the table
+ * @udlen: length of the user data
+ * @udata: user data
* @validate_state: internal, set when transaction adds jumps
*/
struct nft_table {
@@ -1284,11 +1307,13 @@ struct nft_object_hash_key {
* struct nft_object - nf_tables stateful object
*
* @list: table stateful object list node
- * @key: keys that identify this object
* @rhlhead: nft_objname_ht node
+ * @key: keys that identify this object
* @genmask: generation mask
* @use: number of references to this stateful object
* @handle: unique object handle
+ * @udlen: length of user data
+ * @udata: user data
* @ops: object operations
* @data: object data, layout depends on type
*/
@@ -1296,8 +1321,8 @@ struct nft_object {
struct list_head list;
struct rhlist_head rhlhead;
struct nft_object_hash_key key;
- u32 genmask:2,
- use:30;
+ u32 genmask:2;
+ u32 use;
u64 handle;
u16 udlen;
u8 *udata;
@@ -1332,6 +1357,7 @@ void nft_obj_notify(struct net *net, const struct nft_table *table,
* @type: stateful object numeric type
* @owner: module owner
* @maxattr: maximum netlink attribute
+ * @family: address family for AF-specific object types
* @policy: netlink attribute policy
*/
struct nft_object_type {
@@ -1341,6 +1367,7 @@ struct nft_object_type {
struct list_head list;
u32 type;
unsigned int maxattr;
+ u8 family;
struct module *owner;
const struct nla_policy *policy;
};
@@ -1354,6 +1381,7 @@ struct nft_object_type {
* @destroy: release existing stateful object
* @dump: netlink dump stateful object
* @update: update stateful object
+ * @type: pointer to object type
*/
struct nft_object_ops {
void (*eval)(struct nft_object *obj,
@@ -1389,9 +1417,8 @@ void nft_unregister_obj(struct nft_object_type *obj_type);
* @genmask: generation mask
* @use: number of references to this flow table
* @handle: unique object handle
- * @dev_name: array of device names
+ * @hook_list: list of hooks, one per net_device in the flowtable
* @data: rhashtable and garbage collector
- * @ops: array of hooks
*/
struct nft_flowtable {
struct list_head list;
@@ -1399,8 +1426,8 @@ struct nft_flowtable {
char *name;
int hooknum;
int ops_len;
- u32 genmask:2,
- use:30;
+ u32 genmask:2;
+ u32 use;
u64 handle;
/* runtime data below here */
struct list_head hook_list ____cacheline_aligned;
@@ -1534,39 +1561,30 @@ static inline void nft_set_elem_change_active(const struct net *net,
#endif /* IS_ENABLED(CONFIG_NF_TABLES) */
-/*
- * We use a free bit in the genmask field to indicate the element
- * is busy, meaning it is currently being processed either by
- * the netlink API or GC.
- *
- * Even though the genmask is only a single byte wide, this works
- * because the extension structure if fully constant once initialized,
- * so there are no non-atomic write accesses unless it is already
- * marked busy.
- */
-#define NFT_SET_ELEM_BUSY_MASK (1 << 2)
+#define NFT_SET_ELEM_DEAD_MASK (1 << 2)
#if defined(__LITTLE_ENDIAN_BITFIELD)
-#define NFT_SET_ELEM_BUSY_BIT 2
+#define NFT_SET_ELEM_DEAD_BIT 2
#elif defined(__BIG_ENDIAN_BITFIELD)
-#define NFT_SET_ELEM_BUSY_BIT (BITS_PER_LONG - BITS_PER_BYTE + 2)
+#define NFT_SET_ELEM_DEAD_BIT (BITS_PER_LONG - BITS_PER_BYTE + 2)
#else
#error
#endif
-static inline int nft_set_elem_mark_busy(struct nft_set_ext *ext)
+static inline void nft_set_elem_dead(struct nft_set_ext *ext)
{
unsigned long *word = (unsigned long *)ext;
BUILD_BUG_ON(offsetof(struct nft_set_ext, genmask) != 0);
- return test_and_set_bit(NFT_SET_ELEM_BUSY_BIT, word);
+ set_bit(NFT_SET_ELEM_DEAD_BIT, word);
}
-static inline void nft_set_elem_clear_busy(struct nft_set_ext *ext)
+static inline int nft_set_elem_is_dead(const struct nft_set_ext *ext)
{
unsigned long *word = (unsigned long *)ext;
- clear_bit(NFT_SET_ELEM_BUSY_BIT, word);
+ BUILD_BUG_ON(offsetof(struct nft_set_ext, genmask) != 0);
+ return test_bit(NFT_SET_ELEM_DEAD_BIT, word);
}
/**
@@ -1611,6 +1629,7 @@ struct nft_trans_set {
u64 timeout;
bool update;
bool bound;
+ u32 size;
};
#define nft_trans_set(trans) \
@@ -1625,6 +1644,8 @@ struct nft_trans_set {
(((struct nft_trans_set *)trans->data)->timeout)
#define nft_trans_set_gc_int(trans) \
(((struct nft_trans_set *)trans->data)->gc_int)
+#define nft_trans_set_size(trans) \
+ (((struct nft_trans_set *)trans->data)->size)
struct nft_trans_chain {
struct nft_chain *chain;
@@ -1666,14 +1687,14 @@ struct nft_trans_table {
struct nft_trans_elem {
struct nft_set *set;
- struct nft_set_elem elem;
+ struct nft_elem_priv *elem_priv;
bool bound;
};
#define nft_trans_elem_set(trans) \
(((struct nft_trans_elem *)trans->data)->set)
-#define nft_trans_elem(trans) \
- (((struct nft_trans_elem *)trans->data)->elem)
+#define nft_trans_elem_priv(trans) \
+ (((struct nft_trans_elem *)trans->data)->elem_priv)
#define nft_trans_elem_set_bound(trans) \
(((struct nft_trans_elem *)trans->data)->bound)
@@ -1706,6 +1727,39 @@ struct nft_trans_flowtable {
#define nft_trans_flowtable_flags(trans) \
(((struct nft_trans_flowtable *)trans->data)->flags)
+#define NFT_TRANS_GC_BATCHCOUNT 256
+
+struct nft_trans_gc {
+ struct list_head list;
+ struct net *net;
+ struct nft_set *set;
+ u32 seq;
+ u16 count;
+ struct nft_elem_priv *priv[NFT_TRANS_GC_BATCHCOUNT];
+ struct rcu_head rcu;
+};
+
+struct nft_trans_gc *nft_trans_gc_alloc(struct nft_set *set,
+ unsigned int gc_seq, gfp_t gfp);
+void nft_trans_gc_destroy(struct nft_trans_gc *trans);
+
+struct nft_trans_gc *nft_trans_gc_queue_async(struct nft_trans_gc *gc,
+ unsigned int gc_seq, gfp_t gfp);
+void nft_trans_gc_queue_async_done(struct nft_trans_gc *gc);
+
+struct nft_trans_gc *nft_trans_gc_queue_sync(struct nft_trans_gc *gc, gfp_t gfp);
+void nft_trans_gc_queue_sync_done(struct nft_trans_gc *trans);
+
+void nft_trans_gc_elem_add(struct nft_trans_gc *gc, void *priv);
+
+struct nft_trans_gc *nft_trans_gc_catchall_async(struct nft_trans_gc *gc,
+ unsigned int gc_seq);
+struct nft_trans_gc *nft_trans_gc_catchall_sync(struct nft_trans_gc *gc);
+
+void nft_setelem_data_deactivate(const struct net *net,
+ const struct nft_set *set,
+ struct nft_elem_priv *elem_priv);
+
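A rough sketch of the synchronous GC flow implied by the batch API
above: deactivate an expired element, add it to the current batch
(requeueing hands back a fresh batch when one fills up), then complete
the run. Backend iteration details are elided and names are
illustrative.

	static void my_set_gc_sync(struct net *net, struct nft_set *set,
				   struct nft_elem_priv *expired_priv)
	{
		struct nft_trans_gc *gc;

		gc = nft_trans_gc_alloc(set, 0, GFP_KERNEL);
		if (!gc)
			return;

		gc = nft_trans_gc_queue_sync(gc, GFP_KERNEL);
		if (!gc)
			return;
		nft_setelem_data_deactivate(net, set, expired_priv);
		nft_trans_gc_elem_add(gc, expired_priv);

		nft_trans_gc_queue_sync_done(gc);
	}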
int __init nft_chain_filter_init(void);
void nft_chain_filter_fini(void);
@@ -1731,7 +1785,10 @@ struct nftables_pernet {
struct list_head notify_list;
struct mutex commit_mutex;
u64 table_handle;
+ u64 tstamp;
unsigned int base_seq;
+ unsigned int gc_seq;
+ u8 validate_state;
};
extern unsigned int nf_tables_net_id;
@@ -1741,6 +1798,11 @@ static inline struct nftables_pernet *nft_pernet(const struct net *net)
return net_generic(net, nf_tables_net_id);
}
+static inline u64 nft_net_tstamp(const struct net *net)
+{
+ return nft_pernet(net)->tstamp;
+}
+
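With the per-commit @tstamp added to struct nftables_pernet, expiry can
be evaluated against one timestamp so every element in a transaction
sees the same "now". A minimal sketch pairing it with
__nft_set_elem_expired() from earlier in this patch:

	static bool my_elem_expired(const struct net *net,
				    const struct nft_set_ext *ext)
	{
		return __nft_set_elem_expired(ext, nft_net_tstamp(net));
	}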
#define __NFT_REDUCE_READONLY 1UL
#define NFT_REDUCE_READONLY (void *)__NFT_REDUCE_READONLY
diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
index 947973623dc7..60a7d0ce3080 100644
--- a/include/net/netfilter/nf_tables_ipv4.h
+++ b/include/net/netfilter/nf_tables_ipv4.h
@@ -30,7 +30,7 @@ static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt)
return -1;
len = iph_totlen(pkt->skb, iph);
- thoff = iph->ihl * 4;
+ thoff = skb_network_offset(pkt->skb) + (iph->ihl * 4);
if (pkt->skb->len < len)
return -1;
else if (len < thoff)
diff --git a/include/net/netkit.h b/include/net/netkit.h
new file mode 100644
index 000000000000..9ec0163739f4
--- /dev/null
+++ b/include/net/netkit.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2023 Isovalent */
+#ifndef __NET_NETKIT_H
+#define __NET_NETKIT_H
+
+#include <linux/bpf.h>
+
+#ifdef CONFIG_NETKIT
+int netkit_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog);
+int netkit_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
+int netkit_prog_detach(const union bpf_attr *attr, struct bpf_prog *prog);
+int netkit_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr);
+INDIRECT_CALLABLE_DECLARE(struct net_device *netkit_peer_dev(struct net_device *dev));
+#else
+static inline int netkit_prog_attach(const union bpf_attr *attr,
+ struct bpf_prog *prog)
+{
+ return -EINVAL;
+}
+
+static inline int netkit_link_attach(const union bpf_attr *attr,
+ struct bpf_prog *prog)
+{
+ return -EINVAL;
+}
+
+static inline int netkit_prog_detach(const union bpf_attr *attr,
+ struct bpf_prog *prog)
+{
+ return -EINVAL;
+}
+
+static inline int netkit_prog_query(const union bpf_attr *attr,
+ union bpf_attr __user *uattr)
+{
+ return -EINVAL;
+}
+
+static inline struct net_device *netkit_peer_dev(struct net_device *dev)
+{
+ return NULL;
+}
+#endif /* CONFIG_NETKIT */
+#endif /* __NET_NETKIT_H */
diff --git a/include/net/netlink.h b/include/net/netlink.h
index b12cd957abb4..c19ff921b661 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -128,6 +128,8 @@
* nla_len(nla) length of attribute payload
*
* Attribute Payload Access for Basic Types:
+ * nla_get_uint(nla) get payload for a uint attribute
+ * nla_get_sint(nla) get payload for a sint attribute
* nla_get_u8(nla) get payload for a u8 attribute
* nla_get_u16(nla) get payload for a u16 attribute
* nla_get_u32(nla) get payload for a u32 attribute
@@ -183,6 +185,8 @@ enum {
NLA_REJECT,
NLA_BE16,
NLA_BE32,
+ NLA_SINT,
+ NLA_UINT,
__NLA_TYPE_MAX,
};
@@ -229,6 +233,7 @@ enum nla_policy_validation {
* nested header (or empty); len field is used if
* nested_policy is also used, for the max attr
* number in the nested policy.
+ * NLA_SINT, NLA_UINT,
* NLA_U8, NLA_U16,
* NLA_U32, NLA_U64,
* NLA_S8, NLA_S16,
@@ -260,12 +265,14 @@ enum nla_policy_validation {
* while an array has the nested attributes at another
* level down and the attribute types directly in the
* nesting don't matter.
+ * NLA_UINT,
* NLA_U8,
* NLA_U16,
* NLA_U32,
* NLA_U64,
* NLA_BE16,
* NLA_BE32,
+ * NLA_SINT,
* NLA_S8,
* NLA_S16,
* NLA_S32,
@@ -280,6 +287,7 @@ enum nla_policy_validation {
* or NLA_POLICY_FULL_RANGE_SIGNED() macros instead.
* Use the NLA_POLICY_MIN(), NLA_POLICY_MAX() and
* NLA_POLICY_RANGE() macros.
+ * NLA_UINT,
* NLA_U8,
* NLA_U16,
* NLA_U32,
@@ -288,6 +296,7 @@ enum nla_policy_validation {
* to a struct netlink_range_validation that indicates
* the min/max values.
* Use NLA_POLICY_FULL_RANGE().
+ * NLA_SINT,
* NLA_S8,
* NLA_S16,
* NLA_S32,
@@ -351,8 +360,8 @@ struct nla_policy {
const u32 mask;
const char *reject_message;
const struct nla_policy *nested_policy;
- struct netlink_range_validation *range;
- struct netlink_range_validation_signed *range_signed;
+ const struct netlink_range_validation *range;
+ const struct netlink_range_validation_signed *range_signed;
struct {
s16 min, max;
};
@@ -375,12 +384,13 @@ struct nla_policy {
#define NLA_POLICY_BITFIELD32(valid) \
{ .type = NLA_BITFIELD32, .bitfield32_valid = valid }
-#define __NLA_IS_UINT_TYPE(tp) \
- (tp == NLA_U8 || tp == NLA_U16 || tp == NLA_U32 || tp == NLA_U64)
+#define __NLA_IS_UINT_TYPE(tp) \
+ (tp == NLA_U8 || tp == NLA_U16 || tp == NLA_U32 || \
+ tp == NLA_U64 || tp == NLA_UINT || \
+ tp == NLA_BE16 || tp == NLA_BE32)
#define __NLA_IS_SINT_TYPE(tp) \
- (tp == NLA_S8 || tp == NLA_S16 || tp == NLA_S32 || tp == NLA_S64)
-#define __NLA_IS_BEINT_TYPE(tp) \
- (tp == NLA_BE16 || tp == NLA_BE32)
+ (tp == NLA_S8 || tp == NLA_S16 || tp == NLA_S32 || tp == NLA_S64 || \
+ tp == NLA_SINT)
#define __NLA_ENSURE(condition) BUILD_BUG_ON_ZERO(!(condition))
#define NLA_ENSURE_UINT_TYPE(tp) \
@@ -394,7 +404,6 @@ struct nla_policy {
#define NLA_ENSURE_INT_OR_BINARY_TYPE(tp) \
(__NLA_ENSURE(__NLA_IS_UINT_TYPE(tp) || \
__NLA_IS_SINT_TYPE(tp) || \
- __NLA_IS_BEINT_TYPE(tp) || \
tp == NLA_MSECS || \
tp == NLA_BINARY) + tp)
#define NLA_ENSURE_NO_VALIDATION_PTR(tp) \
@@ -402,8 +411,6 @@ struct nla_policy {
tp != NLA_REJECT && \
tp != NLA_NESTED && \
tp != NLA_NESTED_ARRAY) + tp)
-#define NLA_ENSURE_BEINT_TYPE(tp) \
- (__NLA_ENSURE(__NLA_IS_BEINT_TYPE(tp)) + tp)
#define NLA_POLICY_RANGE(tp, _min, _max) { \
.type = NLA_ENSURE_INT_OR_BINARY_TYPE(tp), \
@@ -1004,6 +1011,20 @@ static inline struct sk_buff *nlmsg_new(size_t payload, gfp_t flags)
}
/**
+ * nlmsg_new_large - Allocate a new netlink message with non-contiguous
+ * physical memory
+ * @payload: size of the message payload
+ *
+ * The allocated skb cannot carry frag pages in shinfo->frags[]:
+ * netlink_skb_destructor() sets skb->head to NULL, which bypasses
+ * most of the handling in skb_release_data().
+ */
+static inline struct sk_buff *nlmsg_new_large(size_t payload)
+{
+ return netlink_alloc_large_skb(nlmsg_total_size(payload), 0);
+}
+
+/**
* nlmsg_end - Finalize a netlink message
* @skb: socket buffer the message is stored in
* @nlh: netlink message header
@@ -1066,21 +1087,29 @@ static inline void nlmsg_free(struct sk_buff *skb)
}
/**
- * nlmsg_multicast - multicast a netlink message
+ * nlmsg_multicast_filtered - multicast a netlink message with a filter function
* @sk: netlink socket to spread messages to
* @skb: netlink message as socket buffer
* @portid: own netlink portid to avoid sending to yourself
* @group: multicast group id
* @flags: allocation flags
+ * @filter: filter function
+ * @filter_data: filter function private data
+ *
+ * Return: 0 on success, negative error code for failure.
*/
-static inline int nlmsg_multicast(struct sock *sk, struct sk_buff *skb,
- u32 portid, unsigned int group, gfp_t flags)
+static inline int nlmsg_multicast_filtered(struct sock *sk, struct sk_buff *skb,
+ u32 portid, unsigned int group,
+ gfp_t flags,
+ netlink_filter_fn filter,
+ void *filter_data)
{
int err;
NETLINK_CB(skb).dst_group = group;
- err = netlink_broadcast(sk, skb, portid, group, flags);
+ err = netlink_broadcast_filtered(sk, skb, portid, group, flags,
+ filter, filter_data);
if (err > 0)
err = 0;
@@ -1088,6 +1117,21 @@ static inline int nlmsg_multicast(struct sock *sk, struct sk_buff *skb,
}
/**
+ * nlmsg_multicast - multicast a netlink message
+ * @sk: netlink socket to spread messages to
+ * @skb: netlink message as socket buffer
+ * @portid: own netlink portid to avoid sending to yourself
+ * @group: multicast group id
+ * @flags: allocation flags
+ */
+static inline int nlmsg_multicast(struct sock *sk, struct sk_buff *skb,
+ u32 portid, unsigned int group, gfp_t flags)
+{
+ return nlmsg_multicast_filtered(sk, skb, portid, group, flags,
+ NULL, NULL);
+}
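A usage sketch for the filtered variant; demo_filter() and
demo_socket_wants_skb() are hypothetical, and the exact return-value
contract of netlink_filter_fn is defined by netlink_broadcast_filtered():

static int demo_filter(struct sock *dsk, struct sk_buff *skb, void *data)
{
        /* Decide per receiving socket whether this skb gets delivered */
        return demo_socket_wants_skb(dsk, data);
}

static int demo_notify(struct sock *sk, struct sk_buff *skb, u32 group)
{
        return nlmsg_multicast_filtered(sk, skb, 0, group, GFP_KERNEL,
                                        demo_filter, NULL);
}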
+
+/**
* nlmsg_unicast - unicast a netlink message
* @sk: netlink socket to spread message to
* @skb: netlink message as socket buffer
@@ -1193,7 +1237,7 @@ static inline void *nla_data(const struct nlattr *nla)
* nla_len - length of payload
* @nla: netlink attribute
*/
-static inline int nla_len(const struct nlattr *nla)
+static inline u16 nla_len(const struct nlattr *nla)
{
return nla->nla_len - NLA_HDRLEN;
}
@@ -1362,6 +1406,22 @@ static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value)
}
/**
+ * nla_put_uint - Add a variable-size unsigned int to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_uint(struct sk_buff *skb, int attrtype, u64 value)
+{
+ u64 tmp64 = value;
+ u32 tmp32 = value;
+
+ if (tmp64 == tmp32)
+ return nla_put_u32(skb, attrtype, tmp32);
+ return nla_put(skb, attrtype, sizeof(u64), &tmp64);
+}
+
+/**
* nla_put_be32 - Add a __be32 netlink attribute to a socket buffer
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
@@ -1516,6 +1576,22 @@ static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value,
}
/**
+ * nla_put_sint - Add a variable-size signed int to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_sint(struct sk_buff *skb, int attrtype, s64 value)
+{
+ s64 tmp64 = value;
+ s32 tmp32 = value;
+
+ if (tmp64 == tmp32)
+ return nla_put_s32(skb, attrtype, tmp32);
+ return nla_put(skb, attrtype, sizeof(s64), &tmp64);
+}
+
+/**
* nla_put_string - Add a string netlink attribute to a socket buffer
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
@@ -1672,6 +1748,17 @@ static inline u64 nla_get_u64(const struct nlattr *nla)
}
/**
+ * nla_get_uint - return payload of uint attribute
+ * @nla: uint netlink attribute
+ */
+static inline u64 nla_get_uint(const struct nlattr *nla)
+{
+ if (nla_len(nla) == sizeof(u32))
+ return nla_get_u32(nla);
+ return nla_get_u64(nla);
+}
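The encoding is symmetric between the put and get helpers: nla_put_uint()
emits a 4-byte attribute when the value fits in u32 and an 8-byte one
otherwise, and nla_get_uint() keys off nla_len() to pick the width back.
A sketch with a hypothetical attribute id DEMO_ATTR_VAL:

static int demo_fill(struct sk_buff *skb, u64 val)
{
        /* 300 is sent as a u32 payload; (5ULL << 32) as a u64 payload */
        return nla_put_uint(skb, DEMO_ATTR_VAL, val);
}

static u64 demo_parse(struct nlattr **tb)
{
        /* works for either width the sender chose */
        return nla_get_uint(tb[DEMO_ATTR_VAL]);
}

nla_put_sint()/nla_get_sint() below follow the same scheme for signed values.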
+
+/**
* nla_get_be64 - return payload of __be64 attribute
* @nla: __be64 netlink attribute
*/
@@ -1734,6 +1821,17 @@ static inline s64 nla_get_s64(const struct nlattr *nla)
}
/**
+ * nla_get_sint - return payload of sint attribute
+ * @nla: sint netlink attribute
+ */
+static inline s64 nla_get_sint(const struct nlattr *nla)
+{
+ if (nla_len(nla) == sizeof(s32))
+ return nla_get_s32(nla);
+ return nla_get_s64(nla);
+}
+
+/**
* nla_get_flag - return payload of flag attribute
* @nla: flag netlink attribute
*/
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index 1f463b3957c7..bae914815aa3 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -107,7 +107,7 @@ struct netns_ct {
struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
struct nf_ip_net nf_ct_proto;
#if defined(CONFIG_NF_CONNTRACK_LABELS)
- unsigned int labels_used;
+ atomic_t labels_used;
#endif
};
#endif
diff --git a/include/net/netns/core.h b/include/net/netns/core.h
index a91ef9f8de60..78214f1b43a2 100644
--- a/include/net/netns/core.h
+++ b/include/net/netns/core.h
@@ -13,6 +13,7 @@ struct netns_core {
struct ctl_table_header *sysctl_hdr;
int sysctl_somaxconn;
+ int sysctl_optmem_max;
u8 sysctl_txrehash;
#ifdef CONFIG_PROC_FS
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index db762e35aca9..c356c458b340 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -19,8 +19,7 @@ struct hlist_head;
struct fib_table;
struct sock;
struct local_ports {
- seqlock_t lock;
- int range[2];
+ u32 range; /* high << 16 | low */
bool warned;
};
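A sketch of how the packed range is meant to be read (an assumption based on
the "high << 16 | low" comment): a single READ_ONCE() now yields a consistent
low/high pair, which is what lets the old seqlock go away.

static inline void demo_get_port_range(const struct local_ports *lp,
                                       int *low, int *high)
{
        u32 range = READ_ONCE(lp->range);

        *low  = range & 0xffff;
        *high = range >> 16;
}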
@@ -42,6 +41,38 @@ struct inet_timewait_death_row {
struct tcp_fastopen_context;
struct netns_ipv4 {
+	/* The cacheline organization is documented in
+ * Documentation/networking/net_cachelines/netns_ipv4_sysctl.rst.
+ * Please update the document when adding new fields.
+ */
+
+ /* TX readonly hotpath cache lines */
+ __cacheline_group_begin(netns_ipv4_read_tx);
+ u8 sysctl_tcp_early_retrans;
+ u8 sysctl_tcp_tso_win_divisor;
+ u8 sysctl_tcp_tso_rtt_log;
+ u8 sysctl_tcp_autocorking;
+ int sysctl_tcp_min_snd_mss;
+ unsigned int sysctl_tcp_notsent_lowat;
+ int sysctl_tcp_limit_output_bytes;
+ int sysctl_tcp_min_rtt_wlen;
+ int sysctl_tcp_wmem[3];
+ u8 sysctl_ip_fwd_use_pmtu;
+ __cacheline_group_end(netns_ipv4_read_tx);
+
+ /* TXRX readonly hotpath cache lines */
+ __cacheline_group_begin(netns_ipv4_read_txrx);
+ u8 sysctl_tcp_moderate_rcvbuf;
+ __cacheline_group_end(netns_ipv4_read_txrx);
+
+ /* RX readonly hotpath cache line */
+ __cacheline_group_begin(netns_ipv4_read_rx);
+ u8 sysctl_ip_early_demux;
+ u8 sysctl_tcp_early_demux;
+ int sysctl_tcp_reordering;
+ int sysctl_tcp_rmem[3];
+ __cacheline_group_end(netns_ipv4_read_rx);
+
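The __cacheline_group_begin()/__cacheline_group_end() markers compile down to
zero-size anchor members, which makes the grouping checkable at build time.
A sketch, assuming the CACHELINE_ASSERT_GROUP_MEMBER() helper from
<linux/cache.h>:

/* Fails the build if the field ever drifts out of its read-mostly TX group */
CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
                              sysctl_tcp_wmem);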
struct inet_timewait_death_row tcp_death_row;
struct udp_table *udp_table;
@@ -65,6 +96,7 @@ struct netns_ipv4 {
#endif
bool fib_has_custom_local_routes;
bool fib_offload_disabled;
+ u8 sysctl_tcp_shrink_window;
#ifdef CONFIG_IP_ROUTE_CLASSID
atomic_t fib_num_tclassid_users;
#endif
@@ -95,17 +127,14 @@ struct netns_ipv4 {
u8 sysctl_ip_default_ttl;
u8 sysctl_ip_no_pmtu_disc;
- u8 sysctl_ip_fwd_use_pmtu;
u8 sysctl_ip_fwd_update_priority;
u8 sysctl_ip_nonlocal_bind;
u8 sysctl_ip_autobind_reuse;
/* Shall we try to damage output packets if routing dev changes? */
u8 sysctl_ip_dynaddr;
- u8 sysctl_ip_early_demux;
#ifdef CONFIG_NET_L3_MASTER_DEV
u8 sysctl_raw_l3mdev_accept;
#endif
- u8 sysctl_tcp_early_demux;
u8 sysctl_udp_early_demux;
u8 sysctl_nexthop_compat_mode;
@@ -118,7 +147,6 @@ struct netns_ipv4 {
u8 sysctl_tcp_mtu_probing;
int sysctl_tcp_mtu_probe_floor;
int sysctl_tcp_base_mss;
- int sysctl_tcp_min_snd_mss;
int sysctl_tcp_probe_threshold;
u32 sysctl_tcp_probe_interval;
@@ -131,17 +159,17 @@ struct netns_ipv4 {
u8 sysctl_tcp_syncookies;
u8 sysctl_tcp_migrate_req;
u8 sysctl_tcp_comp_sack_nr;
- int sysctl_tcp_reordering;
+ u8 sysctl_tcp_backlog_ack_defer;
+ u8 sysctl_tcp_pingpong_thresh;
+
u8 sysctl_tcp_retries1;
u8 sysctl_tcp_retries2;
u8 sysctl_tcp_orphan_retries;
u8 sysctl_tcp_tw_reuse;
int sysctl_tcp_fin_timeout;
- unsigned int sysctl_tcp_notsent_lowat;
u8 sysctl_tcp_sack;
u8 sysctl_tcp_window_scaling;
u8 sysctl_tcp_timestamps;
- u8 sysctl_tcp_early_retrans;
u8 sysctl_tcp_recovery;
u8 sysctl_tcp_thin_linear_timeouts;
u8 sysctl_tcp_slow_start_after_idle;
@@ -151,27 +179,19 @@ struct netns_ipv4 {
u8 sysctl_tcp_abort_on_overflow;
u8 sysctl_tcp_fack; /* obsolete */
int sysctl_tcp_max_reordering;
- int sysctl_tcp_adv_win_scale;
+ int sysctl_tcp_adv_win_scale; /* obsolete */
u8 sysctl_tcp_dsack;
u8 sysctl_tcp_app_win;
u8 sysctl_tcp_frto;
u8 sysctl_tcp_nometrics_save;
u8 sysctl_tcp_no_ssthresh_metrics_save;
- u8 sysctl_tcp_moderate_rcvbuf;
- u8 sysctl_tcp_tso_win_divisor;
u8 sysctl_tcp_workaround_signed_windows;
- int sysctl_tcp_limit_output_bytes;
int sysctl_tcp_challenge_ack_limit;
- int sysctl_tcp_min_rtt_wlen;
u8 sysctl_tcp_min_tso_segs;
- u8 sysctl_tcp_tso_rtt_log;
- u8 sysctl_tcp_autocorking;
u8 sysctl_tcp_reflect_tos;
int sysctl_tcp_invalid_ratelimit;
int sysctl_tcp_pacing_ss_ratio;
int sysctl_tcp_pacing_ca_ratio;
- int sysctl_tcp_wmem[3];
- int sysctl_tcp_rmem[3];
unsigned int sysctl_tcp_child_ehash_entries;
unsigned long sysctl_tcp_comp_sack_delay_ns;
unsigned long sysctl_tcp_comp_sack_slack_ns;
@@ -194,6 +214,7 @@ struct netns_ipv4 {
int sysctl_udp_rmem_min;
u8 sysctl_fib_notify_on_flag_change;
+ u8 sysctl_tcp_syn_linear_timeouts;
#ifdef CONFIG_NET_L3_MASTER_DEV
u8 sysctl_udp_l3mdev_accept;
diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
index 8c77832d0240..cc8060c017d5 100644
--- a/include/net/netns/nftables.h
+++ b/include/net/netns/nftables.h
@@ -2,8 +2,6 @@
#ifndef _NETNS_NFTABLES_H_
#define _NETNS_NFTABLES_H_
-#include <linux/list.h>
-
struct netns_nftables {
u8 gencursor;
};
diff --git a/include/net/netns/smc.h b/include/net/netns/smc.h
index 582212ada3ba..fc752a50f91b 100644
--- a/include/net/netns/smc.h
+++ b/include/net/netns/smc.h
@@ -22,5 +22,7 @@ struct netns_smc {
int sysctl_smcr_testlink_time;
int sysctl_wmem;
int sysctl_rmem;
+ int sysctl_max_links_per_lgr;
+ int sysctl_max_conns_per_lgr;
};
#endif
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
index bd7c3be4af5d..423b52eca908 100644
--- a/include/net/netns/xfrm.h
+++ b/include/net/netns/xfrm.h
@@ -50,6 +50,7 @@ struct netns_xfrm {
struct list_head policy_all;
struct hlist_head *policy_byidx;
unsigned int policy_idx_hmask;
+ unsigned int idx_generator;
struct hlist_head policy_inexact[XFRM_POLICY_MAX];
struct xfrm_policy_hash policy_bydst[XFRM_POLICY_MAX];
unsigned int policy_count[XFRM_POLICY_MAX * 2];
diff --git a/include/net/nexthop.h b/include/net/nexthop.h
index 2b12725de9c0..d92046a4a078 100644
--- a/include/net/nexthop.h
+++ b/include/net/nexthop.h
@@ -92,7 +92,7 @@ struct nh_res_table {
u32 unbalanced_timer;
u16 num_nh_buckets;
- struct nh_res_bucket nh_buckets[];
+ struct nh_res_bucket nh_buckets[] __counted_by(num_nh_buckets);
};
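__counted_by() ties a flexible array to its length field so that fortified
accessors and UBSAN can bounds-check indexing; the counter must be assigned
before the array is touched. A sketch with a hypothetical struct demo:

struct demo {
        u16 num;
        struct nh_res_bucket buckets[] __counted_by(num);
};

static struct demo *demo_alloc(u16 n)
{
        struct demo *d = kzalloc(struct_size(d, buckets, n), GFP_KERNEL);

        if (d)
                d->num = n;     /* set the counter before using buckets[] */
        return d;
}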
struct nh_grp_entry {
@@ -126,7 +126,7 @@ struct nh_group {
bool has_v4;
struct nh_res_table __rcu *res_table;
- struct nh_grp_entry nh_entries[];
+ struct nh_grp_entry nh_entries[] __counted_by(num_nh);
};
struct nexthop {
@@ -187,7 +187,7 @@ struct nh_notifier_grp_entry_info {
struct nh_notifier_grp_info {
u16 num_nh;
bool is_fdb;
- struct nh_notifier_grp_entry_info nh_entries[];
+ struct nh_notifier_grp_entry_info nh_entries[] __counted_by(num_nh);
};
struct nh_notifier_res_bucket_info {
@@ -200,7 +200,7 @@ struct nh_notifier_res_bucket_info {
struct nh_notifier_res_table_info {
u16 num_nh_buckets;
- struct nh_notifier_single_info nhs[];
+ struct nh_notifier_single_info nhs[] __counted_by(num_nh_buckets);
};
struct nh_notifier_info {
diff --git a/include/net/nl802154.h b/include/net/nl802154.h
index 8cd9d141f5af..4c752f799957 100644
--- a/include/net/nl802154.h
+++ b/include/net/nl802154.h
@@ -78,6 +78,10 @@ enum nl802154_commands {
NL802154_CMD_SCAN_DONE,
NL802154_CMD_SEND_BEACONS,
NL802154_CMD_STOP_BEACONS,
+ NL802154_CMD_ASSOCIATE,
+ NL802154_CMD_DISASSOCIATE,
+ NL802154_CMD_SET_MAX_ASSOCIATIONS,
+ NL802154_CMD_LIST_ASSOCIATIONS,
/* add new commands above here */
@@ -147,6 +151,8 @@ enum nl802154_attrs {
NL802154_ATTR_SCAN_DURATION,
NL802154_ATTR_SCAN_DONE_REASON,
NL802154_ATTR_BEACON_INTERVAL,
+ NL802154_ATTR_MAX_ASSOCIATIONS,
+ NL802154_ATTR_PEER,
/* add attributes here, update the policy in nl802154.c */
@@ -385,8 +391,6 @@ enum nl802154_supported_bool_states {
NL802154_SUPPORTED_BOOL_MAX = __NL802154_SUPPORTED_BOOL_AFTER_LAST - 1
};
-#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
-
enum nl802154_dev_addr_modes {
NL802154_DEV_ADDR_NONE,
__NL802154_DEV_ADDR_INVALID,
@@ -406,12 +410,26 @@ enum nl802154_dev_addr_attrs {
NL802154_DEV_ADDR_ATTR_SHORT,
NL802154_DEV_ADDR_ATTR_EXTENDED,
NL802154_DEV_ADDR_ATTR_PAD,
+ NL802154_DEV_ADDR_ATTR_PEER_TYPE,
/* keep last */
__NL802154_DEV_ADDR_ATTR_AFTER_LAST,
NL802154_DEV_ADDR_ATTR_MAX = __NL802154_DEV_ADDR_ATTR_AFTER_LAST - 1
};
+enum nl802154_peer_type {
+ NL802154_PEER_TYPE_UNSPEC,
+
+ NL802154_PEER_TYPE_PARENT,
+ NL802154_PEER_TYPE_CHILD,
+
+ /* keep last */
+ __NL802154_PEER_TYPE_AFTER_LAST,
+ NL802154_PEER_TYPE_MAX = __NL802154_PEER_TYPE_AFTER_LAST - 1
+};
+
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+
enum nl802154_key_id_modes {
NL802154_KEY_ID_MODE_IMPLICIT,
NL802154_KEY_ID_MODE_INDEX,
diff --git a/include/net/nsh.h b/include/net/nsh.h
index 350b1ad11c7f..16a751093896 100644
--- a/include/net/nsh.h
+++ b/include/net/nsh.h
@@ -192,7 +192,7 @@
/**
* struct nsh_md1_ctx - Keeps track of NSH context data
- * @nshc<1-4>: NSH Contexts.
+ * @context: NSH Contexts.
*/
struct nsh_md1_ctx {
__be32 context[4];
diff --git a/include/net/p8022.h b/include/net/p8022.h
index b690ffcad66b..a29e224ac498 100644
--- a/include/net/p8022.h
+++ b/include/net/p8022.h
@@ -13,7 +13,4 @@ register_8022_client(unsigned char type,
struct packet_type *pt,
struct net_device *orig_dev));
void unregister_8022_client(struct datalink_proto *proto);
-
-struct datalink_proto *make_8023_client(void);
-void destroy_8023_client(struct datalink_proto *dl);
#endif
diff --git a/include/net/page_pool.h b/include/net/page_pool.h
deleted file mode 100644
index 126f9e294389..000000000000
--- a/include/net/page_pool.h
+++ /dev/null
@@ -1,402 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * page_pool.h
- * Author: Jesper Dangaard Brouer <netoptimizer@brouer.com>
- * Copyright (C) 2016 Red Hat, Inc.
- */
-
-/**
- * DOC: page_pool allocator
- *
- * This page_pool allocator is optimized for the XDP mode that
- * uses one-frame-per-page, but have fallbacks that act like the
- * regular page allocator APIs.
- *
- * Basic use involve replacing alloc_pages() calls with the
- * page_pool_alloc_pages() call. Drivers should likely use
- * page_pool_dev_alloc_pages() replacing dev_alloc_pages().
- *
- * API keeps track of in-flight pages, in-order to let API user know
- * when it is safe to dealloactor page_pool object. Thus, API users
- * must make sure to call page_pool_release_page() when a page is
- * "leaving" the page_pool. Or call page_pool_put_page() where
- * appropiate. For maintaining correct accounting.
- *
- * API user must only call page_pool_put_page() once on a page, as it
- * will either recycle the page, or in case of elevated refcnt, it
- * will release the DMA mapping and in-flight state accounting. We
- * hope to lift this requirement in the future.
- */
-#ifndef _NET_PAGE_POOL_H
-#define _NET_PAGE_POOL_H
-
-#include <linux/mm.h> /* Needed by ptr_ring */
-#include <linux/ptr_ring.h>
-#include <linux/dma-direction.h>
-
-#define PP_FLAG_DMA_MAP BIT(0) /* Should page_pool do the DMA
- * map/unmap
- */
-#define PP_FLAG_DMA_SYNC_DEV BIT(1) /* If set all pages that the driver gets
- * from page_pool will be
- * DMA-synced-for-device according to
- * the length provided by the device
- * driver.
- * Please note DMA-sync-for-CPU is still
- * device driver responsibility
- */
-#define PP_FLAG_PAGE_FRAG BIT(2) /* for page frag feature */
-#define PP_FLAG_ALL (PP_FLAG_DMA_MAP |\
- PP_FLAG_DMA_SYNC_DEV |\
- PP_FLAG_PAGE_FRAG)
-
-/*
- * Fast allocation side cache array/stack
- *
- * The cache size and refill watermark is related to the network
- * use-case. The NAPI budget is 64 packets. After a NAPI poll the RX
- * ring is usually refilled and the max consumed elements will be 64,
- * thus a natural max size of objects needed in the cache.
- *
- * Keeping room for more objects, is due to XDP_DROP use-case. As
- * XDP_DROP allows the opportunity to recycle objects directly into
- * this array, as it shares the same softirq/NAPI protection. If
- * cache is already full (or partly full) then the XDP_DROP recycles
- * would have to take a slower code path.
- */
-#define PP_ALLOC_CACHE_SIZE 128
-#define PP_ALLOC_CACHE_REFILL 64
-struct pp_alloc_cache {
- u32 count;
- struct page *cache[PP_ALLOC_CACHE_SIZE];
-};
-
-struct page_pool_params {
- unsigned int flags;
- unsigned int order;
- unsigned int pool_size;
- int nid; /* Numa node id to allocate from pages from */
- struct device *dev; /* device, for DMA pre-mapping purposes */
- struct napi_struct *napi; /* Sole consumer of pages, otherwise NULL */
- enum dma_data_direction dma_dir; /* DMA mapping direction */
- unsigned int max_len; /* max DMA sync memory size */
- unsigned int offset; /* DMA addr offset */
- void (*init_callback)(struct page *page, void *arg);
- void *init_arg;
-};
-
-#ifdef CONFIG_PAGE_POOL_STATS
-struct page_pool_alloc_stats {
- u64 fast; /* fast path allocations */
- u64 slow; /* slow-path order 0 allocations */
- u64 slow_high_order; /* slow-path high order allocations */
- u64 empty; /* failed refills due to empty ptr ring, forcing
- * slow path allocation
- */
- u64 refill; /* allocations via successful refill */
- u64 waive; /* failed refills due to numa zone mismatch */
-};
-
-struct page_pool_recycle_stats {
- u64 cached; /* recycling placed page in the cache. */
- u64 cache_full; /* cache was full */
- u64 ring; /* recycling placed page back into ptr ring */
- u64 ring_full; /* page was released from page-pool because
- * PTR ring was full.
- */
- u64 released_refcnt; /* page released because of elevated
- * refcnt
- */
-};
-
-/* This struct wraps the above stats structs so users of the
- * page_pool_get_stats API can pass a single argument when requesting the
- * stats for the page pool.
- */
-struct page_pool_stats {
- struct page_pool_alloc_stats alloc_stats;
- struct page_pool_recycle_stats recycle_stats;
-};
-
-int page_pool_ethtool_stats_get_count(void);
-u8 *page_pool_ethtool_stats_get_strings(u8 *data);
-u64 *page_pool_ethtool_stats_get(u64 *data, void *stats);
-
-/*
- * Drivers that wish to harvest page pool stats and report them to users
- * (perhaps via ethtool, debugfs, or another mechanism) can allocate a
- * struct page_pool_stats call page_pool_get_stats to get stats for the specified pool.
- */
-bool page_pool_get_stats(struct page_pool *pool,
- struct page_pool_stats *stats);
-#else
-
-static inline int page_pool_ethtool_stats_get_count(void)
-{
- return 0;
-}
-
-static inline u8 *page_pool_ethtool_stats_get_strings(u8 *data)
-{
- return data;
-}
-
-static inline u64 *page_pool_ethtool_stats_get(u64 *data, void *stats)
-{
- return data;
-}
-
-#endif
-
-struct page_pool {
- struct page_pool_params p;
-
- struct delayed_work release_dw;
- void (*disconnect)(void *);
- unsigned long defer_start;
- unsigned long defer_warn;
-
- u32 pages_state_hold_cnt;
- unsigned int frag_offset;
- struct page *frag_page;
- long frag_users;
-
-#ifdef CONFIG_PAGE_POOL_STATS
- /* these stats are incremented while in softirq context */
- struct page_pool_alloc_stats alloc_stats;
-#endif
- u32 xdp_mem_id;
-
- /*
- * Data structure for allocation side
- *
- * Drivers allocation side usually already perform some kind
- * of resource protection. Piggyback on this protection, and
- * require driver to protect allocation side.
- *
- * For NIC drivers this means, allocate a page_pool per
- * RX-queue. As the RX-queue is already protected by
- * Softirq/BH scheduling and napi_schedule. NAPI schedule
- * guarantee that a single napi_struct will only be scheduled
- * on a single CPU (see napi_schedule).
- */
- struct pp_alloc_cache alloc ____cacheline_aligned_in_smp;
-
- /* Data structure for storing recycled pages.
- *
- * Returning/freeing pages is more complicated synchronization
- * wise, because free's can happen on remote CPUs, with no
- * association with allocation resource.
- *
- * Use ptr_ring, as it separates consumer and producer
- * effeciently, it a way that doesn't bounce cache-lines.
- *
- * TODO: Implement bulk return pages into this structure.
- */
- struct ptr_ring ring;
-
-#ifdef CONFIG_PAGE_POOL_STATS
- /* recycle stats are per-cpu to avoid locking */
- struct page_pool_recycle_stats __percpu *recycle_stats;
-#endif
- atomic_t pages_state_release_cnt;
-
- /* A page_pool is strictly tied to a single RX-queue being
- * protected by NAPI, due to above pp_alloc_cache. This
- * refcnt serves purpose is to simplify drivers error handling.
- */
- refcount_t user_cnt;
-
- u64 destroy_cnt;
-};
-
-struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
-
-static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
-{
- gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
-
- return page_pool_alloc_pages(pool, gfp);
-}
-
-struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
- unsigned int size, gfp_t gfp);
-
-static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
- unsigned int *offset,
- unsigned int size)
-{
- gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
-
- return page_pool_alloc_frag(pool, offset, size, gfp);
-}
-
-/* get the stored dma direction. A driver might decide to treat this locally and
- * avoid the extra cache line from page_pool to determine the direction
- */
-static
-inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
-{
- return pool->p.dma_dir;
-}
-
-bool page_pool_return_skb_page(struct page *page, bool napi_safe);
-
-struct page_pool *page_pool_create(const struct page_pool_params *params);
-
-struct xdp_mem_info;
-
-#ifdef CONFIG_PAGE_POOL
-void page_pool_unlink_napi(struct page_pool *pool);
-void page_pool_destroy(struct page_pool *pool);
-void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
- struct xdp_mem_info *mem);
-void page_pool_release_page(struct page_pool *pool, struct page *page);
-void page_pool_put_page_bulk(struct page_pool *pool, void **data,
- int count);
-#else
-static inline void page_pool_unlink_napi(struct page_pool *pool)
-{
-}
-
-static inline void page_pool_destroy(struct page_pool *pool)
-{
-}
-
-static inline void page_pool_use_xdp_mem(struct page_pool *pool,
- void (*disconnect)(void *),
- struct xdp_mem_info *mem)
-{
-}
-static inline void page_pool_release_page(struct page_pool *pool,
- struct page *page)
-{
-}
-
-static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
- int count)
-{
-}
-#endif
-
-void page_pool_put_defragged_page(struct page_pool *pool, struct page *page,
- unsigned int dma_sync_size,
- bool allow_direct);
-
-/* pp_frag_count represents the number of writers who can update the page
- * either by updating skb->data or via DMA mappings for the device.
- * We can't rely on the page refcnt for that as we don't know who might be
- * holding page references and we can't reliably destroy or sync DMA mappings
- * of the fragments.
- *
- * When pp_frag_count reaches 0 we can either recycle the page if the page
- * refcnt is 1 or return it back to the memory allocator and destroy any
- * mappings we have.
- */
-static inline void page_pool_fragment_page(struct page *page, long nr)
-{
- atomic_long_set(&page->pp_frag_count, nr);
-}
-
-static inline long page_pool_defrag_page(struct page *page, long nr)
-{
- long ret;
-
- /* If nr == pp_frag_count then we have cleared all remaining
- * references to the page. No need to actually overwrite it, instead
- * we can leave this to be overwritten by the calling function.
- *
- * The main advantage to doing this is that an atomic_read is
- * generally a much cheaper operation than an atomic update,
- * especially when dealing with a page that may be partitioned
- * into only 2 or 3 pieces.
- */
- if (atomic_long_read(&page->pp_frag_count) == nr)
- return 0;
-
- ret = atomic_long_sub_return(nr, &page->pp_frag_count);
- WARN_ON(ret < 0);
- return ret;
-}
-
-static inline bool page_pool_is_last_frag(struct page_pool *pool,
- struct page *page)
-{
- /* If fragments aren't enabled or count is 0 we were the last user */
- return !(pool->p.flags & PP_FLAG_PAGE_FRAG) ||
- (page_pool_defrag_page(page, 1) == 0);
-}
-
-static inline void page_pool_put_page(struct page_pool *pool,
- struct page *page,
- unsigned int dma_sync_size,
- bool allow_direct)
-{
- /* When page_pool isn't compiled-in, net/core/xdp.c doesn't
- * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
- */
-#ifdef CONFIG_PAGE_POOL
- if (!page_pool_is_last_frag(pool, page))
- return;
-
- page_pool_put_defragged_page(pool, page, dma_sync_size, allow_direct);
-#endif
-}
-
-/* Same as above but will try to sync the entire area pool->max_len */
-static inline void page_pool_put_full_page(struct page_pool *pool,
- struct page *page, bool allow_direct)
-{
- page_pool_put_page(pool, page, -1, allow_direct);
-}
-
-/* Same as above but the caller must guarantee safe context. e.g NAPI */
-static inline void page_pool_recycle_direct(struct page_pool *pool,
- struct page *page)
-{
- page_pool_put_full_page(pool, page, true);
-}
-
-#define PAGE_POOL_DMA_USE_PP_FRAG_COUNT \
- (sizeof(dma_addr_t) > sizeof(unsigned long))
-
-static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
-{
- dma_addr_t ret = page->dma_addr;
-
- if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
- ret |= (dma_addr_t)page->dma_addr_upper << 16 << 16;
-
- return ret;
-}
-
-static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
-{
- page->dma_addr = addr;
- if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
- page->dma_addr_upper = upper_32_bits(addr);
-}
-
-static inline bool is_page_pool_compiled_in(void)
-{
-#ifdef CONFIG_PAGE_POOL
- return true;
-#else
- return false;
-#endif
-}
-
-static inline bool page_pool_put(struct page_pool *pool)
-{
- return refcount_dec_and_test(&pool->user_cnt);
-}
-
-/* Caller must provide appropriate safe context, e.g. NAPI. */
-void page_pool_update_nid(struct page_pool *pool, int new_nid);
-static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
-{
- if (unlikely(pool->p.nid != new_nid))
- page_pool_update_nid(pool, new_nid);
-}
-
-#endif /* _NET_PAGE_POOL_H */
diff --git a/include/net/page_pool/helpers.h b/include/net/page_pool/helpers.h
new file mode 100644
index 000000000000..1d397c1a0043
--- /dev/null
+++ b/include/net/page_pool/helpers.h
@@ -0,0 +1,409 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * page_pool/helpers.h
+ * Author: Jesper Dangaard Brouer <netoptimizer@brouer.com>
+ * Copyright (C) 2016 Red Hat, Inc.
+ */
+
+/**
+ * DOC: page_pool allocator
+ *
+ * The page_pool allocator is optimized for recycling pages or page fragments
+ * used by skb packets and xdp frames.
+ *
+ * Basic use involves replacing any alloc_pages() calls with page_pool_alloc(),
+ * which allocates memory with or without page splitting depending on the
+ * requested memory size.
+ *
+ * If the driver knows that it always requires full pages or its allocations are
+ * always smaller than half a page, it can use one of the more specific API
+ * calls:
+ *
+ * 1. page_pool_alloc_pages(): allocate memory without page splitting when the
+ * driver knows that the memory it needs is always bigger than half of the page
+ * allocated from the page pool. There is no cache line dirtying for 'struct page'
+ * when a page is recycled back to the page pool.
+ *
+ * 2. page_pool_alloc_frag(): allocate memory with page splitting when the
+ * driver knows that the memory it needs is always smaller than or equal to
+ * half of the page allocated from the page pool. Page splitting enables memory
+ * saving and thus avoids TLB/cache misses for data access, but page splitting
+ * also has some cost, mainly some cache line dirtying/bouncing for
+ * 'struct page' and an atomic operation on page->pp_ref_count.
+ *
+ * The API keeps track of in-flight pages in order to let API users know when
+ * it is safe to free a page_pool object. To that end, API users must call
+ * page_pool_put_page() or page_pool_free_va() to free each page they got, or
+ * attach the page to a page_pool-aware object like skbs marked with
+ * skb_mark_for_recycle().
+ *
+ * page_pool_put_page() may be called multiple times on the same page if a page
+ * is split into multiple fragments. For the last fragment, it will either
+ * recycle the page, or in case of page->_refcount > 1, it will release the DMA
+ * mapping and in-flight state accounting.
+ *
+ * dma_sync_single_range_for_device() is only called for the last fragment when
+ * page_pool is created with PP_FLAG_DMA_SYNC_DEV flag, so it depends on the
+ * last freed fragment to do the sync_for_device operation for all fragments in
+ * the same page when a page is split. The API user must setup pool->p.max_len
+ * and pool->p.offset correctly and ensure that page_pool_put_page() is called
+ * with dma_sync_size being -1 for fragment API.
+ */
+#ifndef _NET_PAGE_POOL_HELPERS_H
+#define _NET_PAGE_POOL_HELPERS_H
+
+#include <net/page_pool/types.h>
+
+#ifdef CONFIG_PAGE_POOL_STATS
+/* Deprecated driver-facing API, use netlink instead */
+int page_pool_ethtool_stats_get_count(void);
+u8 *page_pool_ethtool_stats_get_strings(u8 *data);
+u64 *page_pool_ethtool_stats_get(u64 *data, void *stats);
+
+bool page_pool_get_stats(const struct page_pool *pool,
+ struct page_pool_stats *stats);
+#else
+static inline int page_pool_ethtool_stats_get_count(void)
+{
+ return 0;
+}
+
+static inline u8 *page_pool_ethtool_stats_get_strings(u8 *data)
+{
+ return data;
+}
+
+static inline u64 *page_pool_ethtool_stats_get(u64 *data, void *stats)
+{
+ return data;
+}
+#endif
+
+/**
+ * page_pool_dev_alloc_pages() - allocate a page.
+ * @pool: pool from which to allocate
+ *
+ * Get a page from the page allocator or page_pool caches.
+ */
+static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
+{
+ gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+
+ return page_pool_alloc_pages(pool, gfp);
+}
+
+/**
+ * page_pool_dev_alloc_frag() - allocate a page fragment.
+ * @pool: pool from which to allocate
+ * @offset: offset to the allocated page
+ * @size: requested size
+ *
+ * Get a page fragment from the page allocator or page_pool caches.
+ *
+ * Return:
+ * The allocated page fragment, otherwise NULL.
+ */
+static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
+ unsigned int *offset,
+ unsigned int size)
+{
+ gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+
+ return page_pool_alloc_frag(pool, offset, size, gfp);
+}
+
+static inline struct page *page_pool_alloc(struct page_pool *pool,
+ unsigned int *offset,
+ unsigned int *size, gfp_t gfp)
+{
+ unsigned int max_size = PAGE_SIZE << pool->p.order;
+ struct page *page;
+
+ if ((*size << 1) > max_size) {
+ *size = max_size;
+ *offset = 0;
+ return page_pool_alloc_pages(pool, gfp);
+ }
+
+ page = page_pool_alloc_frag(pool, offset, *size, gfp);
+ if (unlikely(!page))
+ return NULL;
+
+ /* There is very likely not enough space for another fragment, so append
+	 * the remaining size to the current fragment to avoid the truesize
+	 * underestimation problem.
+ */
+ if (pool->frag_offset + *size > max_size) {
+ *size = max_size - *offset;
+ pool->frag_offset = max_size;
+ }
+
+ return page;
+}
+
+/**
+ * page_pool_dev_alloc() - allocate a page or a page fragment.
+ * @pool: pool from which to allocate
+ * @offset: offset to the allocated page
+ * @size: in as the requested size, out as the allocated size
+ *
+ * Get a page or a page fragment from the page allocator or page_pool caches
+ * depending on the requested size, in order to allocate memory with the least
+ * memory utilization and performance penalty.
+ *
+ * Return:
+ * The allocated page or page fragment, otherwise NULL.
+ */
+static inline struct page *page_pool_dev_alloc(struct page_pool *pool,
+ unsigned int *offset,
+ unsigned int *size)
+{
+ gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+
+ return page_pool_alloc(pool, offset, size, gfp);
+}
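A sketch of the intended driver-side use (demo_rx_desc and the refill helper
are hypothetical): the pool decides between a full page and a fragment based
on the requested size, and reports back how much it actually granted.

static int demo_rx_refill(struct page_pool *pool, struct demo_rx_desc *desc)
{
        unsigned int offset;
        unsigned int size = 2048;       /* in: requested, out: granted */
        struct page *page;

        page = page_pool_dev_alloc(pool, &offset, &size);
        if (!page)
                return -ENOMEM;

        desc->page   = page;
        desc->offset = offset;
        desc->len    = size;            /* may be larger than requested */
        return 0;
}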
+
+static inline void *page_pool_alloc_va(struct page_pool *pool,
+ unsigned int *size, gfp_t gfp)
+{
+ unsigned int offset;
+ struct page *page;
+
+ /* Mask off __GFP_HIGHMEM to ensure we can use page_address() */
+ page = page_pool_alloc(pool, &offset, size, gfp & ~__GFP_HIGHMEM);
+ if (unlikely(!page))
+ return NULL;
+
+ return page_address(page) + offset;
+}
+
+/**
+ * page_pool_dev_alloc_va() - allocate a page or a page fragment and return its
+ * va.
+ * @pool: pool from which to allocate
+ * @size: in as the requested size, out as the allocated size
+ *
+ * This is just a thin wrapper around the page_pool_alloc() API, and
+ * it returns the va of the allocated page or page fragment.
+ *
+ * Return:
+ * The va for the allocated page or page fragment, otherwise NULL.
+ */
+static inline void *page_pool_dev_alloc_va(struct page_pool *pool,
+ unsigned int *size)
+{
+ gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+
+ return page_pool_alloc_va(pool, size, gfp);
+}
+
+/**
+ * page_pool_get_dma_dir() - Retrieve the stored DMA direction.
+ * @pool: pool from which page was allocated
+ *
+ * Get the stored dma direction. A driver might decide to store this locally
+ * and avoid the extra cache line from page_pool to determine the direction.
+ */
+static
+inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
+{
+ return pool->p.dma_dir;
+}
+
+/**
+ * page_pool_fragment_page() - split a fresh page into fragments
+ * @page: page to split
+ * @nr: references to set
+ *
+ * pp_ref_count represents the number of outstanding references to the page,
+ * which will be freed using page_pool APIs (rather than page allocator APIs
+ * like put_page()). Such references are usually held by page_pool-aware
+ * objects like skbs marked for page pool recycling.
+ *
+ * This helper allows the caller to take (set) multiple references to a
+ * freshly allocated page. The page must be freshly allocated (have a
+ * pp_ref_count of 1). This is commonly done by drivers and
+ * "fragment allocators" to save atomic operations - either when they know
+ * upfront how many references they will need; or to take MAX references and
+ * return the unused ones with a single atomic dec(), instead of performing
+ * multiple atomic inc() operations.
+ */
+static inline void page_pool_fragment_page(struct page *page, long nr)
+{
+ atomic_long_set(&page->pp_ref_count, nr);
+}
+
+static inline long page_pool_unref_page(struct page *page, long nr)
+{
+ long ret;
+
+ /* If nr == pp_ref_count then we have cleared all remaining
+ * references to the page:
+	 * 1. 'nr == 1': no need to actually overwrite it.
+	 * 2. 'nr != 1': overwrite it with one, which is the rare case
+	 *		 for pp_ref_count draining.
+	 *
+	 * The main advantage of doing this is that we not only avoid an
+	 * atomic update, as an atomic_read is generally a much cheaper
+	 * operation than an atomic update, especially when dealing with a
+	 * page that may be referenced by only 2 or 3 users; we also unify
+	 * the pp_ref_count handling by ensuring that all pages start out
+	 * partitioned into only 1 piece, and only overwrite it when the
+	 * page is partitioned into more than one piece.
+ */
+ if (atomic_long_read(&page->pp_ref_count) == nr) {
+		/* As we have ensured nr is always one for the constant case
+		 * using the BUILD_BUG_ON(), we only need to handle the
+		 * non-constant case
+ * here for pp_ref_count draining, which is a rare case.
+ */
+ BUILD_BUG_ON(__builtin_constant_p(nr) && nr != 1);
+ if (!__builtin_constant_p(nr))
+ atomic_long_set(&page->pp_ref_count, 1);
+
+ return 0;
+ }
+
+ ret = atomic_long_sub_return(nr, &page->pp_ref_count);
+ WARN_ON(ret < 0);
+
+	/* We are the last user here too: reset pp_ref_count back to 1 to
+	 * ensure all pages have been partitioned into 1 piece initially.
+	 * This should be the rare case where the last two fragment users
+	 * call page_pool_unref_page() concurrently.
+ */
+ if (unlikely(!ret))
+ atomic_long_set(&page->pp_ref_count, 1);
+
+ return ret;
+}
+
+static inline void page_pool_ref_page(struct page *page)
+{
+ atomic_long_inc(&page->pp_ref_count);
+}
+
+static inline bool page_pool_is_last_ref(struct page *page)
+{
+ /* If page_pool_unref_page() returns 0, we were the last user */
+ return page_pool_unref_page(page, 1) == 0;
+}
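A sketch of the fragment reference flow (the completion loop is illustrative):
a driver that shares one page across n buffers takes all references up front;
each put drops one, and the last put recycles or unmaps the page.

static void demo_complete_buffers(struct page_pool *pool,
                                  struct page *page, long n)
{
        long i;

        page_pool_fragment_page(page, n);       /* pp_ref_count = n */

        for (i = 0; i < n; i++) {
                /* ... per-buffer completion work ... */
                page_pool_put_full_page(pool, page, false);
        }
}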
+
+/**
+ * page_pool_put_page() - release a reference to a page pool page
+ * @pool: pool from which page was allocated
+ * @page: page to release a reference on
+ * @dma_sync_size: how much of the page may have been touched by the device
+ * @allow_direct: released by the consumer, allow lockless caching
+ *
+ * The outcome of this depends on the page refcnt. If the driver bumps
+ * the refcnt > 1 this will unmap the page. If the page refcnt is 1
+ * the allocator owns the page and will try to recycle it in one of the pool
+ * caches. If PP_FLAG_DMA_SYNC_DEV is set, the page will be synced for_device
+ * using dma_sync_single_range_for_device().
+ */
+static inline void page_pool_put_page(struct page_pool *pool,
+ struct page *page,
+ unsigned int dma_sync_size,
+ bool allow_direct)
+{
+ /* When page_pool isn't compiled-in, net/core/xdp.c doesn't
+ * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
+ */
+#ifdef CONFIG_PAGE_POOL
+ if (!page_pool_is_last_ref(page))
+ return;
+
+ page_pool_put_unrefed_page(pool, page, dma_sync_size, allow_direct);
+#endif
+}
+
+/**
+ * page_pool_put_full_page() - release a reference on a page pool page
+ * @pool: pool from which page was allocated
+ * @page: page to release a reference on
+ * @allow_direct: released by the consumer, allow lockless caching
+ *
+ * Similar to page_pool_put_page(), but will DMA sync the entire memory area
+ * as configured in &page_pool_params.max_len.
+ */
+static inline void page_pool_put_full_page(struct page_pool *pool,
+ struct page *page, bool allow_direct)
+{
+ page_pool_put_page(pool, page, -1, allow_direct);
+}
+
+/**
+ * page_pool_recycle_direct() - release a reference on a page pool page
+ * @pool: pool from which page was allocated
+ * @page: page to release a reference on
+ *
+ * Similar to page_pool_put_full_page() but caller must guarantee safe context
+ * (e.g. NAPI), since it will recycle the page directly into the pool fast cache.
+ */
+static inline void page_pool_recycle_direct(struct page_pool *pool,
+ struct page *page)
+{
+ page_pool_put_full_page(pool, page, true);
+}
+
+#define PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA \
+ (sizeof(dma_addr_t) > sizeof(unsigned long))
+
+/**
+ * page_pool_free_va() - free a va into the page_pool
+ * @pool: pool from which va was allocated
+ * @va: va to be freed
+ * @allow_direct: freed by the consumer, allow lockless caching
+ *
+ * Free a va allocated from page_pool_alloc_va().
+ */
+static inline void page_pool_free_va(struct page_pool *pool, void *va,
+ bool allow_direct)
+{
+ page_pool_put_page(pool, virt_to_head_page(va), -1, allow_direct);
+}
+
+/**
+ * page_pool_get_dma_addr() - Retrieve the stored DMA address.
+ * @page: page allocated from a page pool
+ *
+ * Fetch the DMA address of the page. The page pool to which the page belongs
+ * must have been created with PP_FLAG_DMA_MAP.
+ */
+static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
+{
+ dma_addr_t ret = page->dma_addr;
+
+ if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA)
+ ret <<= PAGE_SHIFT;
+
+ return ret;
+}
+
+static inline bool page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
+{
+ if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA) {
+ page->dma_addr = addr >> PAGE_SHIFT;
+
+		/* We assume page alignment to shave off bottom bits;
+		 * if this "compression" doesn't work we need to drop.
+ */
+ return addr != (dma_addr_t)page->dma_addr << PAGE_SHIFT;
+ }
+
+ page->dma_addr = addr;
+ return false;
+}
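On 32-bit architectures with a 64-bit dma_addr_t, the address is stored
right-shifted by PAGE_SHIFT so it fits in the unsigned long page field, which
only works for page-aligned mappings. A hedged mapping sketch (the error
handling is illustrative):

static int demo_map(struct page_pool *pool, struct page *page)
{
        dma_addr_t dma = dma_map_page(pool->p.dev, page, 0, PAGE_SIZE,
                                      pool->p.dma_dir);

        if (dma_mapping_error(pool->p.dev, dma))
                return -ENOMEM;

        if (page_pool_set_dma_addr(page, dma)) {
                /* low bits would be lost: "compression" failed, back out */
                dma_unmap_page(pool->p.dev, dma, PAGE_SIZE, pool->p.dma_dir);
                return -EINVAL;
        }
        return 0;
}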
+
+static inline bool page_pool_put(struct page_pool *pool)
+{
+ return refcount_dec_and_test(&pool->user_cnt);
+}
+
+static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
+{
+ if (unlikely(pool->p.nid != new_nid))
+ page_pool_update_nid(pool, new_nid);
+}
+
+#endif /* _NET_PAGE_POOL_HELPERS_H */
diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
new file mode 100644
index 000000000000..76481c465375
--- /dev/null
+++ b/include/net/page_pool/types.h
@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _NET_PAGE_POOL_TYPES_H
+#define _NET_PAGE_POOL_TYPES_H
+
+#include <linux/dma-direction.h>
+#include <linux/ptr_ring.h>
+#include <linux/types.h>
+
+#define PP_FLAG_DMA_MAP BIT(0) /* Should page_pool do the DMA
+ * map/unmap
+ */
+#define PP_FLAG_DMA_SYNC_DEV BIT(1) /* If set all pages that the driver gets
+ * from page_pool will be
+ * DMA-synced-for-device according to
+ * the length provided by the device
+ * driver.
+ * Please note DMA-sync-for-CPU is still
+ * device driver responsibility
+ */
+#define PP_FLAG_ALL (PP_FLAG_DMA_MAP |\
+ PP_FLAG_DMA_SYNC_DEV)
+
+/*
+ * Fast allocation side cache array/stack
+ *
+ * The cache size and refill watermark are related to the network
+ * use-case. The NAPI budget is 64 packets. After a NAPI poll the RX
+ * ring is usually refilled and the max consumed elements will be 64,
+ * thus a natural max size of objects needed in the cache.
+ *
+ * Keeping room for more objects is due to the XDP_DROP use-case. As
+ * XDP_DROP allows the opportunity to recycle objects directly into
+ * this array, as it shares the same softirq/NAPI protection. If
+ * cache is already full (or partly full) then the XDP_DROP recycles
+ * would have to take a slower code path.
+ */
+#define PP_ALLOC_CACHE_SIZE 128
+#define PP_ALLOC_CACHE_REFILL 64
+struct pp_alloc_cache {
+ u32 count;
+ struct page *cache[PP_ALLOC_CACHE_SIZE];
+};
+
+/**
+ * struct page_pool_params - page pool parameters
+ * @flags: PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV
+ * @order: 2^order pages on allocation
+ * @pool_size: size of the ptr_ring
+ * @nid: NUMA node id to allocate pages from
+ * @dev: device, for DMA pre-mapping purposes
+ * @netdev: netdev this pool will serve (leave as NULL if none or multiple)
+ * @napi: NAPI which is the sole consumer of pages, otherwise NULL
+ * @dma_dir: DMA mapping direction
+ * @max_len: max DMA sync memory size for PP_FLAG_DMA_SYNC_DEV
+ * @offset: DMA sync address offset for PP_FLAG_DMA_SYNC_DEV
+ */
+struct page_pool_params {
+ struct_group_tagged(page_pool_params_fast, fast,
+ unsigned int flags;
+ unsigned int order;
+ unsigned int pool_size;
+ int nid;
+ struct device *dev;
+ struct napi_struct *napi;
+ enum dma_data_direction dma_dir;
+ unsigned int max_len;
+ unsigned int offset;
+ );
+ struct_group_tagged(page_pool_params_slow, slow,
+ struct net_device *netdev;
+/* private: used by test code only */
+ void (*init_callback)(struct page *page, void *arg);
+ void *init_arg;
+ );
+};
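struct_group_tagged() wraps the members in tagged sub-structs (fast/slow)
while keeping the original field names addressable, so plain designated
initializers keep working. A creation sketch with illustrative values:

static struct page_pool *demo_create_pool(struct device *dev)
{
        struct page_pool_params pp_params = {
                .flags     = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
                .order     = 0,
                .pool_size = 256,
                .nid       = NUMA_NO_NODE,
                .dev       = dev,
                .dma_dir   = DMA_FROM_DEVICE,
                .max_len   = PAGE_SIZE,
                .offset    = 0,
        };

        return page_pool_create(&pp_params);
}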
+
+#ifdef CONFIG_PAGE_POOL_STATS
+/**
+ * struct page_pool_alloc_stats - allocation statistics
+ * @fast: successful fast path allocations
+ * @slow: slow path order-0 allocations
+ * @slow_high_order: slow path high order allocations
+ * @empty: ptr ring is empty, so a slow path allocation was forced
+ * @refill: an allocation which triggered a refill of the cache
+ * @waive: pages obtained from the ptr ring that cannot be added to
+ * the cache due to a NUMA mismatch
+ */
+struct page_pool_alloc_stats {
+ u64 fast;
+ u64 slow;
+ u64 slow_high_order;
+ u64 empty;
+ u64 refill;
+ u64 waive;
+};
+
+/**
+ * struct page_pool_recycle_stats - recycling (freeing) statistics
+ * @cached: recycling placed page in the page pool cache
+ * @cache_full: page pool cache was full
+ * @ring: page placed into the ptr ring
+ * @ring_full: page released from page pool because the ptr ring was full
+ * @released_refcnt: page released (and not recycled) because refcnt > 1
+ */
+struct page_pool_recycle_stats {
+ u64 cached;
+ u64 cache_full;
+ u64 ring;
+ u64 ring_full;
+ u64 released_refcnt;
+};
+
+/**
+ * struct page_pool_stats - combined page pool use statistics
+ * @alloc_stats: see struct page_pool_alloc_stats
+ * @recycle_stats: see struct page_pool_recycle_stats
+ *
+ * Wrapper struct for combining page pool stats with different storage
+ * requirements.
+ */
+struct page_pool_stats {
+ struct page_pool_alloc_stats alloc_stats;
+ struct page_pool_recycle_stats recycle_stats;
+};
+#endif
+
+struct page_pool {
+ struct page_pool_params_fast p;
+
+ bool has_init_callback;
+
+ long frag_users;
+ struct page *frag_page;
+ unsigned int frag_offset;
+ u32 pages_state_hold_cnt;
+
+ struct delayed_work release_dw;
+ void (*disconnect)(void *pool);
+ unsigned long defer_start;
+ unsigned long defer_warn;
+
+#ifdef CONFIG_PAGE_POOL_STATS
+ /* these stats are incremented while in softirq context */
+ struct page_pool_alloc_stats alloc_stats;
+#endif
+ u32 xdp_mem_id;
+
+ /*
+ * Data structure for allocation side
+ *
+	 * The driver's allocation side usually already performs some
+	 * kind of resource protection. Piggyback on this protection,
+	 * and require the driver to protect the allocation side.
+	 *
+	 * For NIC drivers this means allocating a page_pool per
+	 * RX-queue, as the RX-queue is already protected by
+	 * softirq/BH scheduling; napi_schedule() guarantees that a
+	 * single napi_struct will only be scheduled on a single CPU
+	 * (see napi_schedule()).
+ */
+ struct pp_alloc_cache alloc ____cacheline_aligned_in_smp;
+
+ /* Data structure for storing recycled pages.
+ *
+ * Returning/freeing pages is more complicated synchronization
+ * wise, because free's can happen on remote CPUs, with no
+ * association with allocation resource.
+ *
+ * Use ptr_ring, as it separates consumer and producer
+	 * efficiently, in a way that doesn't bounce cache-lines.
+ *
+ * TODO: Implement bulk return pages into this structure.
+ */
+ struct ptr_ring ring;
+
+#ifdef CONFIG_PAGE_POOL_STATS
+ /* recycle stats are per-cpu to avoid locking */
+ struct page_pool_recycle_stats __percpu *recycle_stats;
+#endif
+ atomic_t pages_state_release_cnt;
+
+ /* A page_pool is strictly tied to a single RX-queue being
+	 * protected by NAPI, due to the above pp_alloc_cache. This
+	 * refcnt serves to simplify the drivers' error handling.
+ */
+ refcount_t user_cnt;
+
+ u64 destroy_cnt;
+
+ /* Slow/Control-path information follows */
+ struct page_pool_params_slow slow;
+ /* User-facing fields, protected by page_pools_lock */
+ struct {
+ struct hlist_node list;
+ u64 detach_time;
+ u32 napi_id;
+ u32 id;
+ } user;
+};
+
+struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
+struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
+ unsigned int size, gfp_t gfp);
+struct page_pool *page_pool_create(const struct page_pool_params *params);
+
+struct xdp_mem_info;
+
+#ifdef CONFIG_PAGE_POOL
+void page_pool_unlink_napi(struct page_pool *pool);
+void page_pool_destroy(struct page_pool *pool);
+void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
+ struct xdp_mem_info *mem);
+void page_pool_put_page_bulk(struct page_pool *pool, void **data,
+ int count);
+#else
+static inline void page_pool_unlink_napi(struct page_pool *pool)
+{
+}
+
+static inline void page_pool_destroy(struct page_pool *pool)
+{
+}
+
+static inline void page_pool_use_xdp_mem(struct page_pool *pool,
+ void (*disconnect)(void *),
+ struct xdp_mem_info *mem)
+{
+}
+
+static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
+ int count)
+{
+}
+#endif
+
+void page_pool_put_unrefed_page(struct page_pool *pool, struct page *page,
+ unsigned int dma_sync_size,
+ bool allow_direct);
+
+static inline bool is_page_pool_compiled_in(void)
+{
+#ifdef CONFIG_PAGE_POOL
+ return true;
+#else
+ return false;
+#endif
+}
+
+/* Caller must provide appropriate safe context, e.g. NAPI. */
+void page_pool_update_nid(struct page_pool *pool, int new_nid);
+
+#endif /* _NET_PAGE_POOL_TYPES_H */
diff --git a/include/net/phonet/phonet.h b/include/net/phonet/phonet.h
index 862f1719b523..cf5ecae4a2fc 100644
--- a/include/net/phonet/phonet.h
+++ b/include/net/phonet/phonet.h
@@ -109,4 +109,25 @@ void phonet_sysctl_exit(void);
int isi_register(void);
void isi_unregister(void);
+static inline bool sk_is_phonet(struct sock *sk)
+{
+ return sk->sk_family == PF_PHONET;
+}
+
+static inline int phonet_sk_ioctl(struct sock *sk, unsigned int cmd,
+ void __user *arg)
+{
+ int karg;
+
+ switch (cmd) {
+ case SIOCPNADDRESOURCE:
+ case SIOCPNDELRESOURCE:
+ if (get_user(karg, (int __user *)arg))
+ return -EFAULT;
+
+ return sk->sk_prot->ioctl(sk, cmd, &karg);
+ }
+ /* A positive return value means that the ioctl was not processed */
+ return 1;
+}
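A sketch of the intended call site (the surrounding dispatcher is
illustrative): protocol-agnostic ioctl code forwards only the Phonet-specific
commands and treats the positive return value as "not mine".

static int demo_sock_ioctl(struct sock *sk, unsigned int cmd,
                           void __user *arg)
{
        if (sk_is_phonet(sk)) {
                int err = phonet_sk_ioctl(sk, cmd, arg);

                /* zero or negative: handled (or failed) by Phonet */
                if (err <= 0)
                        return err;
        }

        return -ENOIOCTLCMD;    /* let other handlers have a look */
}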
#endif
diff --git a/include/net/pie.h b/include/net/pie.h
index 3fe2361e03b4..01cbc66825a4 100644
--- a/include/net/pie.h
+++ b/include/net/pie.h
@@ -17,7 +17,7 @@
/**
* struct pie_params - contains pie parameters
* @target: target delay in pschedtime
- * @tudpate: interval at which drop probability is calculated
+ * @tupdate: interval at which drop probability is calculated
* @limit: total number of packets that can be in the queue
* @alpha: parameter to control drop probability
* @beta: parameter to control drop probability
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index b3b5b0b62f16..f308e8268651 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -138,19 +138,6 @@ static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
return NULL;
}
-static inline
-int tc_setup_cb_block_register(struct tcf_block *block, flow_setup_cb_t *cb,
- void *cb_priv)
-{
- return 0;
-}
-
-static inline
-void tc_setup_cb_block_unregister(struct tcf_block *block, flow_setup_cb_t *cb,
- void *cb_priv)
-{
-}
-
static inline int tcf_classify(struct sk_buff *skb,
const struct tcf_block *block,
const struct tcf_proto *tp,
@@ -866,8 +853,10 @@ struct tc_htb_qopt_offload {
u32 parent_classid;
u16 classid;
u16 qid;
+ u32 quantum;
u64 rate;
u64 ceil;
+ u8 prio;
};
#define TC_HTB_CLASSID_ROOT U32_MAX
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 5722931d83d4..1e200d9a066d 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -20,10 +20,10 @@ struct qdisc_walker {
int (*fn)(struct Qdisc *, unsigned long cl, struct qdisc_walker *);
};
-static inline void *qdisc_priv(struct Qdisc *q)
-{
- return &q->privdata;
-}
+#define qdisc_priv(q) \
+ _Generic(q, \
+ const struct Qdisc * : (const void *)&q->privdata, \
+ struct Qdisc * : (void *)&q->privdata)
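Turning qdisc_priv() from an inline function into a _Generic() macro lets
const-ness propagate from the qdisc to its private area. A sketch with a
hypothetical demo_sched_data:

static const struct demo_sched_data *demo_peek(const struct Qdisc *sch)
{
        /* const in, const out: writing through the result is now a
         * compile-time error rather than a silently dropped qualifier.
         */
        return qdisc_priv(sch);
}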
static inline struct Qdisc *qdisc_from_priv(void *priv)
{
@@ -134,7 +134,7 @@ extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
*/
static inline unsigned int psched_mtu(const struct net_device *dev)
{
- return dev->mtu + dev->hard_header_len;
+ return READ_ONCE(dev->mtu) + dev->hard_header_len;
}
static inline struct net *qdisc_net(struct Qdisc *q)
@@ -187,6 +187,32 @@ struct tc_taprio_caps {
bool broken_mqprio:1;
};
+enum tc_taprio_qopt_cmd {
+ TAPRIO_CMD_REPLACE,
+ TAPRIO_CMD_DESTROY,
+ TAPRIO_CMD_STATS,
+ TAPRIO_CMD_QUEUE_STATS,
+};
+
+/**
+ * struct tc_taprio_qopt_stats - IEEE 802.1Qbv statistics
+ * @window_drops: Frames that were dropped because they were too large to be
+ * transmitted in any of the allotted time windows (open gates) for their
+ * traffic class.
+ * @tx_overruns: Frames still being transmitted by the MAC after the
+ * transmission gate associated with their traffic class has closed.
+ * Equivalent to `12.29.1.1.2 TransmissionOverrun` from 802.1Q-2018.
+ */
+struct tc_taprio_qopt_stats {
+ u64 window_drops;
+ u64 tx_overruns;
+};
+
+struct tc_taprio_qopt_queue_stats {
+ int queue;
+ struct tc_taprio_qopt_stats stats;
+};
+
struct tc_taprio_sched_entry {
u8 command; /* TC_TAPRIO_CMD_* */
@@ -196,16 +222,26 @@ struct tc_taprio_sched_entry {
};
struct tc_taprio_qopt_offload {
- struct tc_mqprio_qopt_offload mqprio;
- struct netlink_ext_ack *extack;
- u8 enable;
- ktime_t base_time;
- u64 cycle_time;
- u64 cycle_time_extension;
- u32 max_sdu[TC_MAX_QUEUE];
-
- size_t num_entries;
- struct tc_taprio_sched_entry entries[];
+ enum tc_taprio_qopt_cmd cmd;
+
+ union {
+ /* TAPRIO_CMD_STATS */
+ struct tc_taprio_qopt_stats stats;
+ /* TAPRIO_CMD_QUEUE_STATS */
+ struct tc_taprio_qopt_queue_stats queue_stats;
+ /* TAPRIO_CMD_REPLACE */
+ struct {
+ struct tc_mqprio_qopt_offload mqprio;
+ struct netlink_ext_ack *extack;
+ ktime_t base_time;
+ u64 cycle_time;
+ u64 cycle_time_extension;
+ u32 max_sdu[TC_MAX_QUEUE];
+
+ size_t num_entries;
+ struct tc_taprio_sched_entry entries[];
+ };
+ };
};
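A driver-side dispatch sketch for the new cmd/union layout (the demo_*
handlers are hypothetical): only the union member matching cmd is valid,
so offload code should switch on it before touching anything.

static int demo_setup_taprio(struct tc_taprio_qopt_offload *offload)
{
        switch (offload->cmd) {
        case TAPRIO_CMD_REPLACE:
                return demo_taprio_replace(offload);    /* uses entries[] */
        case TAPRIO_CMD_DESTROY:
                return demo_taprio_destroy(offload);
        case TAPRIO_CMD_STATS:
                demo_taprio_read_stats(&offload->stats);
                return 0;
        case TAPRIO_CMD_QUEUE_STATS:
                return demo_taprio_queue_stats(&offload->queue_stats);
        default:
                return -EOPNOTSUPP;
        }
}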
#if IS_ENABLED(CONFIG_NET_SCH_TAPRIO)
@@ -239,24 +275,6 @@ static inline void skb_txtime_consumed(struct sk_buff *skb)
skb->tstamp = ktime_set(0, 0);
}
-struct tc_skb_cb {
- struct qdisc_skb_cb qdisc_cb;
-
- u16 mru;
- u8 post_ct:1;
- u8 post_ct_snat:1;
- u8 post_ct_dnat:1;
- u16 zone; /* Only valid if post_ct = true */
-};
-
-static inline struct tc_skb_cb *tc_skb_cb(const struct sk_buff *skb)
-{
- struct tc_skb_cb *cb = (struct tc_skb_cb *)skb->cb;
-
- BUILD_BUG_ON(sizeof(*cb) > sizeof_field(struct sk_buff, cb));
- return cb;
-}
-
static inline bool tc_qdisc_stats_dump(struct Qdisc *sch,
unsigned long cl,
struct qdisc_walker *arg)
diff --git a/include/net/regulatory.h b/include/net/regulatory.h
index 896191f420d5..ebf9e028d1ef 100644
--- a/include/net/regulatory.h
+++ b/include/net/regulatory.h
@@ -140,17 +140,6 @@ struct regulatory_request {
* otherwise initiating radiation is not allowed. This will enable the
* relaxations enabled under the CFG80211_REG_RELAX_NO_IR configuration
* option
- * @REGULATORY_IGNORE_STALE_KICKOFF: the regulatory core will _not_ make sure
- * all interfaces on this wiphy reside on allowed channels. If this flag
- * is not set, upon a regdomain change, the interfaces are given a grace
- * period (currently 60 seconds) to disconnect or move to an allowed
- * channel. Interfaces on forbidden channels are forcibly disconnected.
- * Currently these types of interfaces are supported for enforcement:
- * NL80211_IFTYPE_ADHOC, NL80211_IFTYPE_STATION, NL80211_IFTYPE_AP,
- * NL80211_IFTYPE_AP_VLAN, NL80211_IFTYPE_MONITOR,
- * NL80211_IFTYPE_P2P_CLIENT, NL80211_IFTYPE_P2P_GO,
- * NL80211_IFTYPE_P2P_DEVICE. The flag will be set by default if a device
- * includes any modes unsupported for enforcement checking.
* @REGULATORY_WIPHY_SELF_MANAGED: for devices that employ wiphy-specific
* regdom management. These devices will ignore all regdom changes not
* originating from their own wiphy.
@@ -177,7 +166,7 @@ enum ieee80211_regulatory_flags {
REGULATORY_COUNTRY_IE_FOLLOW_POWER = BIT(3),
REGULATORY_COUNTRY_IE_IGNORE = BIT(4),
REGULATORY_ENABLE_RELAX_NO_IR = BIT(5),
- REGULATORY_IGNORE_STALE_KICKOFF = BIT(6),
+ /* reuse bit 6 next time */
REGULATORY_WIPHY_SELF_MANAGED = BIT(7),
};
@@ -224,6 +213,7 @@ struct ieee80211_reg_rule {
u32 flags;
u32 dfs_cac_ms;
bool has_wmm;
+ s8 psd;
};
struct ieee80211_regdomain {
diff --git a/include/net/route.h b/include/net/route.h
index bcc367cf3aa2..980ab474eabd 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -37,7 +37,7 @@
#define RTO_ONLINK 0x01
-#define RT_CONN_FLAGS(sk) (RT_TOS(inet_sk(sk)->tos) | sock_flag(sk, SOCK_LOCALROUTE))
+#define RT_CONN_FLAGS(sk) (RT_TOS(READ_ONCE(inet_sk(sk)->tos)) | sock_flag(sk, SOCK_LOCALROUTE))
#define RT_CONN_FLAGS_TOS(sk,tos) (RT_TOS(tos) | sock_flag(sk, SOCK_LOCALROUTE))
static inline __u8 ip_sock_rt_scope(const struct sock *sk)
@@ -50,7 +50,7 @@ static inline __u8 ip_sock_rt_scope(const struct sock *sk)
static inline __u8 ip_sock_rt_tos(const struct sock *sk)
{
- return RT_TOS(inet_sk(sk)->tos);
+ return RT_TOS(READ_ONCE(inet_sk(sk)->tos));
}
struct ip_tunnel_info;
@@ -136,12 +136,6 @@ static inline struct rtable *__ip_route_output_key(struct net *net,
struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp,
const struct sock *sk);
-struct rtable *ip_route_output_tunnel(struct sk_buff *skb,
- struct net_device *dev,
- struct net *net, __be32 *saddr,
- const struct ip_tunnel_info *info,
- u8 protocol, bool use_cache);
-
struct dst_entry *ipv4_blackhole_route(struct net *net,
struct dst_entry *dst_orig);
@@ -163,12 +157,12 @@ static inline struct rtable *ip_route_output(struct net *net, __be32 daddr,
}
static inline struct rtable *ip_route_output_ports(struct net *net, struct flowi4 *fl4,
- struct sock *sk,
+ const struct sock *sk,
__be32 daddr, __be32 saddr,
__be16 dport, __be16 sport,
__u8 proto, __u8 tos, int oif)
{
- flowi4_init_output(fl4, oif, sk ? sk->sk_mark : 0, tos,
+ flowi4_init_output(fl4, oif, sk ? READ_ONCE(sk->sk_mark) : 0, tos,
RT_SCOPE_UNIVERSE, proto,
sk ? inet_sk_flowi_flags(sk) : 0,
daddr, saddr, dport, sport, sock_net_uid(net, sk));
@@ -298,10 +292,10 @@ static inline void ip_route_connect_init(struct flowi4 *fl4, __be32 dst,
{
__u8 flow_flags = 0;
- if (inet_sk(sk)->transparent)
+ if (inet_test_bit(TRANSPARENT, sk))
flow_flags |= FLOWI_FLAG_ANYSRC;
- flowi4_init_output(fl4, oif, sk->sk_mark, ip_sock_rt_tos(sk),
+ flowi4_init_output(fl4, oif, READ_ONCE(sk->sk_mark), ip_sock_rt_tos(sk),
ip_sock_rt_scope(sk), protocol, flow_flags, dst,
src, dport, sport, sk->sk_uid);
}
@@ -309,7 +303,7 @@ static inline void ip_route_connect_init(struct flowi4 *fl4, __be32 dst,
static inline struct rtable *ip_route_connect(struct flowi4 *fl4, __be32 dst,
__be32 src, int oif, u8 protocol,
__be16 sport, __be16 dport,
- struct sock *sk)
+ const struct sock *sk)
{
struct net *net = sock_net(sk);
struct rtable *rt;
@@ -321,8 +315,7 @@ static inline struct rtable *ip_route_connect(struct flowi4 *fl4, __be32 dst,
if (IS_ERR(rt))
return rt;
ip_rt_put(rt);
- flowi4_update_output(fl4, oif, fl4->flowi4_tos, fl4->daddr,
- fl4->saddr);
+ flowi4_update_output(fl4, oif, fl4->daddr, fl4->saddr);
}
security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4));
return ip_route_output_flow(net, fl4, sk);
@@ -331,14 +324,13 @@ static inline struct rtable *ip_route_connect(struct flowi4 *fl4, __be32 dst,
static inline struct rtable *ip_route_newports(struct flowi4 *fl4, struct rtable *rt,
__be16 orig_sport, __be16 orig_dport,
__be16 sport, __be16 dport,
- struct sock *sk)
+ const struct sock *sk)
{
if (sport != orig_sport || dport != orig_dport) {
fl4->fl4_dport = dport;
fl4->fl4_sport = sport;
ip_rt_put(rt);
- flowi4_update_output(fl4, sk->sk_bound_dev_if,
- RT_CONN_FLAGS(sk), fl4->daddr,
+ flowi4_update_output(fl4, sk->sk_bound_dev_if, fl4->daddr,
fl4->saddr);
security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4));
return ip_route_output_flow(sock_net(sk), fl4, sk);
diff --git a/include/net/rpl.h b/include/net/rpl.h
index 30fe780d1e7c..74734191c458 100644
--- a/include/net/rpl.h
+++ b/include/net/rpl.h
@@ -23,9 +23,6 @@ static inline int rpl_init(void)
static inline void rpl_exit(void) {}
#endif
-size_t ipv6_rpl_srh_size(unsigned char n, unsigned char cmpri,
- unsigned char cmpre);
-
void ipv6_rpl_srh_decompress(struct ipv6_rpl_sr_hdr *outhdr,
const struct ipv6_rpl_sr_hdr *inhdr,
const struct in6_addr *daddr, unsigned char n);
diff --git a/include/net/rsi_91x.h b/include/net/rsi_91x.h
index 040f07b47f1f..b2283762e446 100644
--- a/include/net/rsi_91x.h
+++ b/include/net/rsi_91x.h
@@ -1,4 +1,4 @@
-/**
+/*
* Copyright (c) 2017 Redpine Signals Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index d9076a7a430c..6506221c5fe3 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -190,8 +190,8 @@ int rtnl_delete_link(struct net_device *dev, u32 portid, const struct nlmsghdr *
int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm,
u32 portid, const struct nlmsghdr *nlh);
-int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
- struct netlink_ext_ack *exterr);
+int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer,
+ struct netlink_ext_ack *exterr);
struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid);
#define MODULE_ALIAS_RTNL_LINK(kind) MODULE_ALIAS("rtnl-link-" kind)
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 12eadecf8cd0..cefe0c4bdae3 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -19,6 +19,7 @@
#include <net/gen_stats.h>
#include <net/rtnetlink.h>
#include <net/flow_offload.h>
+#include <linux/xarray.h>
struct Qdisc_ops;
struct qdisc_walker;
@@ -237,12 +238,7 @@ static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
-#ifdef CONFIG_BQL
- /* Non-BQL migrated drivers will return 0, too. */
- return dql_avail(&txq->dql);
-#else
- return 0;
-#endif
+ return netdev_queue_dql_avail(txq);
}
struct Qdisc_class_ops {
@@ -324,7 +320,6 @@ struct Qdisc_ops {
struct module *owner;
};
-
struct tcf_result {
union {
struct {
@@ -332,7 +327,6 @@ struct tcf_result {
u32 classid;
};
const struct tcf_proto *goto_tp;
-
};
};
@@ -376,6 +370,10 @@ struct tcf_proto_ops {
struct nlattr **tca,
struct netlink_ext_ack *extack);
void (*tmplt_destroy)(void *tmplt_priv);
+ void (*tmplt_reoffload)(struct tcf_chain *chain,
+ bool add,
+ flow_setup_cb_t *cb,
+ void *cb_priv);
struct tcf_exts * (*get_exts)(const struct tcf_proto *tp,
u32 handle);
@@ -458,6 +456,7 @@ struct tcf_chain {
};
struct tcf_block {
+ struct xarray ports; /* datapath accessible */
/* Lock protects tcf_block and lifetime-management data of chains
* attached to the block (refcnt, action_refcnt, explicitly_created).
*/
@@ -484,6 +483,8 @@ struct tcf_block {
struct mutex proto_destroy_lock; /* Lock for proto_destroy hashtable. */
};
+struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index);
+
static inline bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain)
{
return lockdep_is_held(&chain->filter_chain_lock);
@@ -587,6 +588,7 @@ static inline void sch_tree_unlock(struct Qdisc *q)
extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
+extern const u8 sch_default_prio2band[TC_PRIO_MAX + 1];
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
@@ -599,6 +601,7 @@ get_default_qdisc_ops(const struct net_device *dev, int ntx)
struct Qdisc_class_common {
u32 classid;
+ unsigned int filter_cnt;
struct hlist_node hnode;
};
@@ -633,6 +636,31 @@ qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
return NULL;
}
+static inline bool qdisc_class_in_use(const struct Qdisc_class_common *cl)
+{
+ return cl->filter_cnt > 0;
+}
+
+static inline void qdisc_class_get(struct Qdisc_class_common *cl)
+{
+ unsigned int res;
+
+ if (check_add_overflow(cl->filter_cnt, 1, &res))
+ WARN(1, "Qdisc class overflow");
+
+ cl->filter_cnt = res;
+}
+
+static inline void qdisc_class_put(struct Qdisc_class_common *cl)
+{
+ unsigned int res;
+
+ if (check_sub_overflow(cl->filter_cnt, 1, &res))
+ WARN(1, "Qdisc class underflow");
+
+ cl->filter_cnt = res;
+}
+
static inline int tc_classid_to_hwtc(struct net_device *dev, u32 classid)
{
u32 hwtc = TC_H_MIN(classid) - TC_H_MIN_PRIORITY;
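Editor's note: qdisc_class_get()/qdisc_class_put() above centralize the per-class filter reference count with check_add_overflow()/check_sub_overflow(), and qdisc_class_in_use() gates deletion. A hedged sketch of how a qdisc's class ops might use them; the mysch_*() names and struct layout are hypothetical:

	struct mysch_class {
		struct Qdisc_class_common common;	/* carries filter_cnt */
		/* ... */
	};

	static unsigned long mysch_bind_filter(struct Qdisc *sch,
					       unsigned long parent, u32 classid)
	{
		struct mysch_class *cl = mysch_find(sch, classid); /* hypothetical */

		if (cl)
			qdisc_class_get(&cl->common);	/* overflow-checked */
		return (unsigned long)cl;
	}

	static void mysch_unbind_filter(struct Qdisc *sch, unsigned long arg)
	{
		qdisc_class_put(&((struct mysch_class *)arg)->common);
	}

	static int mysch_delete(struct Qdisc *sch, unsigned long arg,
				struct netlink_ext_ack *extack)
	{
		struct mysch_class *cl = (struct mysch_class *)arg;

		if (qdisc_class_in_use(&cl->common))
			return -EBUSY;		/* a filter still references it */
		/* ... free the class ... */
		return 0;
	}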
@@ -703,7 +731,7 @@ int skb_do_redirect(struct sk_buff *);
static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
-#ifdef CONFIG_NET_CLS_ACT
+#ifdef CONFIG_NET_XGRESS
return skb->tc_at_ingress;
#else
return false;
@@ -1011,6 +1039,37 @@ static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
return skb;
}
+struct tc_skb_cb {
+ struct qdisc_skb_cb qdisc_cb;
+ u32 drop_reason;
+
+ u16 zone; /* Only valid if post_ct = true */
+ u16 mru;
+ u8 post_ct:1;
+ u8 post_ct_snat:1;
+ u8 post_ct_dnat:1;
+};
+
+static inline struct tc_skb_cb *tc_skb_cb(const struct sk_buff *skb)
+{
+ struct tc_skb_cb *cb = (struct tc_skb_cb *)skb->cb;
+
+ BUILD_BUG_ON(sizeof(*cb) > sizeof_field(struct sk_buff, cb));
+ return cb;
+}
+
+static inline enum skb_drop_reason
+tcf_get_drop_reason(const struct sk_buff *skb)
+{
+ return tc_skb_cb(skb)->drop_reason;
+}
+
+static inline void tcf_set_drop_reason(const struct sk_buff *skb,
+ enum skb_drop_reason reason)
+{
+ tc_skb_cb(skb)->drop_reason = reason;
+}
+
/* Instead of calling kfree_skb() while root qdisc lock is held,
* queue the skb for future freeing at end of __dev_xmit_skb()
*/
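Editor's note: tc_skb_cb moves into this header and gains drop_reason, so classifiers and actions can record why a packet died and the core can surface a precise skb drop reason instead of a generic one. A hedged sketch of an action using the setter; the surrounding action and its policy check are hypothetical:

	static int myact_act(struct sk_buff *skb, const struct tc_action *a,
			     struct tcf_result *res)
	{
		if (!myact_allowed(skb, a)) {	/* hypothetical policy check */
			/* a real action would pick a more specific reason */
			tcf_set_drop_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
			return TC_ACT_SHOT;	/* core reads it via tcf_get_drop_reason() */
		}
		return TC_ACT_PIPE;
	}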
@@ -1190,20 +1249,6 @@ static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch,
return NET_XMIT_DROP;
}
-/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
- long it will take to send a packet given its size.
- */
-static inline u32 qdisc_l2t(struct qdisc_rate_table* rtab, unsigned int pktlen)
-{
- int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;
- if (slot < 0)
- slot = 0;
- slot >>= rtab->rate.cell_log;
- if (slot > 255)
- return rtab->data[255]*(slot >> 8) + rtab->data[slot & 0xFF];
- return rtab->data[slot];
-}
-
struct psched_ratecfg {
u64 rate_bytes_ps; /* bytes per second */
u32 mult;
diff --git a/include/net/scm.h b/include/net/scm.h
index 585adc1346bd..cf68acec4d70 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -5,10 +5,12 @@
#include <linux/limits.h>
#include <linux/net.h>
#include <linux/cred.h>
+#include <linux/file.h>
#include <linux/security.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>
#include <linux/sched/signal.h>
+#include <net/compat.h>
/* Well, we should have at least one descriptor open
* to accept passed FDs 8)
@@ -120,15 +122,52 @@ static inline bool scm_has_secdata(struct socket *sock)
}
#endif /* CONFIG_SECURITY_NETWORK */
-static __inline__ void scm_recv(struct socket *sock, struct msghdr *msg,
- struct scm_cookie *scm, int flags)
+static __inline__ void scm_pidfd_recv(struct msghdr *msg, struct scm_cookie *scm)
+{
+ struct file *pidfd_file = NULL;
+ int len, pidfd;
+
+ /* put_cmsg() doesn't return an error if CMSG is truncated,

+ * that's why we need to open-code these checks here.
+ */
+ if (msg->msg_flags & MSG_CMSG_COMPAT)
+ len = sizeof(struct compat_cmsghdr) + sizeof(int);
+ else
+ len = sizeof(struct cmsghdr) + sizeof(int);
+
+ if (msg->msg_controllen < len) {
+ msg->msg_flags |= MSG_CTRUNC;
+ return;
+ }
+
+ if (!scm->pid)
+ return;
+
+ pidfd = pidfd_prepare(scm->pid, 0, &pidfd_file);
+
+ if (put_cmsg(msg, SOL_SOCKET, SCM_PIDFD, sizeof(int), &pidfd)) {
+ if (pidfd_file) {
+ put_unused_fd(pidfd);
+ fput(pidfd_file);
+ }
+
+ return;
+ }
+
+ if (pidfd_file)
+ fd_install(pidfd, pidfd_file);
+}
+
+static inline bool __scm_recv_common(struct socket *sock, struct msghdr *msg,
+ struct scm_cookie *scm, int flags)
{
if (!msg->msg_control) {
- if (test_bit(SOCK_PASSCRED, &sock->flags) || scm->fp ||
- scm_has_secdata(sock))
+ if (test_bit(SOCK_PASSCRED, &sock->flags) ||
+ test_bit(SOCK_PASSPIDFD, &sock->flags) ||
+ scm->fp || scm_has_secdata(sock))
msg->msg_flags |= MSG_CTRUNC;
scm_destroy(scm);
- return;
+ return false;
}
if (test_bit(SOCK_PASSCRED, &sock->flags)) {
@@ -141,16 +180,42 @@ static __inline__ void scm_recv(struct socket *sock, struct msghdr *msg,
put_cmsg(msg, SOL_SOCKET, SCM_CREDENTIALS, sizeof(ucreds), &ucreds);
}
- scm_destroy_cred(scm);
-
scm_passec(sock, msg, scm);
- if (!scm->fp)
+ if (scm->fp)
+ scm_detach_fds(msg, scm);
+
+ return true;
+}
+
+static inline void scm_recv(struct socket *sock, struct msghdr *msg,
+ struct scm_cookie *scm, int flags)
+{
+ if (!__scm_recv_common(sock, msg, scm, flags))
+ return;
+
+ scm_destroy_cred(scm);
+}
+
+static inline void scm_recv_unix(struct socket *sock, struct msghdr *msg,
+ struct scm_cookie *scm, int flags)
+{
+ if (!__scm_recv_common(sock, msg, scm, flags))
return;
-
- scm_detach_fds(msg, scm);
+
+ if (test_bit(SOCK_PASSPIDFD, &sock->flags))
+ scm_pidfd_recv(msg, scm);
+
+ scm_destroy_cred(scm);
}
+static inline int scm_recv_one_fd(struct file *f, int __user *ufd,
+ unsigned int flags)
+{
+ if (!ufd)
+ return -EFAULT;
+ return receive_fd(f, ufd, flags);
+}
#endif /* __LINUX_NET_SCM_H */
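Editor's note: the split into __scm_recv_common() plus scm_recv_unix() lets AF_UNIX attach an SCM_PIDFD control message when SOCK_PASSPIDFD is set; note how scm_pidfd_recv() reserves the fd with pidfd_prepare() and only fd_install()s it after put_cmsg() succeeds, rolling back with put_unused_fd()/fput() otherwise. A hedged user-space sketch of the receiving side (assumes headers that expose SCM_PIDFD; error handling abbreviated):

	#include <string.h>
	#include <sys/socket.h>
	#include <sys/uio.h>

	static int recv_with_pidfd(int sock, char *buf, size_t len, int *pidfd)
	{
		union { char b[CMSG_SPACE(sizeof(int))]; struct cmsghdr align; } ctl;
		struct iovec iov = { .iov_base = buf, .iov_len = len };
		struct msghdr msg = {
			.msg_iov = &iov, .msg_iovlen = 1,
			.msg_control = ctl.b, .msg_controllen = sizeof(ctl.b),
		};
		struct cmsghdr *c;
		ssize_t n = recvmsg(sock, &msg, 0);

		if (n < 0)
			return -1;
		*pidfd = -1;
		for (c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c))
			if (c->cmsg_level == SOL_SOCKET && c->cmsg_type == SCM_PIDFD)
				memcpy(pidfd, CMSG_DATA(c), sizeof(int));
		return (int)n;
	}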
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 2a67100b2a17..a2310fa995f6 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -148,8 +148,6 @@ void sctp_icmp_redirect(struct sock *, struct sctp_transport *,
void sctp_icmp_proto_unreachable(struct sock *sk,
struct sctp_association *asoc,
struct sctp_transport *t);
-void sctp_backlog_migrate(struct sctp_association *assoc,
- struct sock *oldsk, struct sock *newsk);
int sctp_transport_hashtable_init(void);
void sctp_transport_hashtable_destroy(void);
int sctp_hash_transport(struct sctp_transport *t);
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index f37c7a558d6d..64c42bd56bb2 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -156,7 +156,6 @@ sctp_state_fn_t sctp_sf_do_6_2_sack;
sctp_state_fn_t sctp_sf_autoclose_timer_expire;
/* Prototypes for utility support functions. */
-__u8 sctp_get_chunk_type(struct sctp_chunk *chunk);
const struct sctp_sm_table_entry *sctp_sm_lookup_event(
struct net *net,
enum sctp_event_type event_type,
@@ -166,8 +165,6 @@ int sctp_chunk_iif(const struct sctp_chunk *);
struct sctp_association *sctp_make_temp_asoc(const struct sctp_endpoint *,
struct sctp_chunk *,
gfp_t gfp);
-__u32 sctp_generate_verification_tag(void);
-void sctp_populate_tie_tags(__u8 *cookie, __u32 curTag, __u32 hisTag);
/* Prototypes for chunk-building functions. */
struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 5c72d1864dd6..5a24d6d8522a 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1122,8 +1122,6 @@ void sctp_outq_free(struct sctp_outq*);
void sctp_outq_tail(struct sctp_outq *, struct sctp_chunk *chunk, gfp_t);
int sctp_outq_sack(struct sctp_outq *, struct sctp_chunk *);
int sctp_outq_is_empty(const struct sctp_outq *);
-void sctp_outq_restart(struct sctp_outq *);
-
void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
enum sctp_retransmit_reason reason);
void sctp_retransmit_mark(struct sctp_outq *, struct sctp_transport *, __u8);
diff --git a/include/net/smc.h b/include/net/smc.h
index a002552be29c..c9dcb30e3fd9 100644
--- a/include/net/smc.h
+++ b/include/net/smc.h
@@ -52,9 +52,14 @@ struct smcd_dmb {
struct smcd_dev;
struct ism_client;
+struct smcd_gid {
+ u64 gid;
+ u64 gid_ext;
+};
+
struct smcd_ops {
- int (*query_remote_gid)(struct smcd_dev *dev, u64 rgid, u32 vid_valid,
- u32 vid);
+ int (*query_remote_gid)(struct smcd_dev *dev, struct smcd_gid *rgid,
+ u32 vid_valid, u32 vid);
int (*register_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb,
struct ism_client *client);
int (*unregister_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb);
@@ -62,14 +67,13 @@ struct smcd_ops {
int (*del_vlan_id)(struct smcd_dev *dev, u64 vlan_id);
int (*set_vlan_required)(struct smcd_dev *dev);
int (*reset_vlan_required)(struct smcd_dev *dev);
- int (*signal_event)(struct smcd_dev *dev, u64 rgid, u32 trigger_irq,
- u32 event_code, u64 info);
+ int (*signal_event)(struct smcd_dev *dev, struct smcd_gid *rgid,
+ u32 trigger_irq, u32 event_code, u64 info);
int (*move_data)(struct smcd_dev *dev, u64 dmb_tok, unsigned int idx,
bool sf, unsigned int offset, void *data,
unsigned int size);
int (*supports_v2)(void);
- u8* (*get_system_eid)(void);
- u64 (*get_local_gid)(struct smcd_dev *dev);
+ void (*get_local_gid)(struct smcd_dev *dev, struct smcd_gid *gid);
u16 (*get_chid)(struct smcd_dev *dev);
struct device* (*get_dev)(struct smcd_dev *dev);
};
diff --git a/include/net/sock.h b/include/net/sock.h
index 6f428a7f3567..54ca8dcbfb43 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -76,19 +76,6 @@
* the other protocols.
*/
-/* Define this to get the SOCK_DBG debugging facility. */
-#define SOCK_DEBUGGING
-#ifdef SOCK_DEBUGGING
-#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
- printk(KERN_DEBUG msg); } while (0)
-#else
-/* Validate arguments and do nothing */
-static inline __printf(2, 3)
-void SOCK_DEBUG(const struct sock *sk, const char *msg, ...)
-{
-}
-#endif
-
/* This is the per-socket lock. The spinlock provides a synchronization
* between user contexts and software interrupt processing, whereas the
* mini-semaphore synchronizes multiple users amongst themselves.
@@ -277,8 +264,6 @@ struct sk_filter;
* @sk_pacing_status: Pacing status (requested, handled by sch_fq)
* @sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE)
* @sk_sndbuf: size of send buffer in bytes
- * @__sk_flags_offset: empty field used to determine location of bitfield
- * @sk_padding: unused element for alignment
* @sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets
* @sk_no_check_rx: allow zero checksum in RX packets
* @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
@@ -336,7 +321,7 @@ struct sk_filter;
* @sk_cgrp_data: cgroup data for this cgroup
* @sk_memcg: this socket's memory cgroup association
* @sk_write_pending: a write to stream socket waits to start
- * @sk_wait_pending: number of threads blocked on this socket
+ * @sk_disconnects: number of disconnect operations performed on this sock
* @sk_state_change: callback to indicate change in the state of the sock
* @sk_data_ready: callback to indicate there is data to be processed
* @sk_write_space: callback to indicate send buffer space is available
@@ -352,7 +337,6 @@ struct sk_filter;
* @sk_txtime_report_errors: set report errors mode for SO_TXTIME
* @sk_txtime_unused: unused txtime flags
* @ns_tracker: tracker for netns reference
- * @sk_bind2_node: bind node in the bhash2 table
*/
struct sock {
/*
@@ -429,7 +413,7 @@ struct sock {
unsigned int sk_napi_id;
#endif
int sk_rcvbuf;
- int sk_wait_pending;
+ int sk_disconnects;
struct sk_filter __rcu *sk_filter;
union {
@@ -544,7 +528,6 @@ struct sock {
#endif
struct rcu_head sk_rcu;
netns_tracker ns_tracker;
- struct hlist_node sk_bind2_node;
};
enum sk_pacing {
@@ -873,16 +856,6 @@ static inline void sk_add_bind_node(struct sock *sk,
hlist_add_head(&sk->sk_bind_node, list);
}
-static inline void __sk_del_bind2_node(struct sock *sk)
-{
- __hlist_del(&sk->sk_bind2_node);
-}
-
-static inline void sk_add_bind2_node(struct sock *sk, struct hlist_head *list)
-{
- hlist_add_head(&sk->sk_bind2_node, list);
-}
-
#define sk_for_each(__sk, list) \
hlist_for_each_entry(__sk, list, sk_node)
#define sk_for_each_rcu(__sk, list) \
@@ -900,8 +873,6 @@ static inline void sk_add_bind2_node(struct sock *sk, struct hlist_head *list)
hlist_for_each_entry_safe(__sk, tmp, list, sk_node)
#define sk_for_each_bound(__sk, list) \
hlist_for_each_entry(__sk, list, sk_bind_node)
-#define sk_for_each_bound_bhash2(__sk, list) \
- hlist_for_each_entry(__sk, list, sk_bind2_node)
/**
* sk_for_each_entry_offset_rcu - iterate over a list at a given struct offset
@@ -1053,6 +1024,12 @@ static inline void sk_wmem_queued_add(struct sock *sk, int val)
WRITE_ONCE(sk->sk_wmem_queued, sk->sk_wmem_queued + val);
}
+static inline void sk_forward_alloc_add(struct sock *sk, int val)
+{
+ /* Paired with lockless reads of sk->sk_forward_alloc */
+ WRITE_ONCE(sk->sk_forward_alloc, sk->sk_forward_alloc + val);
+}
+
void sk_stream_write_space(struct sock *sk);
/* OOB backlog add */
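Editor's note: sk_forward_alloc_add() funnels every update through WRITE_ONCE(), pairing with the READ_ONCE() in sk_forward_alloc_get() further down so lockless readers never see a torn value; sk_mem_charge()/sk_mem_uncharge() are converted to it later in this file. A minimal stand-alone sketch of the idiom (pseudo-kernel style, not the sock code itself):

	struct counter { int val; };

	static inline void counter_add(struct counter *c, int delta)
	{
		/* single writer under the owner lock; readers are lockless */
		WRITE_ONCE(c->val, c->val + delta);
	}

	static inline int counter_read(const struct counter *c)
	{
		/* paired with the WRITE_ONCE() above */
		return READ_ONCE(c->val);
	}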
@@ -1183,8 +1160,7 @@ static inline void sock_rps_reset_rxhash(struct sock *sk)
}
#define sk_wait_event(__sk, __timeo, __condition, __wait) \
- ({ int __rc; \
- __sk->sk_wait_pending++; \
+ ({ int __rc, __dis = __sk->sk_disconnects; \
release_sock(__sk); \
__rc = __condition; \
if (!__rc) { \
@@ -1194,8 +1170,7 @@ static inline void sock_rps_reset_rxhash(struct sock *sk)
} \
sched_annotate_sleep(); \
lock_sock(__sk); \
- __sk->sk_wait_pending--; \
- __rc = __condition; \
+ __rc = __dis == __sk->sk_disconnects ? __condition : -EPIPE; \
__rc; \
})
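Editor's note: rather than counting waiters, sk_wait_event() now snapshots sk_disconnects before dropping the lock and reports -EPIPE if the counter moved while it slept, so a concurrent disconnect() cannot leave the sleeper trusting a stale condition. A self-contained sketch of the generation-counter idiom using plain pthreads (illustrative only, not the kernel code):

	#include <pthread.h>

	struct waitable {
		pthread_mutex_t lock;
		int disconnects;	/* bumped by disconnect() under lock */
		int ready;		/* the condition being waited for */
	};

	/* Caller holds w->lock; returns -1 if a disconnect intervened. */
	static int wait_event_gen(struct waitable *w)
	{
		int gen = w->disconnects;	/* snapshot, like __dis above */

		pthread_mutex_unlock(&w->lock);
		/* ... sleep until woken ... */
		pthread_mutex_lock(&w->lock);

		if (gen != w->disconnects)
			return -1;	/* maps to -EPIPE in sk_wait_event() */
		return w->ready;
	}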
@@ -1258,7 +1233,7 @@ struct proto {
bool kern);
int (*ioctl)(struct sock *sk, int cmd,
- unsigned long arg);
+ int *karg);
int (*init)(struct sock *sk);
void (*destroy)(struct sock *sk);
void (*shutdown)(struct sock *sk, int how);
@@ -1277,8 +1252,7 @@ struct proto {
size_t len);
int (*recvmsg)(struct sock *sk, struct msghdr *msg,
size_t len, int flags, int *addr_len);
- int (*sendpage)(struct sock *sk, struct page *page,
- int offset, size_t size, int flags);
+ void (*splice_eof)(struct socket *sock);
int (*bind)(struct sock *sk,
struct sockaddr *addr, int addr_len);
int (*bind_add)(struct sock *sk,
@@ -1324,6 +1298,7 @@ struct proto {
/*
* Pressure flag: try to collapse.
* Technical note: it is used by multiple contexts non atomically.
+ * Make sure to use READ_ONCE()/WRITE_ONCE() for all reads/writes.
* All the __sk_mem_schedule() is of this nature: accounting
* is strict, actions are advisory and have some latency.
*/
@@ -1340,6 +1315,7 @@ struct proto {
struct kmem_cache *slab;
unsigned int obj_size;
+ unsigned int ipv6_pinfo_offset;
slab_flags_t slab_flags;
unsigned int useroffset; /* Usercopy region offset */
unsigned int usersize; /* Usercopy region size */
@@ -1376,7 +1352,7 @@ static inline int sk_forward_alloc_get(const struct sock *sk)
if (sk->sk_prot->forward_alloc_get)
return sk->sk_prot->forward_alloc_get(sk);
#endif
- return sk->sk_forward_alloc;
+ return READ_ONCE(sk->sk_forward_alloc);
}
static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
@@ -1421,6 +1397,12 @@ static inline bool sk_has_memory_pressure(const struct sock *sk)
return sk->sk_prot->memory_pressure != NULL;
}
+static inline bool sk_under_global_memory_pressure(const struct sock *sk)
+{
+ return sk->sk_prot->memory_pressure &&
+ !!READ_ONCE(*sk->sk_prot->memory_pressure);
+}
+
static inline bool sk_under_memory_pressure(const struct sock *sk)
{
if (!sk->sk_prot->memory_pressure)
@@ -1430,7 +1412,7 @@ static inline bool sk_under_memory_pressure(const struct sock *sk)
mem_cgroup_under_socket_pressure(sk->sk_memcg))
return true;
- return !!*sk->sk_prot->memory_pressure;
+ return !!READ_ONCE(*sk->sk_prot->memory_pressure);
}
static inline long
@@ -1507,7 +1489,7 @@ proto_memory_pressure(struct proto *prot)
{
if (!prot->memory_pressure)
return false;
- return !!*prot->memory_pressure;
+ return !!READ_ONCE(*prot->memory_pressure);
}
@@ -1666,14 +1648,14 @@ static inline void sk_mem_charge(struct sock *sk, int size)
{
if (!sk_has_account(sk))
return;
- sk->sk_forward_alloc -= size;
+ sk_forward_alloc_add(sk, -size);
}
static inline void sk_mem_uncharge(struct sock *sk, int size)
{
if (!sk_has_account(sk))
return;
- sk->sk_forward_alloc += size;
+ sk_forward_alloc_add(sk, size);
sk_mem_reclaim(sk);
}
@@ -1810,12 +1792,11 @@ static inline bool sock_owned_by_user_nocheck(const struct sock *sk)
static inline void sock_release_ownership(struct sock *sk)
{
- if (sock_owned_by_user_nocheck(sk)) {
- sk->sk_lock.owned = 0;
+ DEBUG_NET_WARN_ON_ONCE(!sock_owned_by_user_nocheck(sk));
+ sk->sk_lock.owned = 0;
- /* The sk_lock has mutex_unlock() semantics: */
- mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
- }
+ /* The sk_lock has mutex_unlock() semantics: */
+ mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
}
/* no reclassification while locks are held */
@@ -1854,11 +1835,13 @@ int sk_setsockopt(struct sock *sk, int level, int optname,
sockptr_t optval, unsigned int optlen);
int sock_setsockopt(struct socket *sock, int level, int op,
sockptr_t optval, unsigned int optlen);
+int do_sock_setsockopt(struct socket *sock, bool compat, int level,
+ int optname, sockptr_t optval, int optlen);
+int do_sock_getsockopt(struct socket *sock, bool compat, int level,
+ int optname, sockptr_t optval, sockptr_t optlen);
int sk_getsockopt(struct sock *sk, int level, int optname,
sockptr_t optval, sockptr_t optlen);
-int sock_getsockopt(struct socket *sock, int level, int op,
- char __user *optval, int __user *optlen);
int sock_gettstamp(struct socket *sock, void __user *userstamp,
bool timeval, bool time32);
struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
@@ -1893,7 +1876,9 @@ struct sockcm_cookie {
static inline void sockcm_init(struct sockcm_cookie *sockc,
const struct sock *sk)
{
- *sockc = (struct sockcm_cookie) { .tsflags = sk->sk_tsflags };
+ *sockc = (struct sockcm_cookie) {
+ .tsflags = READ_ONCE(sk->sk_tsflags)
+ };
}
int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg,
@@ -1918,10 +1903,6 @@ int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t len);
int sock_no_recvmsg(struct socket *, struct msghdr *, size_t, int);
int sock_no_mmap(struct file *file, struct socket *sock,
struct vm_area_struct *vma);
-ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
- size_t size, int flags);
-ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page,
- int offset, size_t size, int flags);
/*
* Functions to fill in entries in struct proto_ops when a protocol
@@ -1997,21 +1978,33 @@ static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
/* sk_tx_queue_mapping accept only upto a 16-bit value */
if (WARN_ON_ONCE((unsigned short)tx_queue >= USHRT_MAX))
return;
- sk->sk_tx_queue_mapping = tx_queue;
+ /* Paired with READ_ONCE() in sk_tx_queue_get() and
+ * other WRITE_ONCE() because the socket lock might not be held.
+ */
+ WRITE_ONCE(sk->sk_tx_queue_mapping, tx_queue);
}
#define NO_QUEUE_MAPPING USHRT_MAX
static inline void sk_tx_queue_clear(struct sock *sk)
{
- sk->sk_tx_queue_mapping = NO_QUEUE_MAPPING;
+ /* Paired with READ_ONCE() in sk_tx_queue_get() and
+ * other WRITE_ONCE() because the socket lock might not be held.
+ */
+ WRITE_ONCE(sk->sk_tx_queue_mapping, NO_QUEUE_MAPPING);
}
static inline int sk_tx_queue_get(const struct sock *sk)
{
- if (sk && sk->sk_tx_queue_mapping != NO_QUEUE_MAPPING)
- return sk->sk_tx_queue_mapping;
+ if (sk) {
+ /* Paired with WRITE_ONCE() in sk_tx_queue_clear()
+ * and sk_tx_queue_set().
+ */
+ int val = READ_ONCE(sk->sk_tx_queue_mapping);
+ if (val != NO_QUEUE_MAPPING)
+ return val;
+ }
return -1;
}
@@ -2100,6 +2093,7 @@ static inline void sock_graft(struct sock *sk, struct socket *parent)
}
kuid_t sock_i_uid(struct sock *sk);
+unsigned long __sock_i_ino(struct sock *sk);
unsigned long sock_i_ino(struct sock *sk);
static inline kuid_t sock_net_uid(const struct net *net, const struct sock *sk)
@@ -2130,14 +2124,14 @@ static inline bool sk_rethink_txhash(struct sock *sk)
}
static inline struct dst_entry *
-__sk_dst_get(struct sock *sk)
+__sk_dst_get(const struct sock *sk)
{
return rcu_dereference_check(sk->sk_dst_cache,
lockdep_sock_is_held(sk));
}
static inline struct dst_entry *
-sk_dst_get(struct sock *sk)
+sk_dst_get(const struct sock *sk)
{
struct dst_entry *dst;
@@ -2159,7 +2153,7 @@ static inline void __dst_negative_advice(struct sock *sk)
if (ndst != dst) {
rcu_assign_pointer(sk->sk_dst_cache, ndst);
sk_tx_queue_clear(sk);
- sk->sk_dst_pending_confirm = 0;
+ WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
}
}
}
@@ -2176,7 +2170,7 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst)
struct dst_entry *old_dst;
sk_tx_queue_clear(sk);
- sk->sk_dst_pending_confirm = 0;
+ WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
old_dst = rcu_dereference_protected(sk->sk_dst_cache,
lockdep_sock_is_held(sk));
rcu_assign_pointer(sk->sk_dst_cache, dst);
@@ -2189,7 +2183,7 @@ sk_dst_set(struct sock *sk, struct dst_entry *dst)
struct dst_entry *old_dst;
sk_tx_queue_clear(sk);
- sk->sk_dst_pending_confirm = 0;
+ WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
dst_release(old_dst);
}
@@ -2227,7 +2221,7 @@ static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n)
}
}
-bool sk_mc_loop(struct sock *sk);
+bool sk_mc_loop(const struct sock *sk);
static inline bool sk_can_gso(const struct sock *sk)
{
@@ -2691,9 +2685,9 @@ void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
static inline void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
- ktime_t kt = skb->tstamp;
struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
-
+ u32 tsflags = READ_ONCE(sk->sk_tsflags);
+ ktime_t kt = skb->tstamp;
/*
* generate control messages if
* - receive time stamping in software requested
@@ -2701,10 +2695,10 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
* - hardware time stamps available and wanted
*/
if (sock_flag(sk, SOCK_RCVTSTAMP) ||
- (sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
- (kt && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
+ (tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
+ (kt && tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
(hwtstamps->hwtstamp &&
- (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
+ (tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
__sock_recv_timestamp(msg, sk, skb);
else
sock_write_timestamp(sk, kt);
@@ -2726,7 +2720,8 @@ static inline void sock_recv_cmsgs(struct msghdr *msg, struct sock *sk,
#define TSFLAGS_ANY (SOF_TIMESTAMPING_SOFTWARE | \
SOF_TIMESTAMPING_RAW_HARDWARE)
- if (sk->sk_flags & FLAGS_RECV_CMSGS || sk->sk_tsflags & TSFLAGS_ANY)
+ if (sk->sk_flags & FLAGS_RECV_CMSGS ||
+ READ_ONCE(sk->sk_tsflags) & TSFLAGS_ANY)
__sock_recv_cmsgs(msg, sk, skb);
else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
sock_write_timestamp(sk, skb->tstamp);
@@ -2770,9 +2765,30 @@ static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
&skb_shinfo(skb)->tskey);
}
+static inline bool sk_is_inet(const struct sock *sk)
+{
+ int family = READ_ONCE(sk->sk_family);
+
+ return family == AF_INET || family == AF_INET6;
+}
+
static inline bool sk_is_tcp(const struct sock *sk)
{
- return sk->sk_type == SOCK_STREAM && sk->sk_protocol == IPPROTO_TCP;
+ return sk_is_inet(sk) &&
+ sk->sk_type == SOCK_STREAM &&
+ sk->sk_protocol == IPPROTO_TCP;
+}
+
+static inline bool sk_is_udp(const struct sock *sk)
+{
+ return sk_is_inet(sk) &&
+ sk->sk_type == SOCK_DGRAM &&
+ sk->sk_protocol == IPPROTO_UDP;
+}
+
+static inline bool sk_is_stream_unix(const struct sock *sk)
+{
+ return sk->sk_family == AF_UNIX && sk->sk_type == SOCK_STREAM;
}
/**
@@ -2818,20 +2834,23 @@ sk_is_refcounted(struct sock *sk)
* skb_steal_sock - steal a socket from an sk_buff
* @skb: sk_buff to steal the socket from
* @refcounted: is set to true if the socket is reference-counted
+ * @prefetched: is set to true if the socket was assigned by BPF
*/
static inline struct sock *
-skb_steal_sock(struct sk_buff *skb, bool *refcounted)
+skb_steal_sock(struct sk_buff *skb, bool *refcounted, bool *prefetched)
{
if (skb->sk) {
struct sock *sk = skb->sk;
*refcounted = true;
- if (skb_sk_is_prefetched(skb))
+ *prefetched = skb_sk_is_prefetched(skb);
+ if (*prefetched)
*refcounted = sk_is_refcounted(sk);
skb->destructor = NULL;
skb->sk = NULL;
return sk;
}
+ *prefetched = false;
*refcounted = false;
return NULL;
}
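Editor's note: skb_steal_sock() now reports, via the extra out-parameter, whether the socket was prefetched (i.e. assigned by BPF sk_lookup/sk_assign), letting callers treat BPF-selected sockets differently from early-demux ones. A hedged caller sketch with the lookup logic simplified:

	static struct sock *rx_socket_lookup(struct sk_buff *skb)
	{
		bool refcounted, prefetched;
		struct sock *sk = skb_steal_sock(skb, &refcounted, &prefetched);

		if (sk) {
			if (prefetched) {
				/* BPF chose this socket; e.g. relax some rechecks */
			}
			return sk;	/* sock_put() later iff refcounted */
		}
		/* ... fall back to the regular hash-table lookup ... */
		return NULL;
	}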
@@ -2893,7 +2912,6 @@ extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;
extern int sysctl_tstamp_allow_data;
-extern int sysctl_optmem_max;
extern __u32 sysctl_wmem_default;
extern __u32 sysctl_rmem_default;
@@ -2973,6 +2991,9 @@ int sock_get_timeout(long timeo, void *optval, bool old_timeval);
int sock_copy_user_timeval(struct __kernel_sock_timeval *tv,
sockptr_t optval, int optlen, bool old_timeval);
+int sock_ioctl_inout(struct sock *sk, unsigned int cmd,
+ void __user *arg, void *karg, size_t size);
+int sk_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
static inline bool sk_is_readable(struct sock *sk)
{
if (sk->sk_prot->sock_is_readable)
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index ca0312b78294..8346b0d29542 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -201,8 +201,6 @@ struct switchdev_obj_in_state_mrp {
#define SWITCHDEV_OBJ_IN_STATE_MRP(OBJ) \
container_of((OBJ), struct switchdev_obj_in_state_mrp, obj)
-typedef int switchdev_obj_dump_cb_t(struct switchdev_obj *obj);
-
struct switchdev_brport {
struct net_device *dev;
const void *ctx;
@@ -231,6 +229,7 @@ enum switchdev_notifier_type {
SWITCHDEV_BRPORT_OFFLOADED,
SWITCHDEV_BRPORT_UNOFFLOADED,
+ SWITCHDEV_BRPORT_REPLAY,
};
struct switchdev_notifier_info {
@@ -299,11 +298,19 @@ void switchdev_bridge_port_unoffload(struct net_device *brport_dev,
const void *ctx,
struct notifier_block *atomic_nb,
struct notifier_block *blocking_nb);
+int switchdev_bridge_port_replay(struct net_device *brport_dev,
+ struct net_device *dev, const void *ctx,
+ struct notifier_block *atomic_nb,
+ struct notifier_block *blocking_nb,
+ struct netlink_ext_ack *extack);
void switchdev_deferred_process(void);
int switchdev_port_attr_set(struct net_device *dev,
const struct switchdev_attr *attr,
struct netlink_ext_ack *extack);
+bool switchdev_port_obj_act_is_deferred(struct net_device *dev,
+ enum switchdev_notifier_type nt,
+ const struct switchdev_obj *obj);
int switchdev_port_obj_add(struct net_device *dev,
const struct switchdev_obj *obj,
struct netlink_ext_ack *extack);
@@ -322,10 +329,6 @@ int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
struct switchdev_notifier_info *info,
struct netlink_ext_ack *extack);
-void switchdev_port_fwd_mark_set(struct net_device *dev,
- struct net_device *group_dev,
- bool joining);
-
int switchdev_handle_fdb_event_to_device(struct net_device *dev, unsigned long event,
const struct switchdev_notifier_fdb_info *fdb_info,
bool (*check_cb)(const struct net_device *dev),
diff --git a/include/net/tc_act/tc_ct.h b/include/net/tc_act/tc_ct.h
index b24ea2d9400b..77f87c622a2e 100644
--- a/include/net/tc_act/tc_ct.h
+++ b/include/net/tc_act/tc_ct.h
@@ -22,6 +22,7 @@ struct tcf_ct_params {
struct nf_nat_range2 range;
bool ipv4_range;
+ bool put_labels;
u16 ct_action;
@@ -57,6 +58,11 @@ static inline struct nf_flowtable *tcf_ct_ft(const struct tc_action *a)
return to_ct_params(a)->nf_ft;
}
+static inline struct nf_conntrack_helper *tcf_ct_helper(const struct tc_action *a)
+{
+ return to_ct_params(a)->helper;
+}
+
#else
static inline uint16_t tcf_ct_zone(const struct tc_action *a) { return 0; }
static inline int tcf_ct_action(const struct tc_action *a) { return 0; }
@@ -64,6 +70,10 @@ static inline struct nf_flowtable *tcf_ct_ft(const struct tc_action *a)
{
return NULL;
}
+static inline struct nf_conntrack_helper *tcf_ct_helper(const struct tc_action *a)
+{
+ return NULL;
+}
#endif /* CONFIG_NF_CONNTRACK */
#if IS_ENABLED(CONFIG_NET_ACT_CT)
diff --git a/include/net/tc_act/tc_ipt.h b/include/net/tc_act/tc_ipt.h
deleted file mode 100644
index 4225fcb1c6ba..000000000000
--- a/include/net/tc_act/tc_ipt.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __NET_TC_IPT_H
-#define __NET_TC_IPT_H
-
-#include <net/act_api.h>
-
-struct xt_entry_target;
-
-struct tcf_ipt {
- struct tc_action common;
- u32 tcfi_hook;
- char *tcfi_tname;
- struct xt_entry_target *tcfi_t;
-};
-#define to_ipt(a) ((struct tcf_ipt *)a)
-
-#endif /* __NET_TC_IPT_H */
diff --git a/include/net/tc_act/tc_mirred.h b/include/net/tc_act/tc_mirred.h
index 32ce8ea36950..75722d967bf2 100644
--- a/include/net/tc_act/tc_mirred.h
+++ b/include/net/tc_act/tc_mirred.h
@@ -8,6 +8,7 @@
struct tcf_mirred {
struct tc_action common;
int tcfm_eaction;
+ u32 tcfm_blockid;
bool tcfm_mac_header_xmit;
struct net_device __rcu *tcfm_dev;
netdevice_tracker tcfm_dev_tracker;
diff --git a/include/net/tc_wrapper.h b/include/net/tc_wrapper.h
index a6d481b5bcbc..a608546bcefc 100644
--- a/include/net/tc_wrapper.h
+++ b/include/net/tc_wrapper.h
@@ -117,10 +117,6 @@ static inline int tc_act(struct sk_buff *skb, const struct tc_action *a,
if (a->ops->act == tcf_ife_act)
return tcf_ife_act(skb, a, res);
#endif
-#if IS_BUILTIN(CONFIG_NET_ACT_IPT)
- if (a->ops->act == tcf_ipt_act)
- return tcf_ipt_act(skb, a, res);
-#endif
#if IS_BUILTIN(CONFIG_NET_ACT_SIMP)
if (a->ops->act == tcf_simp_act)
return tcf_simp_act(skb, a, res);
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 5066e4586cf0..f6eba9652d01 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -37,6 +37,7 @@
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
+#include <net/tcp_ao.h>
#include <net/inet_ecn.h>
#include <net/dst.h>
#include <net/mptcp.h>
@@ -131,6 +132,8 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
#define TCP_FIN_TIMEOUT_MAX (120 * HZ) /* max TCP_LINGER2 value (two minutes) */
#define TCP_DELACK_MAX ((unsigned)(HZ/5)) /* maximal time to delay before sending an ACK */
+static_assert((1 << ATO_BITS) > TCP_DELACK_MAX);
+
#if HZ >= 100
#define TCP_DELACK_MIN ((unsigned)(HZ/25)) /* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN ((unsigned)(HZ/25))
@@ -141,6 +144,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
#define TCP_RTO_MAX ((unsigned)(120*HZ))
#define TCP_RTO_MIN ((unsigned)(HZ/5))
#define TCP_TIMEOUT_MIN (2U) /* Min timeout for TCP timers in jiffies */
+
+#define TCP_TIMEOUT_MIN_US (2*USEC_PER_MSEC) /* Min TCP timeout in microsecs */
+
#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ)) /* RFC6298 2.1 initial RTO value */
#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ)) /* RFC 1122 initial RTO value, now
* used as a fallback RTO for the
@@ -161,9 +167,12 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
#define MAX_TCP_KEEPCNT 127
#define MAX_TCP_SYNCNT 127
-#define TCP_SYNQ_INTERVAL (HZ/5) /* Period of SYNACK timer */
+/* Ensure that TCP PAWS checks are relaxed after ~2147 seconds
+ * to avoid overflows. This assumes a clock smaller than 1 MHz.
+ * Default clock is 1 kHz, tcp_usec_ts uses 1 MHz.
+ */
+#define TCP_PAWS_WRAP (INT_MAX / USEC_PER_SEC)
-#define TCP_PAWS_24DAYS (60 * 60 * 24 * 24)
#define TCP_PAWS_MSL 60 /* Per-host timestamps are invalidated
* after this time. It should be equal
* (or greater than) TCP_TIMEWAIT_LEN
@@ -186,6 +195,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
#define TCPOPT_SACK 5 /* SACK Block */
#define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG 19 /* MD5 Signature (RFC2385) */
+#define TCPOPT_AO 29 /* Authentication Option (RFC5925) */
#define TCPOPT_MPTCP 30 /* Multipath TCP (RFC6824) */
#define TCPOPT_FASTOPEN 34 /* Fast open (RFC7413) */
#define TCPOPT_EXP 254 /* Experimental */
@@ -324,25 +334,20 @@ int tcp_v4_early_demux(struct sk_buff *skb);
int tcp_v4_rcv(struct sk_buff *skb);
void tcp_remove_empty_skb(struct sock *sk);
-int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied,
size_t size, struct ubuf_info *uarg);
-int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
- int flags);
-int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
- size_t size, int flags);
-ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
- size_t size, int flags);
+void tcp_splice_eof(struct socket *sock);
int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
+int tcp_wmem_schedule(struct sock *sk, int copy);
void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle,
int size_goal);
void tcp_release_cb(struct sock *sk);
void tcp_wfree(struct sk_buff *skb);
void tcp_write_timer_handler(struct sock *sk);
void tcp_delack_timer_handler(struct sock *sk);
-int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+int tcp_ioctl(struct sock *sk, int cmd, int *karg);
int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_space_adjust(struct sock *sk);
@@ -352,16 +357,17 @@ void tcp_twsk_purge(struct list_head *net_exit_list, int family);
ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags);
-struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
+struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp,
bool force_schedule);
-void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
-static inline void tcp_dec_quickack_mode(struct sock *sk,
- const unsigned int pkts)
+static inline void tcp_dec_quickack_mode(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
if (icsk->icsk_ack.quick) {
+ /* How many ACKs S/ACKing new data have we sent? */
+ const unsigned int pkts = inet_csk_ack_scheduled(sk) ? 1 : 0;
+
if (pkts >= icsk->icsk_ack.quick) {
icsk->icsk_ack.quick = 0;
/* Leaving quickack mode we deflate ATO. */
@@ -432,7 +438,6 @@ int tcp_mmap(struct file *file, struct socket *sock,
void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
struct tcp_options_received *opt_rx,
int estab, struct tcp_fastopen_cookie *foc);
-const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
/*
* BPF SKB-less helpers
@@ -485,13 +490,14 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
/* From syncookies.c */
struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
- struct dst_entry *dst, u32 tsoff);
-int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
- u32 cookie);
+ struct dst_entry *dst);
+int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th);
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
- const struct tcp_request_sock_ops *af_ops,
- struct sock *sk, struct sk_buff *skb);
+ struct sock *sk, struct sk_buff *skb,
+ struct tcp_options_received *tcp_opt,
+ int mss, u32 tsoff);
+
#ifdef CONFIG_SYN_COOKIES
/* Syncookies use a monotonic timer which increments every 60 seconds.
@@ -577,12 +583,15 @@ __u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
u64 cookie_init_timestamp(struct request_sock *req, u64 now);
bool cookie_timestamp_decode(const struct net *net,
struct tcp_options_received *opt);
-bool cookie_ecn_ok(const struct tcp_options_received *opt,
- const struct net *net, const struct dst_entry *dst);
+
+static inline bool cookie_ecn_ok(const struct net *net, const struct dst_entry *dst)
+{
+ return READ_ONCE(net->ipv4.sysctl_tcp_ecn) ||
+ dst_feature(dst, RTAX_FEATURE_ECN);
+}
/* From net/ipv6/syncookies.c */
-int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
- u32 cookie);
+int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th);
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
@@ -611,7 +620,6 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
unsigned int mss_now, gfp_t gfp);
void tcp_send_probe0(struct sock *);
-void tcp_send_partial(struct sock *);
int tcp_write_wakeup(struct sock *, int mib);
void tcp_send_fin(struct sock *sk);
void tcp_send_active_reset(struct sock *sk, gfp_t priority);
@@ -629,7 +637,6 @@ void tcp_skb_collapse_tstamp(struct sk_buff *skb,
void tcp_rearm_rto(struct sock *sk);
void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
void tcp_reset(struct sock *sk, struct sk_buff *skb);
-void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
void tcp_fin(struct sock *sk);
void tcp_check_space(struct sock *sk);
void tcp_sack_compress_send_ack(struct sock *sk);
@@ -728,8 +735,10 @@ static inline void tcp_fast_path_check(struct sock *sk)
tcp_fast_path_on(tp);
}
+u32 tcp_delack_max(const struct sock *sk);
+
/* Compute the actual rto_min value */
-static inline u32 tcp_rto_min(struct sock *sk)
+static inline u32 tcp_rto_min(const struct sock *sk)
{
const struct dst_entry *dst = __sk_dst_get(sk);
u32 rto_min = inet_csk(sk)->icsk_rto_min;
@@ -739,7 +748,7 @@ static inline u32 tcp_rto_min(struct sock *sk)
return rto_min;
}
-static inline u32 tcp_rto_min_us(struct sock *sk)
+static inline u32 tcp_rto_min_us(const struct sock *sk)
{
return jiffies_to_usecs(tcp_rto_min(sk));
}
@@ -799,22 +808,31 @@ static inline u64 tcp_clock_us(void)
return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
}
-/* This should only be used in contexts where tp->tcp_mstamp is up to date */
-static inline u32 tcp_time_stamp(const struct tcp_sock *tp)
+static inline u64 tcp_clock_ms(void)
{
- return div_u64(tp->tcp_mstamp, USEC_PER_SEC / TCP_TS_HZ);
+ return div_u64(tcp_clock_ns(), NSEC_PER_MSEC);
}
-/* Convert a nsec timestamp into TCP TSval timestamp (ms based currently) */
-static inline u32 tcp_ns_to_ts(u64 ns)
+/* TCP Timestamp included in TS option (RFC 1323) can either use ms
+ * or usec resolution. Each socket carries a flag to select one or the
+ * other resolution, as the route attribute could change anytime.
+ * Each flow must stick to its initial resolution.
+ */
+static inline u32 tcp_clock_ts(bool usec_ts)
{
- return div_u64(ns, NSEC_PER_SEC / TCP_TS_HZ);
+ return usec_ts ? tcp_clock_us() : tcp_clock_ms();
}
-/* Could use tcp_clock_us() / 1000, but this version uses a single divide */
-static inline u32 tcp_time_stamp_raw(void)
+static inline u32 tcp_time_stamp_ms(const struct tcp_sock *tp)
{
- return tcp_ns_to_ts(tcp_clock_ns());
+ return div_u64(tp->tcp_mstamp, USEC_PER_MSEC);
+}
+
+static inline u32 tcp_time_stamp_ts(const struct tcp_sock *tp)
+{
+ if (tp->tcp_usec_ts)
+ return tp->tcp_mstamp;
+ return tcp_time_stamp_ms(tp);
}
void tcp_mstamp_refresh(struct tcp_sock *tp);
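Editor's note: the helpers above split timestamp generation into explicit ms and usec variants — tcp_clock_ts(usec_ts) picks the per-flow resolution, and tcp_time_stamp_ts() can return tcp_mstamp unconverted for usec flows because it is already in microseconds. A hedged user-space sanity check of the split (mirrors the math rather than including kernel headers):

	#include <stdint.h>
	#include <stdio.h>

	#define NSEC_PER_MSEC 1000000ULL
	#define NSEC_PER_USEC 1000ULL

	static uint32_t clock_ts(uint64_t ns, int usec_ts)
	{
		return usec_ts ? (uint32_t)(ns / NSEC_PER_USEC)
			       : (uint32_t)(ns / NSEC_PER_MSEC);
	}

	int main(void)
	{
		uint64_t now_ns = 5000000000ULL;	/* 5 seconds */

		printf("%u\n", clock_ts(now_ns, 0));	/* 5000: ms ticks */
		printf("%u\n", clock_ts(now_ns, 1));	/* 5000000: usec ticks */
		return 0;
	}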
@@ -824,17 +842,30 @@ static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
return max_t(s64, t1 - t0, 0);
}
-static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
-{
- return tcp_ns_to_ts(skb->skb_mstamp_ns);
-}
-
/* provide the departure time in us unit */
static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
{
return div_u64(skb->skb_mstamp_ns, NSEC_PER_USEC);
}
+/* Provide skb TSval in usec or ms unit */
+static inline u32 tcp_skb_timestamp_ts(bool usec_ts, const struct sk_buff *skb)
+{
+ if (usec_ts)
+ return tcp_skb_timestamp_us(skb);
+
+ return div_u64(skb->skb_mstamp_ns, NSEC_PER_MSEC);
+}
+
+static inline u32 tcp_tw_tsval(const struct tcp_timewait_sock *tcptw)
+{
+ return tcp_clock_ts(tcptw->tw_sk.tw_usec_ts) + tcptw->tw_ts_offset;
+}
+
+static inline u32 tcp_rsk_tsval(const struct tcp_request_sock *treq)
+{
+ return tcp_clock_ts(treq->req_usec_ts) + treq->ts_off;
+}
#define tcp_flag_byte(th) (((u_int8_t *)th)[13])
@@ -1437,13 +1468,41 @@ void tcp_select_initial_window(const struct sock *sk, int __space,
__u32 *window_clamp, int wscale_ok,
__u8 *rcv_wscale, __u32 init_rcv_wnd);
+static inline int __tcp_win_from_space(u8 scaling_ratio, int space)
+{
+ s64 scaled_space = (s64)space * scaling_ratio;
+
+ return scaled_space >> TCP_RMEM_TO_WIN_SCALE;
+}
+
static inline int tcp_win_from_space(const struct sock *sk, int space)
{
- int tcp_adv_win_scale = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale);
+ return __tcp_win_from_space(tcp_sk(sk)->scaling_ratio, space);
+}
+
+/* inverse of __tcp_win_from_space() */
+static inline int __tcp_space_from_win(u8 scaling_ratio, int win)
+{
+ u64 val = (u64)win << TCP_RMEM_TO_WIN_SCALE;
+
+ do_div(val, scaling_ratio);
+ return val;
+}
- return tcp_adv_win_scale <= 0 ?
- (space>>(-tcp_adv_win_scale)) :
- space - (space>>tcp_adv_win_scale);
+static inline int tcp_space_from_win(const struct sock *sk, int win)
+{
+ return __tcp_space_from_win(tcp_sk(sk)->scaling_ratio, win);
+}
+
+/* Assume a conservative default of 1200 bytes of payload per 4K page.
+ * This may be adjusted later in tcp_measure_rcv_mss().
+ */
+#define TCP_DEFAULT_SCALING_RATIO ((1200 << TCP_RMEM_TO_WIN_SCALE) / \
+ SKB_TRUESIZE(4096))
+
+static inline void tcp_scaling_ratio_init(struct sock *sk)
+{
+ tcp_sk(sk)->scaling_ratio = TCP_DEFAULT_SCALING_RATIO;
}
/* Note: caller must be prepared to deal with negative returns */
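Editor's note: the per-socket scaling_ratio replaces the global tcp_adv_win_scale — the advertised window is space * ratio >> TCP_RMEM_TO_WIN_SCALE and __tcp_space_from_win() inverts it, with the default ratio approximating 1200 payload bytes per 4 KiB of truesize. A hedged numeric sketch in user-space C (assumes the scale shift is 8, i.e. the ratio is in 1/256 units, and picks 66 as a plausible value near (1200 << 8) / SKB_TRUESIZE(4096)):

	#include <stdint.h>
	#include <stdio.h>

	#define SCALE 8			/* assumed TCP_RMEM_TO_WIN_SCALE */

	static int win_from_space(uint8_t ratio, int space)
	{
		return (int)(((int64_t)space * ratio) >> SCALE);
	}

	static int space_from_win(uint8_t ratio, int win)
	{
		return (int)(((uint64_t)win << SCALE) / ratio);
	}

	int main(void)
	{
		uint8_t ratio = 66;	/* hypothetical default, ~1200/4672 * 256 */
		int space = 131072;	/* 128 KiB of receive buffer */
		int win = win_from_space(ratio, space);

		/* win=33792: roughly 26% of the buffer becomes window */
		printf("win=%d space_back=%d\n", win, space_from_win(ratio, win));
		return 0;
	}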
@@ -1459,17 +1518,22 @@ static inline int tcp_full_space(const struct sock *sk)
return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
}
-static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
+static inline void __tcp_adjust_rcv_ssthresh(struct sock *sk, u32 new_ssthresh)
{
int unused_mem = sk_unused_reserved_mem(sk);
struct tcp_sock *tp = tcp_sk(sk);
- tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
+ tp->rcv_ssthresh = min(tp->rcv_ssthresh, new_ssthresh);
if (unused_mem)
tp->rcv_ssthresh = max_t(u32, tp->rcv_ssthresh,
tcp_win_from_space(sk, unused_mem));
}
+static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
+{
+ __tcp_adjust_rcv_ssthresh(sk, 4U * tcp_sk(sk)->advmss);
+}
+
void tcp_cleanup_rbuf(struct sock *sk, int copied);
void __tcp_cleanup_rbuf(struct sock *sk, int copied);
@@ -1514,25 +1578,38 @@ void tcp_leave_memory_pressure(struct sock *sk);
static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
struct net *net = sock_net((struct sock *)tp);
+ int val;
- return tp->keepalive_intvl ? :
- READ_ONCE(net->ipv4.sysctl_tcp_keepalive_intvl);
+ /* Paired with WRITE_ONCE() in tcp_sock_set_keepintvl()
+ * and do_tcp_setsockopt().
+ */
+ val = READ_ONCE(tp->keepalive_intvl);
+
+ return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_intvl);
}
static inline int keepalive_time_when(const struct tcp_sock *tp)
{
struct net *net = sock_net((struct sock *)tp);
+ int val;
+
+ /* Paired with WRITE_ONCE() in tcp_sock_set_keepidle_locked() */
+ val = READ_ONCE(tp->keepalive_time);
- return tp->keepalive_time ? :
- READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time);
+ return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time);
}
static inline int keepalive_probes(const struct tcp_sock *tp)
{
struct net *net = sock_net((struct sock *)tp);
+ int val;
- return tp->keepalive_probes ? :
- READ_ONCE(net->ipv4.sysctl_tcp_keepalive_probes);
+ /* Paired with WRITE_ONCE() in tcp_sock_set_keepcnt()
+ * and do_tcp_setsockopt().
+ */
+ val = READ_ONCE(tp->keepalive_probes);
+
+ return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_probes);
}
static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
@@ -1561,7 +1638,7 @@ static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
return true;
if (unlikely(!time_before32(ktime_get_seconds(),
- rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)))
+ rx_opt->ts_recent_stamp + TCP_PAWS_WRAP)))
return true;
/*
* Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
@@ -1621,12 +1698,7 @@ static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
tp->retransmit_skb_hint = NULL;
}
-union tcp_md5_addr {
- struct in_addr a4;
-#if IS_ENABLED(CONFIG_IPV6)
- struct in6_addr a6;
-#endif
-};
+#define tcp_md5_addr tcp_ao_addr
/* - key database */
struct tcp_md5sig_key {
@@ -1670,12 +1742,39 @@ union tcp_md5sum_block {
#endif
};
-/* - pool: digest algorithm, hash description and scratch buffer */
-struct tcp_md5sig_pool {
- struct ahash_request *md5_req;
- void *scratch;
+/*
+ * struct tcp_sigpool - per-CPU pool of ahash_requests
+ * @scratch: per-CPU temporary area that can be used between
+ * tcp_sigpool_start() and tcp_sigpool_end() to perform
+ * a crypto request
+ * @req: pre-allocated ahash request
+ */
+struct tcp_sigpool {
+ void *scratch;
+ struct ahash_request *req;
};
+int tcp_sigpool_alloc_ahash(const char *alg, size_t scratch_size);
+void tcp_sigpool_get(unsigned int id);
+void tcp_sigpool_release(unsigned int id);
+int tcp_sigpool_hash_skb_data(struct tcp_sigpool *hp,
+ const struct sk_buff *skb,
+ unsigned int header_len);
+
+/**
+ * tcp_sigpool_start - disable bh and start using tcp_sigpool_ahash
+ * @id: tcp_sigpool that was previously allocated by tcp_sigpool_alloc_ahash()
+ * @c: returned tcp_sigpool for usage (uninitialized on failure)
+ *
+ * Returns 0 on success, error otherwise.
+ */
+int tcp_sigpool_start(unsigned int id, struct tcp_sigpool *c);
+/**
+ * tcp_sigpool_end - enable bh and stop using tcp_sigpool
+ * @c: tcp_sigpool context that was returned by tcp_sigpool_start()
+ */
+void tcp_sigpool_end(struct tcp_sigpool *c);
+size_t tcp_sigpool_algo(unsigned int id, char *buf, size_t buf_len);
/* - functions */
int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
const struct sock *sk, const struct sk_buff *skb);
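Editor's note: the per-CPU MD5 pool becomes a generic tcp_sigpool, shared by MD5 and the incoming TCP-AO support — users allocate a pool id once with tcp_sigpool_alloc_ahash(), then bracket each crypto operation with tcp_sigpool_start()/tcp_sigpool_end(), which disable and re-enable bottom halves around the per-CPU request. A hedged lifecycle sketch (allocation is shown inline for brevity where real users would cache the id, and the digest finalization is elided):

	static int example_hash_skb(const struct sk_buff *skb,
				    unsigned int header_len, u8 *out)
	{
		struct tcp_sigpool hp;
		int id, err;

		id = tcp_sigpool_alloc_ahash("md5", 128 /* scratch size */);
		if (id < 0)
			return id;

		err = tcp_sigpool_start(id, &hp);	/* disables bh */
		if (!err) {
			err = tcp_sigpool_hash_skb_data(&hp, skb, header_len);
			/* ... finalize the digest into out via hp.req ... */
			tcp_sigpool_end(&hp);		/* re-enables bh */
		}
		tcp_sigpool_release(id);
		return err;
	}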
@@ -1688,28 +1787,36 @@ int tcp_md5_key_copy(struct sock *sk, const union tcp_md5_addr *addr,
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
int family, u8 prefixlen, int l3index, u8 flags);
+void tcp_clear_md5_list(struct sock *sk);
struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
const struct sock *addr_sk);
#ifdef CONFIG_TCP_MD5SIG
-#include <linux/jump_label.h>
-extern struct static_key_false_deferred tcp_md5_needed;
struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
const union tcp_md5_addr *addr,
- int family);
+ int family, bool any_l3index);
static inline struct tcp_md5sig_key *
tcp_md5_do_lookup(const struct sock *sk, int l3index,
const union tcp_md5_addr *addr, int family)
{
if (!static_branch_unlikely(&tcp_md5_needed.key))
return NULL;
- return __tcp_md5_do_lookup(sk, l3index, addr, family);
+ return __tcp_md5_do_lookup(sk, l3index, addr, family, false);
+}
+
+static inline struct tcp_md5sig_key *
+tcp_md5_do_lookup_any_l3index(const struct sock *sk,
+ const union tcp_md5_addr *addr, int family)
+{
+ if (!static_branch_unlikely(&tcp_md5_needed.key))
+ return NULL;
+ return __tcp_md5_do_lookup(sk, 0, addr, family, true);
}
enum skb_drop_reason
tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
const void *saddr, const void *daddr,
- int family, int dif, int sdif);
+ int family, int l3index, const __u8 *hash_location);
#define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key)
@@ -1721,27 +1828,29 @@ tcp_md5_do_lookup(const struct sock *sk, int l3index,
return NULL;
}
+static inline struct tcp_md5sig_key *
+tcp_md5_do_lookup_any_l3index(const struct sock *sk,
+ const union tcp_md5_addr *addr, int family)
+{
+ return NULL;
+}
+
static inline enum skb_drop_reason
tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
const void *saddr, const void *daddr,
- int family, int dif, int sdif)
+ int family, int l3index, const __u8 *hash_location)
{
return SKB_NOT_DROPPED_YET;
}
#define tcp_twsk_md5_key(twsk) NULL
#endif
-bool tcp_alloc_md5sig_pool(void);
+int tcp_md5_alloc_sigpool(void);
+void tcp_md5_release_sigpool(void);
+void tcp_md5_add_sigpool(void);
+extern int tcp_md5_sigpool_id;
-struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
-static inline void tcp_put_md5sig_pool(void)
-{
- local_bh_enable();
-}
-
-int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
- unsigned int header_len);
-int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
+int tcp_md5_hash_key(struct tcp_sigpool *hp,
const struct tcp_md5sig_key *key);
/* From tcp_fastopen.c */
@@ -2001,7 +2110,7 @@ static inline bool inet_sk_transparent(const struct sock *sk)
case TCP_NEW_SYN_RECV:
return inet_rsk(inet_reqsk(sk))->no_srccheck;
}
- return inet_sk(sk)->transparent;
+ return inet_test_bit(TRANSPARENT, sk);
}
/* Determines whether this is a thin stream (which may suffer from
@@ -2046,14 +2155,22 @@ INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff))
INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb));
INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *skb, int thoff));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb));
-int tcp_gro_complete(struct sk_buff *skb);
+#ifdef CONFIG_INET
+void tcp_gro_complete(struct sk_buff *skb);
+#else
+static inline void tcp_gro_complete(struct sk_buff *skb) { }
+#endif
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
{
struct net *net = sock_net((struct sock *)tp);
- return tp->notsent_lowat ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
+ u32 val;
+
+ val = READ_ONCE(tp->notsent_lowat);
+
+ return val ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
}
bool tcp_stream_memory_free(const struct sock *sk, int wake);
@@ -2082,6 +2199,18 @@ struct tcp_sock_af_ops {
sockptr_t optval,
int optlen);
#endif
+#ifdef CONFIG_TCP_AO
+ int (*ao_parse)(struct sock *sk, int optname, sockptr_t optval, int optlen);
+ struct tcp_ao_key *(*ao_lookup)(const struct sock *sk,
+ struct sock *addr_sk,
+ int sndid, int rcvid);
+ int (*ao_calc_key_sk)(struct tcp_ao_key *mkt, u8 *key,
+ const struct sock *sk,
+ __be32 sisn, __be32 disn, bool send);
+ int (*calc_ao_hash)(char *location, struct tcp_ao_key *ao,
+ const struct sock *sk, const struct sk_buff *skb,
+ const u8 *tkey, int hash_offset, u32 sne);
+#endif
};
struct tcp_request_sock_ops {
@@ -2094,6 +2223,15 @@ struct tcp_request_sock_ops {
const struct sock *sk,
const struct sk_buff *skb);
#endif
+#ifdef CONFIG_TCP_AO
+ struct tcp_ao_key *(*ao_lookup)(const struct sock *sk,
+ struct request_sock *req,
+ int sndid, int rcvid);
+ int (*ao_calc_key)(struct tcp_ao_key *mkt, u8 *key, struct request_sock *sk);
+ int (*ao_synack_hash)(char *ao_hash, struct tcp_ao_key *mkt,
+ struct request_sock *req, const struct sk_buff *skb,
+ int hash_offset, u32 sne);
+#endif
#ifdef CONFIG_SYN_COOKIES
__u32 (*cookie_init_seq)(const struct sk_buff *skb,
__u16 *mss);
@@ -2134,6 +2272,76 @@ static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
}
#endif
+struct tcp_key {
+ union {
+ struct {
+ struct tcp_ao_key *ao_key;
+ char *traffic_key;
+ u32 sne;
+ u8 rcv_next;
+ };
+ struct tcp_md5sig_key *md5_key;
+ };
+ enum {
+ TCP_KEY_NONE = 0,
+ TCP_KEY_MD5,
+ TCP_KEY_AO,
+ } type;
+};
+
+static inline void tcp_get_current_key(const struct sock *sk,
+ struct tcp_key *out)
+{
+#if defined(CONFIG_TCP_AO) || defined(CONFIG_TCP_MD5SIG)
+ const struct tcp_sock *tp = tcp_sk(sk);
+#endif
+
+#ifdef CONFIG_TCP_AO
+ if (static_branch_unlikely(&tcp_ao_needed.key)) {
+ struct tcp_ao_info *ao;
+
+ ao = rcu_dereference_protected(tp->ao_info,
+ lockdep_sock_is_held(sk));
+ if (ao) {
+ out->ao_key = READ_ONCE(ao->current_key);
+ out->type = TCP_KEY_AO;
+ return;
+ }
+ }
+#endif
+#ifdef CONFIG_TCP_MD5SIG
+ if (static_branch_unlikely(&tcp_md5_needed.key) &&
+ rcu_access_pointer(tp->md5sig_info)) {
+ out->md5_key = tp->af_specific->md5_lookup(sk, sk);
+ if (out->md5_key) {
+ out->type = TCP_KEY_MD5;
+ return;
+ }
+ }
+#endif
+ out->type = TCP_KEY_NONE;
+}
+
+static inline bool tcp_key_is_md5(const struct tcp_key *key)
+{
+#ifdef CONFIG_TCP_MD5SIG
+ if (static_branch_unlikely(&tcp_md5_needed.key) &&
+ key->type == TCP_KEY_MD5)
+ return true;
+#endif
+ return false;
+}
+
+static inline bool tcp_key_is_ao(const struct tcp_key *key)
+{
+#ifdef CONFIG_TCP_AO
+ if (static_branch_unlikely(&tcp_ao_needed.key) &&
+ key->type == TCP_KEY_AO)
+ return true;
+#endif
+ return false;
+}
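A hedged sketch of how a transmit path might consume struct tcp_key via the helpers above; tcp_key_demo() and its pr_debug() strings are illustrative only:

static void tcp_key_demo(struct sock *sk)
{
	struct tcp_key key;

	/* caller holds the socket lock */
	tcp_get_current_key(sk, &key);
	if (tcp_key_is_ao(&key))
		pr_debug("sign with AO key, rcv_next %u\n", key.rcv_next);
	else if (tcp_key_is_md5(&key))
		pr_debug("sign with MD5 key\n");
	else
		pr_debug("send unsigned segment\n");
}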
+
int tcpv4_offload_init(void);
void tcp_v4_init(void);
@@ -2298,7 +2506,7 @@ struct tcp_ulp_ops {
/* cleanup ulp */
void (*release)(struct sock *sk);
/* diagnostic */
- int (*get_info)(const struct sock *sk, struct sk_buff *skb);
+ int (*get_info)(struct sock *sk, struct sk_buff *skb);
size_t (*get_info_size)(const struct sock *sk);
/* clone ulp */
void (*clone)(const struct request_sock *req, struct sock *newsk,
@@ -2324,7 +2532,6 @@ struct sk_msg;
struct sk_psock;
#ifdef CONFIG_BPF_SYSCALL
-struct proto *tcp_bpf_get_proto(struct sock *sk, struct sk_psock *psock);
int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
#endif /* CONFIG_BPF_SYSCALL */
@@ -2493,4 +2700,116 @@ static inline u64 tcp_transmit_time(const struct sock *sk)
return 0;
}
+static inline int tcp_parse_auth_options(const struct tcphdr *th,
+ const u8 **md5_hash, const struct tcp_ao_hdr **aoh)
+{
+ const u8 *md5_tmp, *ao_tmp;
+ int ret;
+
+ ret = tcp_do_parse_auth_options(th, &md5_tmp, &ao_tmp);
+ if (ret)
+ return ret;
+
+ if (md5_hash)
+ *md5_hash = md5_tmp;
+
+ if (aoh) {
+ if (!ao_tmp)
+ *aoh = NULL;
+ else
+ *aoh = (struct tcp_ao_hdr *)(ao_tmp - 2);
+ }
+
+ return 0;
+}
+
+static inline bool tcp_ao_required(struct sock *sk, const void *saddr,
+ int family, int l3index, bool stat_inc)
+{
+#ifdef CONFIG_TCP_AO
+ struct tcp_ao_info *ao_info;
+ struct tcp_ao_key *ao_key;
+
+ if (!static_branch_unlikely(&tcp_ao_needed.key))
+ return false;
+
+ ao_info = rcu_dereference_check(tcp_sk(sk)->ao_info,
+ lockdep_sock_is_held(sk));
+ if (!ao_info)
+ return false;
+
+ ao_key = tcp_ao_do_lookup(sk, l3index, saddr, family, -1, -1);
+ if (ao_info->ao_required || ao_key) {
+ if (stat_inc) {
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOREQUIRED);
+ atomic64_inc(&ao_info->counters.ao_required);
+ }
+ return true;
+ }
+#endif
+ return false;
+}
+
+/* Called with rcu_read_lock() */
+static inline enum skb_drop_reason
+tcp_inbound_hash(struct sock *sk, const struct request_sock *req,
+ const struct sk_buff *skb,
+ const void *saddr, const void *daddr,
+ int family, int dif, int sdif)
+{
+ const struct tcphdr *th = tcp_hdr(skb);
+ const struct tcp_ao_hdr *aoh;
+ const __u8 *md5_location;
+ int l3index;
+
+ /* Invalid option, or any of the auth options appears twice */
+ if (tcp_parse_auth_options(th, &md5_location, &aoh)) {
+ tcp_hash_fail("TCP segment has incorrect auth options set",
+ family, skb, "");
+ return SKB_DROP_REASON_TCP_AUTH_HDR;
+ }
+
+ if (req) {
+ if (tcp_rsk_used_ao(req) != !!aoh) {
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOBAD);
+ tcp_hash_fail("TCP connection can't start/end using TCP-AO",
+ family, skb, "%s",
+ !aoh ? "missing AO" : "AO signed");
+ return SKB_DROP_REASON_TCP_AOFAILURE;
+ }
+ }
+
+ /* sdif set means the packet ingressed via a device
+ * in an L3 domain, and dif is set to the l3mdev
+ */
+ l3index = sdif ? dif : 0;
+
+ /* Fast path: unsigned segments */
+ if (likely(!md5_location && !aoh)) {
+ /* Drop if there's a TCP-MD5 or TCP-AO key with any rcvid/sndid
+ * for the remote peer. On a TCP-AO established connection
+ * the last key is impossible to remove, so there's
+ * always at least one current_key.
+ */
+ if (tcp_ao_required(sk, saddr, family, l3index, true)) {
+ tcp_hash_fail("AO hash is required, but not found",
+ family, skb, "L3 index %d", l3index);
+ return SKB_DROP_REASON_TCP_AONOTFOUND;
+ }
+ if (unlikely(tcp_md5_do_lookup(sk, l3index, saddr, family))) {
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
+ tcp_hash_fail("MD5 Hash not found",
+ family, skb, "L3 index %d", l3index);
+ return SKB_DROP_REASON_TCP_MD5NOTFOUND;
+ }
+ return SKB_NOT_DROPPED_YET;
+ }
+
+ if (aoh)
+ return tcp_inbound_ao_hash(sk, skb, family, req, l3index, aoh);
+
+ return tcp_inbound_md5_hash(sk, skb, saddr, daddr, family,
+ l3index, md5_location);
+}
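A hedged sketch of an IPv4 receive-path caller of tcp_inbound_hash(); demo_rx_check() is illustrative and the surrounding socket lookup is elided:

static enum skb_drop_reason demo_rx_check(struct sock *sk,
					  const struct sk_buff *skb,
					  int dif, int sdif)
{
	const struct iphdr *iph = ip_hdr(skb);

	/* req is NULL for a fully established socket */
	return tcp_inbound_hash(sk, NULL, skb, &iph->saddr, &iph->daddr,
				AF_INET, dif, sdif);
}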
+
#endif /* _TCP_H */
diff --git a/include/net/tcp_ao.h b/include/net/tcp_ao.h
new file mode 100644
index 000000000000..471e177362b4
--- /dev/null
+++ b/include/net/tcp_ao.h
@@ -0,0 +1,387 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _TCP_AO_H
+#define _TCP_AO_H
+
+#define TCP_AO_KEY_ALIGN 1
+#define __tcp_ao_key_align __aligned(TCP_AO_KEY_ALIGN)
+
+union tcp_ao_addr {
+ struct in_addr a4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr a6;
+#endif
+};
+
+struct tcp_ao_hdr {
+ u8 kind;
+ u8 length;
+ u8 keyid;
+ u8 rnext_keyid;
+};
+
+struct tcp_ao_counters {
+ atomic64_t pkt_good;
+ atomic64_t pkt_bad;
+ atomic64_t key_not_found;
+ atomic64_t ao_required;
+ atomic64_t dropped_icmp;
+};
+
+struct tcp_ao_key {
+ struct hlist_node node;
+ union tcp_ao_addr addr;
+ u8 key[TCP_AO_MAXKEYLEN] __tcp_ao_key_align;
+ unsigned int tcp_sigpool_id;
+ unsigned int digest_size;
+ int l3index;
+ u8 prefixlen;
+ u8 family;
+ u8 keylen;
+ u8 keyflags;
+ u8 sndid;
+ u8 rcvid;
+ u8 maclen;
+ struct rcu_head rcu;
+ atomic64_t pkt_good;
+ atomic64_t pkt_bad;
+ u8 traffic_keys[];
+};
+
+static inline u8 *rcv_other_key(struct tcp_ao_key *key)
+{
+ return key->traffic_keys;
+}
+
+static inline u8 *snd_other_key(struct tcp_ao_key *key)
+{
+ return key->traffic_keys + key->digest_size;
+}
+
+static inline int tcp_ao_maclen(const struct tcp_ao_key *key)
+{
+ return key->maclen;
+}
+
+/* Use tcp_ao_len_aligned() for TCP header calculations */
+static inline int tcp_ao_len(const struct tcp_ao_key *key)
+{
+ return tcp_ao_maclen(key) + sizeof(struct tcp_ao_hdr);
+}
+
+static inline int tcp_ao_len_aligned(const struct tcp_ao_key *key)
+{
+ return round_up(tcp_ao_len(key), 4);
+}
+
+static inline unsigned int tcp_ao_digest_size(struct tcp_ao_key *key)
+{
+ return key->digest_size;
+}
+
+static inline int tcp_ao_sizeof_key(const struct tcp_ao_key *key)
+{
+ return sizeof(struct tcp_ao_key) + (key->digest_size << 1);
+}
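The accessors above imply a fixed layout for the flexible traffic_keys[] area: two traffic keys of digest_size bytes each, receive key first. A hedged consistency sketch (tcp_ao_key_layout_demo() is hypothetical):

static void tcp_ao_key_layout_demo(const struct tcp_ao_key *key)
{
	/* the allocation covers the struct plus two traffic keys */
	WARN_ON(tcp_ao_sizeof_key(key) !=
		sizeof(struct tcp_ao_key) + 2 * key->digest_size);
	/* option space: MAC plus 4-byte AO header, padded to 32-bit words */
	WARN_ON(tcp_ao_len_aligned(key) !=
		round_up(key->maclen + sizeof(struct tcp_ao_hdr), 4));
}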
+
+struct tcp_ao_info {
+ /* List of tcp_ao_key's */
+ struct hlist_head head;
+ /* current_key and rnext_key aren't maintained on listen sockets.
+ * Their purpose is to cache keys on established connections,
+ * saving needless lookups. Never dereference any of them from
+ * listen sockets.
+ * ::current_key may change in RX to the key that was requested by
+ * the peer; use READ_ONCE()/WRITE_ONCE() to avoid load/store tearing.
+ * Do the same for ::rnext_key if you don't hold the socket lock
+ * (it's changed only by userspace request in setsockopt()).
+ */
+ struct tcp_ao_key *current_key;
+ struct tcp_ao_key *rnext_key;
+ struct tcp_ao_counters counters;
+ u32 ao_required :1,
+ accept_icmps :1,
+ __unused :30;
+ __be32 lisn;
+ __be32 risn;
+ /* The Sequence Number Extension (SNE) is the upper 4 bytes of SEQ,
+ * protecting a TCP-AO connection from replayed old TCP segments.
+ * See RFC5925 (6.2).
+ * To get the correct SNE, there's a helper tcp_ao_compute_sne().
+ * It needs a SEQ basis to know where the lower SEQ numbers are.
+ * Relative to that basis, it can provide an incremented SNE
+ * when SEQ rolls over, or a decremented SNE when there's
+ * a retransmitted segment from before the rollover.
+ * - for request sockets such a basis is rcv_isn/snt_isn, which is
+ * good enough as a reqsk is not expected to receive 4 Gbytes.
+ * - for full sockets the basis is rcv_nxt/snd_una. snd_una is
+ * taken instead of snd_nxt as currently it's easier to track
+ * in tcp_snd_una_update(), rather than updating SNE in all
+ * WRITE_ONCE(tp->snd_nxt, ...)
+ * - for time-wait sockets the basis is tw_rcv_nxt/tw_snd_nxt.
+ * tw_snd_nxt is not expected to change, while tw_rcv_nxt may.
+ */
+ u32 snd_sne;
+ u32 rcv_sne;
+ refcount_t refcnt; /* Protects twsk destruction */
+ struct rcu_head rcu;
+};
+
+#ifdef CONFIG_TCP_MD5SIG
+#include <linux/jump_label.h>
+extern struct static_key_false_deferred tcp_md5_needed;
+#define static_branch_tcp_md5() static_branch_unlikely(&tcp_md5_needed.key)
+#else
+#define static_branch_tcp_md5() false
+#endif
+#ifdef CONFIG_TCP_AO
+/* TCP-AO structures and functions */
+#include <linux/jump_label.h>
+extern struct static_key_false_deferred tcp_ao_needed;
+#define static_branch_tcp_ao() static_branch_unlikely(&tcp_ao_needed.key)
+#else
+#define static_branch_tcp_ao() false
+#endif
+
+static inline bool tcp_hash_should_produce_warnings(void)
+{
+ return static_branch_tcp_md5() || static_branch_tcp_ao();
+}
+
+#define tcp_hash_fail(msg, family, skb, fmt, ...) \
+do { \
+ const struct tcphdr *th = tcp_hdr(skb); \
+ char hdr_flags[6]; \
+ char *f = hdr_flags; \
+ \
+ if (!tcp_hash_should_produce_warnings()) \
+ break; \
+ if (th->fin) \
+ *f++ = 'F'; \
+ if (th->syn) \
+ *f++ = 'S'; \
+ if (th->rst) \
+ *f++ = 'R'; \
+ if (th->psh) \
+ *f++ = 'P'; \
+ if (th->ack) \
+ *f++ = '.'; \
+ *f = 0; \
+ if ((family) == AF_INET) { \
+ net_info_ratelimited("%s for %pI4.%d->%pI4.%d [%s] " fmt "\n", \
+ msg, &ip_hdr(skb)->saddr, ntohs(th->source), \
+ &ip_hdr(skb)->daddr, ntohs(th->dest), \
+ hdr_flags, ##__VA_ARGS__); \
+ } else { \
+ net_info_ratelimited("%s for [%pI6c].%d->[%pI6c].%d [%s]" fmt "\n", \
+ msg, &ipv6_hdr(skb)->saddr, ntohs(th->source), \
+ &ipv6_hdr(skb)->daddr, ntohs(th->dest), \
+ hdr_flags, ##__VA_ARGS__); \
+ } \
+} while (0)
+
+#ifdef CONFIG_TCP_AO
+/* TCP-AO structures and functions */
+struct tcp4_ao_context {
+ __be32 saddr;
+ __be32 daddr;
+ __be16 sport;
+ __be16 dport;
+ __be32 sisn;
+ __be32 disn;
+};
+
+struct tcp6_ao_context {
+ struct in6_addr saddr;
+ struct in6_addr daddr;
+ __be16 sport;
+ __be16 dport;
+ __be32 sisn;
+ __be32 disn;
+};
+
+struct tcp_sigpool;
+#define TCP_AO_ESTABLISHED (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 | \
+ TCPF_CLOSE | TCPF_CLOSE_WAIT | \
+ TCPF_LAST_ACK | TCPF_CLOSING)
+
+int tcp_ao_transmit_skb(struct sock *sk, struct sk_buff *skb,
+ struct tcp_ao_key *key, struct tcphdr *th,
+ __u8 *hash_location);
+int tcp_ao_hash_skb(unsigned short int family,
+ char *ao_hash, struct tcp_ao_key *key,
+ const struct sock *sk, const struct sk_buff *skb,
+ const u8 *tkey, int hash_offset, u32 sne);
+int tcp_parse_ao(struct sock *sk, int cmd, unsigned short int family,
+ sockptr_t optval, int optlen);
+struct tcp_ao_key *tcp_ao_established_key(struct tcp_ao_info *ao,
+ int sndid, int rcvid);
+int tcp_ao_copy_all_matching(const struct sock *sk, struct sock *newsk,
+ struct request_sock *req, struct sk_buff *skb,
+ int family);
+int tcp_ao_calc_traffic_key(struct tcp_ao_key *mkt, u8 *key, void *ctx,
+ unsigned int len, struct tcp_sigpool *hp);
+void tcp_ao_destroy_sock(struct sock *sk, bool twsk);
+void tcp_ao_time_wait(struct tcp_timewait_sock *tcptw, struct tcp_sock *tp);
+bool tcp_ao_ignore_icmp(const struct sock *sk, int family, int type, int code);
+int tcp_ao_get_mkts(struct sock *sk, sockptr_t optval, sockptr_t optlen);
+int tcp_ao_get_sock_info(struct sock *sk, sockptr_t optval, sockptr_t optlen);
+int tcp_ao_get_repair(struct sock *sk, sockptr_t optval, sockptr_t optlen);
+int tcp_ao_set_repair(struct sock *sk, sockptr_t optval, unsigned int optlen);
+enum skb_drop_reason tcp_inbound_ao_hash(struct sock *sk,
+ const struct sk_buff *skb, unsigned short int family,
+ const struct request_sock *req, int l3index,
+ const struct tcp_ao_hdr *aoh);
+u32 tcp_ao_compute_sne(u32 next_sne, u32 next_seq, u32 seq);
+struct tcp_ao_key *tcp_ao_do_lookup(const struct sock *sk, int l3index,
+ const union tcp_ao_addr *addr,
+ int family, int sndid, int rcvid);
+int tcp_ao_hash_hdr(unsigned short family, char *ao_hash,
+ struct tcp_ao_key *key, const u8 *tkey,
+ const union tcp_ao_addr *daddr,
+ const union tcp_ao_addr *saddr,
+ const struct tcphdr *th, u32 sne);
+int tcp_ao_prepare_reset(const struct sock *sk, struct sk_buff *skb,
+ const struct tcp_ao_hdr *aoh, int l3index, u32 seq,
+ struct tcp_ao_key **key, char **traffic_key,
+ bool *allocated_traffic_key, u8 *keyid, u32 *sne);
+
+/* ipv4 specific functions */
+int tcp_v4_parse_ao(struct sock *sk, int cmd, sockptr_t optval, int optlen);
+struct tcp_ao_key *tcp_v4_ao_lookup(const struct sock *sk, struct sock *addr_sk,
+ int sndid, int rcvid);
+int tcp_v4_ao_synack_hash(char *ao_hash, struct tcp_ao_key *mkt,
+ struct request_sock *req, const struct sk_buff *skb,
+ int hash_offset, u32 sne);
+int tcp_v4_ao_calc_key_sk(struct tcp_ao_key *mkt, u8 *key,
+ const struct sock *sk,
+ __be32 sisn, __be32 disn, bool send);
+int tcp_v4_ao_calc_key_rsk(struct tcp_ao_key *mkt, u8 *key,
+ struct request_sock *req);
+struct tcp_ao_key *tcp_v4_ao_lookup_rsk(const struct sock *sk,
+ struct request_sock *req,
+ int sndid, int rcvid);
+int tcp_v4_ao_hash_skb(char *ao_hash, struct tcp_ao_key *key,
+ const struct sock *sk, const struct sk_buff *skb,
+ const u8 *tkey, int hash_offset, u32 sne);
+/* ipv6 specific functions */
+int tcp_v6_ao_hash_pseudoheader(struct tcp_sigpool *hp,
+ const struct in6_addr *daddr,
+ const struct in6_addr *saddr, int nbytes);
+int tcp_v6_ao_calc_key_skb(struct tcp_ao_key *mkt, u8 *key,
+ const struct sk_buff *skb, __be32 sisn, __be32 disn);
+int tcp_v6_ao_calc_key_sk(struct tcp_ao_key *mkt, u8 *key,
+ const struct sock *sk, __be32 sisn,
+ __be32 disn, bool send);
+int tcp_v6_ao_calc_key_rsk(struct tcp_ao_key *mkt, u8 *key,
+ struct request_sock *req);
+struct tcp_ao_key *tcp_v6_ao_lookup(const struct sock *sk,
+ struct sock *addr_sk, int sndid, int rcvid);
+struct tcp_ao_key *tcp_v6_ao_lookup_rsk(const struct sock *sk,
+ struct request_sock *req,
+ int sndid, int rcvid);
+int tcp_v6_ao_hash_skb(char *ao_hash, struct tcp_ao_key *key,
+ const struct sock *sk, const struct sk_buff *skb,
+ const u8 *tkey, int hash_offset, u32 sne);
+int tcp_v6_parse_ao(struct sock *sk, int cmd, sockptr_t optval, int optlen);
+int tcp_v6_ao_synack_hash(char *ao_hash, struct tcp_ao_key *ao_key,
+ struct request_sock *req, const struct sk_buff *skb,
+ int hash_offset, u32 sne);
+void tcp_ao_established(struct sock *sk);
+void tcp_ao_finish_connect(struct sock *sk, struct sk_buff *skb);
+void tcp_ao_connect_init(struct sock *sk);
+void tcp_ao_syncookie(struct sock *sk, const struct sk_buff *skb,
+ struct request_sock *req, unsigned short int family);
+#else /* CONFIG_TCP_AO */
+
+static inline int tcp_ao_transmit_skb(struct sock *sk, struct sk_buff *skb,
+ struct tcp_ao_key *key, struct tcphdr *th,
+ __u8 *hash_location)
+{
+ return 0;
+}
+
+static inline void tcp_ao_syncookie(struct sock *sk, const struct sk_buff *skb,
+ struct request_sock *req, unsigned short int family)
+{
+}
+
+static inline bool tcp_ao_ignore_icmp(const struct sock *sk, int family,
+ int type, int code)
+{
+ return false;
+}
+
+static inline enum skb_drop_reason tcp_inbound_ao_hash(struct sock *sk,
+ const struct sk_buff *skb, unsigned short int family,
+ const struct request_sock *req, int l3index,
+ const struct tcp_ao_hdr *aoh)
+{
+ return SKB_NOT_DROPPED_YET;
+}
+
+static inline struct tcp_ao_key *tcp_ao_do_lookup(const struct sock *sk,
+ int l3index, const union tcp_ao_addr *addr,
+ int family, int sndid, int rcvid)
+{
+ return NULL;
+}
+
+static inline void tcp_ao_destroy_sock(struct sock *sk, bool twsk)
+{
+}
+
+static inline void tcp_ao_established(struct sock *sk)
+{
+}
+
+static inline void tcp_ao_finish_connect(struct sock *sk, struct sk_buff *skb)
+{
+}
+
+static inline void tcp_ao_time_wait(struct tcp_timewait_sock *tcptw,
+ struct tcp_sock *tp)
+{
+}
+
+static inline void tcp_ao_connect_init(struct sock *sk)
+{
+}
+
+static inline int tcp_ao_get_mkts(struct sock *sk, sockptr_t optval, sockptr_t optlen)
+{
+ return -ENOPROTOOPT;
+}
+
+static inline int tcp_ao_get_sock_info(struct sock *sk, sockptr_t optval, sockptr_t optlen)
+{
+ return -ENOPROTOOPT;
+}
+
+static inline int tcp_ao_get_repair(struct sock *sk,
+ sockptr_t optval, sockptr_t optlen)
+{
+ return -ENOPROTOOPT;
+}
+
+static inline int tcp_ao_set_repair(struct sock *sk,
+ sockptr_t optval, unsigned int optlen)
+{
+ return -ENOPROTOOPT;
+}
+#endif
+
+#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
+int tcp_do_parse_auth_options(const struct tcphdr *th,
+ const u8 **md5_hash, const u8 **ao_hash);
+#else
+static inline int tcp_do_parse_auth_options(const struct tcphdr *th,
+ const u8 **md5_hash, const u8 **ao_hash)
+{
+ *md5_hash = NULL;
+ *ao_hash = NULL;
+ return 0;
+}
+#endif
+
+#endif /* _TCP_AO_H */
diff --git a/include/net/tcp_states.h b/include/net/tcp_states.h
index cc00118acca1..d60e8148ff4c 100644
--- a/include/net/tcp_states.h
+++ b/include/net/tcp_states.h
@@ -22,6 +22,7 @@ enum {
TCP_LISTEN,
TCP_CLOSING, /* Now a valid state */
TCP_NEW_SYN_RECV,
+ TCP_BOUND_INACTIVE, /* Pseudo-state for inet_diag */
TCP_MAX_STATES /* Leave at the end! */
};
@@ -43,6 +44,7 @@ enum {
TCPF_LISTEN = (1 << TCP_LISTEN),
TCPF_CLOSING = (1 << TCP_CLOSING),
TCPF_NEW_SYN_RECV = (1 << TCP_NEW_SYN_RECV),
+ TCPF_BOUND_INACTIVE = (1 << TCP_BOUND_INACTIVE),
};
#endif /* _LINUX_TCP_STATES_H */
diff --git a/include/net/tcx.h b/include/net/tcx.h
new file mode 100644
index 000000000000..04be9377785d
--- /dev/null
+++ b/include/net/tcx.h
@@ -0,0 +1,201 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2023 Isovalent */
+#ifndef __NET_TCX_H
+#define __NET_TCX_H
+
+#include <linux/bpf.h>
+#include <linux/bpf_mprog.h>
+
+#include <net/sch_generic.h>
+
+struct mini_Qdisc;
+
+struct tcx_entry {
+ struct mini_Qdisc __rcu *miniq;
+ struct bpf_mprog_bundle bundle;
+ bool miniq_active;
+ struct rcu_head rcu;
+};
+
+struct tcx_link {
+ struct bpf_link link;
+ struct net_device *dev;
+ u32 location;
+};
+
+static inline void tcx_set_ingress(struct sk_buff *skb, bool ingress)
+{
+#ifdef CONFIG_NET_XGRESS
+ skb->tc_at_ingress = ingress;
+#endif
+}
+
+#ifdef CONFIG_NET_XGRESS
+static inline struct tcx_entry *tcx_entry(struct bpf_mprog_entry *entry)
+{
+ struct bpf_mprog_bundle *bundle = entry->parent;
+
+ return container_of(bundle, struct tcx_entry, bundle);
+}
+
+static inline struct tcx_link *tcx_link(const struct bpf_link *link)
+{
+ return container_of(link, struct tcx_link, link);
+}
+
+void tcx_inc(void);
+void tcx_dec(void);
+
+static inline void tcx_entry_sync(void)
+{
+ /* bpf_mprog_entry got a/b swapped, therefore ensure that
+ * there are no inflight users on the old one anymore.
+ */
+ synchronize_rcu();
+}
+
+static inline void
+tcx_entry_update(struct net_device *dev, struct bpf_mprog_entry *entry,
+ bool ingress)
+{
+ ASSERT_RTNL();
+ if (ingress)
+ rcu_assign_pointer(dev->tcx_ingress, entry);
+ else
+ rcu_assign_pointer(dev->tcx_egress, entry);
+}
+
+static inline struct bpf_mprog_entry *
+tcx_entry_fetch(struct net_device *dev, bool ingress)
+{
+ ASSERT_RTNL();
+ if (ingress)
+ return rcu_dereference_rtnl(dev->tcx_ingress);
+ else
+ return rcu_dereference_rtnl(dev->tcx_egress);
+}
+
+static inline struct bpf_mprog_entry *tcx_entry_create(void)
+{
+ struct tcx_entry *tcx = kzalloc(sizeof(*tcx), GFP_KERNEL);
+
+ if (tcx) {
+ bpf_mprog_bundle_init(&tcx->bundle);
+ return &tcx->bundle.a;
+ }
+ return NULL;
+}
+
+static inline void tcx_entry_free(struct bpf_mprog_entry *entry)
+{
+ kfree_rcu(tcx_entry(entry), rcu);
+}
+
+static inline struct bpf_mprog_entry *
+tcx_entry_fetch_or_create(struct net_device *dev, bool ingress, bool *created)
+{
+ struct bpf_mprog_entry *entry = tcx_entry_fetch(dev, ingress);
+
+ *created = false;
+ if (!entry) {
+ entry = tcx_entry_create();
+ if (!entry)
+ return NULL;
+ *created = true;
+ }
+ return entry;
+}
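A hedged sketch of the create/update/sync lifecycle the helpers above imply; tcx_demo_attach() is illustrative and elides the bpf_mprog bookkeeping of the real attach path:

static int tcx_demo_attach(struct net_device *dev, bool ingress)
{
	struct bpf_mprog_entry *entry;
	bool created;

	ASSERT_RTNL();
	entry = tcx_entry_fetch_or_create(dev, ingress, &created);
	if (!entry)
		return -ENOMEM;
	/* ... insert the program into entry's bpf_mprog bundle here ... */
	tcx_entry_update(dev, entry, ingress);
	if (!created)
		tcx_entry_sync(); /* wait out readers of the swapped entry */
	return 0;
}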
+
+static inline void tcx_skeys_inc(bool ingress)
+{
+ tcx_inc();
+ if (ingress)
+ net_inc_ingress_queue();
+ else
+ net_inc_egress_queue();
+}
+
+static inline void tcx_skeys_dec(bool ingress)
+{
+ if (ingress)
+ net_dec_ingress_queue();
+ else
+ net_dec_egress_queue();
+ tcx_dec();
+}
+
+static inline void tcx_miniq_set_active(struct bpf_mprog_entry *entry,
+ const bool active)
+{
+ ASSERT_RTNL();
+ tcx_entry(entry)->miniq_active = active;
+}
+
+static inline bool tcx_entry_is_active(struct bpf_mprog_entry *entry)
+{
+ ASSERT_RTNL();
+ return bpf_mprog_total(entry) || tcx_entry(entry)->miniq_active;
+}
+
+static inline enum tcx_action_base tcx_action_code(struct sk_buff *skb,
+ int code)
+{
+ switch (code) {
+ case TCX_PASS:
+ skb->tc_index = qdisc_skb_cb(skb)->tc_classid;
+ fallthrough;
+ case TCX_DROP:
+ case TCX_REDIRECT:
+ return code;
+ case TCX_NEXT:
+ default:
+ return TCX_NEXT;
+ }
+}
+#endif /* CONFIG_NET_XGRESS */
+
+#if defined(CONFIG_NET_XGRESS) && defined(CONFIG_BPF_SYSCALL)
+int tcx_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog);
+int tcx_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
+int tcx_prog_detach(const union bpf_attr *attr, struct bpf_prog *prog);
+void tcx_uninstall(struct net_device *dev, bool ingress);
+
+int tcx_prog_query(const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+
+static inline void dev_tcx_uninstall(struct net_device *dev)
+{
+ ASSERT_RTNL();
+ tcx_uninstall(dev, true);
+ tcx_uninstall(dev, false);
+}
+#else
+static inline int tcx_prog_attach(const union bpf_attr *attr,
+ struct bpf_prog *prog)
+{
+ return -EINVAL;
+}
+
+static inline int tcx_link_attach(const union bpf_attr *attr,
+ struct bpf_prog *prog)
+{
+ return -EINVAL;
+}
+
+static inline int tcx_prog_detach(const union bpf_attr *attr,
+ struct bpf_prog *prog)
+{
+ return -EINVAL;
+}
+
+static inline int tcx_prog_query(const union bpf_attr *attr,
+ union bpf_attr __user *uattr)
+{
+ return -EINVAL;
+}
+
+static inline void dev_tcx_uninstall(struct net_device *dev)
+{
+}
+#endif /* CONFIG_NET_XGRESS && CONFIG_BPF_SYSCALL */
+#endif /* __NET_TCX_H */
diff --git a/include/net/tls.h b/include/net/tls.h
index 596595c4b1af..340ad43971e4 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -51,16 +51,6 @@
struct tls_rec;
-struct tls_cipher_size_desc {
- unsigned int iv;
- unsigned int key;
- unsigned int salt;
- unsigned int tag;
- unsigned int rec_seq;
-};
-
-extern const struct tls_cipher_size_desc tls_cipher_size_desc[];
-
/* Maximum data size carried in a TLS record */
#define TLS_MAX_PAYLOAD_SIZE ((size_t)1 << 14)
@@ -69,13 +59,10 @@ extern const struct tls_cipher_size_desc tls_cipher_size_desc[];
#define TLS_CRYPTO_INFO_READY(info) ((info)->cipher_type)
-#define TLS_RECORD_TYPE_ALERT 0x15
-#define TLS_RECORD_TYPE_HANDSHAKE 0x16
-#define TLS_RECORD_TYPE_DATA 0x17
-
#define TLS_AAD_SPACE_SIZE 13
-#define MAX_IV_SIZE 16
+#define TLS_MAX_IV_SIZE 16
+#define TLS_MAX_SALT_SIZE 4
#define TLS_TAG_SIZE 16
#define TLS_MAX_REC_SEQ_SIZE 8
#define TLS_MAX_AAD_SIZE TLS_AAD_SPACE_SIZE
@@ -110,9 +97,6 @@ struct tls_sw_context_tx {
struct tls_rec *open_rec;
struct list_head tx_list;
atomic_t encrypt_pending;
- /* protect crypto_wait with encrypt_pending */
- spinlock_t encrypt_compl_lock;
- int async_notify;
u8 async_capable:1;
#define BIT_TX_SCHEDULED 0
@@ -149,8 +133,6 @@ struct tls_sw_context_rx {
struct tls_strparser strp;
atomic_t decrypt_pending;
- /* protect crypto_wait with decrypt_pending*/
- spinlock_t decrypt_compl_lock;
struct sk_buff_head async_hold;
struct wait_queue_head wq;
};
@@ -163,6 +145,7 @@ struct tls_record_info {
skb_frag_t frags[MAX_SKB_FRAGS];
};
+#define TLS_DRIVER_STATE_SIZE_TX 16
struct tls_offload_context_tx {
struct crypto_aead *aead_send;
spinlock_t lock; /* protects records list */
@@ -176,17 +159,13 @@ struct tls_offload_context_tx {
void (*sk_destruct)(struct sock *sk);
struct work_struct destruct_work;
struct tls_context *ctx;
- u8 driver_state[] __aligned(8);
/* The TLS layer reserves room for driver specific state
* Currently the belief is that there is not enough
* driver specific state to justify another layer of indirection
*/
-#define TLS_DRIVER_STATE_SIZE_TX 16
+ u8 driver_state[TLS_DRIVER_STATE_SIZE_TX] __aligned(8);
};
-#define TLS_OFFLOAD_CONTEXT_SIZE_TX \
- (sizeof(struct tls_offload_context_tx) + TLS_DRIVER_STATE_SIZE_TX)
-
enum tls_context_flags {
/* tls_device_down was called after the netdev went down, device state
* was released, and kTLS works in software, even though rx_conf is
@@ -207,8 +186,8 @@ enum tls_context_flags {
};
struct cipher_context {
- char *iv;
- char *rec_seq;
+ char iv[TLS_MAX_IV_SIZE + TLS_MAX_SALT_SIZE];
+ char rec_seq[TLS_MAX_REC_SEQ_SIZE];
};
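With iv and rec_seq now fixed-size arrays bounded by the TLS_MAX_* constants above, per-cipher setup can copy into the context instead of allocating. A hedged sketch, assuming the usual salt-then-IV layout of the iv buffer (demo_set_iv() is hypothetical):

static int demo_set_iv(struct cipher_context *ctx,
		       const u8 *salt, size_t saltlen,
		       const u8 *iv, size_t ivlen)
{
	if (saltlen > TLS_MAX_SALT_SIZE || ivlen > TLS_MAX_IV_SIZE)
		return -EINVAL;
	memcpy(ctx->iv, salt, saltlen);       /* salt precedes the IV */
	memcpy(ctx->iv + saltlen, iv, ivlen);
	return 0;
}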
union tls_crypto_context {
@@ -259,7 +238,7 @@ struct tls_context {
struct scatterlist *partially_sent_record;
u16 partially_sent_offset;
- bool in_tcp_sendpages;
+ bool splicing_pages;
bool pending_open_record_frags;
struct mutex tx_lock; /* protects partially_sent_* fields and
@@ -316,6 +295,7 @@ struct tls_offload_resync_async {
u32 log[TLS_DEVICE_RESYNC_ASYNC_LOGMAX];
};
+#define TLS_DRIVER_STATE_SIZE_RX 8
struct tls_offload_context_rx {
/* sw must be the first member of tls_offload_context_rx */
struct tls_sw_context_rx sw;
@@ -339,17 +319,13 @@ struct tls_offload_context_rx {
struct tls_offload_resync_async *resync_async;
};
};
- u8 driver_state[] __aligned(8);
/* The TLS layer reserves room for driver specific state
* Currently the belief is that there is not enough
* driver specific state to justify another layer of indirection
*/
-#define TLS_DRIVER_STATE_SIZE_RX 8
+ u8 driver_state[TLS_DRIVER_STATE_SIZE_RX] __aligned(8);
};
-#define TLS_OFFLOAD_CONTEXT_SIZE_RX \
- (sizeof(struct tls_offload_context_rx) + TLS_DRIVER_STATE_SIZE_RX)
-
struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
u32 seq, u64 *p_record_sn);
@@ -370,10 +346,12 @@ struct sk_buff *
tls_validate_xmit_skb_sw(struct sock *sk, struct net_device *dev,
struct sk_buff *skb);
-static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
+static inline bool tls_is_skb_tx_device_offloaded(const struct sk_buff *skb)
{
-#ifdef CONFIG_SOCK_VALIDATE_XMIT
- return sk_fullsock(sk) &&
+#ifdef CONFIG_TLS_DEVICE
+ struct sock *sk = skb->sk;
+
+ return sk && sk_fullsock(sk) &&
(smp_load_acquire(&sk->sk_validate_xmit_skb) ==
&tls_validate_xmit_skb);
#else
diff --git a/include/net/tls_prot.h b/include/net/tls_prot.h
new file mode 100644
index 000000000000..68a40756440b
--- /dev/null
+++ b/include/net/tls_prot.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (c) 2023, Oracle and/or its affiliates.
+ *
+ * TLS Protocol definitions
+ *
+ * From https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml
+ */
+
+#ifndef _TLS_PROT_H
+#define _TLS_PROT_H
+
+/*
+ * TLS Record protocol: ContentType
+ */
+enum {
+ TLS_RECORD_TYPE_CHANGE_CIPHER_SPEC = 20,
+ TLS_RECORD_TYPE_ALERT = 21,
+ TLS_RECORD_TYPE_HANDSHAKE = 22,
+ TLS_RECORD_TYPE_DATA = 23,
+ TLS_RECORD_TYPE_HEARTBEAT = 24,
+ TLS_RECORD_TYPE_TLS12_CID = 25,
+ TLS_RECORD_TYPE_ACK = 26,
+};
+
+/*
+ * TLS Alert protocol: AlertLevel
+ */
+enum {
+ TLS_ALERT_LEVEL_WARNING = 1,
+ TLS_ALERT_LEVEL_FATAL = 2,
+};
+
+/*
+ * TLS Alert protocol: AlertDescription
+ */
+enum {
+ TLS_ALERT_DESC_CLOSE_NOTIFY = 0,
+ TLS_ALERT_DESC_UNEXPECTED_MESSAGE = 10,
+ TLS_ALERT_DESC_BAD_RECORD_MAC = 20,
+ TLS_ALERT_DESC_RECORD_OVERFLOW = 22,
+ TLS_ALERT_DESC_HANDSHAKE_FAILURE = 40,
+ TLS_ALERT_DESC_BAD_CERTIFICATE = 42,
+ TLS_ALERT_DESC_UNSUPPORTED_CERTIFICATE = 43,
+ TLS_ALERT_DESC_CERTIFICATE_REVOKED = 44,
+ TLS_ALERT_DESC_CERTIFICATE_EXPIRED = 45,
+ TLS_ALERT_DESC_CERTIFICATE_UNKNOWN = 46,
+ TLS_ALERT_DESC_ILLEGAL_PARAMETER = 47,
+ TLS_ALERT_DESC_UNKNOWN_CA = 48,
+ TLS_ALERT_DESC_ACCESS_DENIED = 49,
+ TLS_ALERT_DESC_DECODE_ERROR = 50,
+ TLS_ALERT_DESC_DECRYPT_ERROR = 51,
+ TLS_ALERT_DESC_TOO_MANY_CIDS_REQUESTED = 52,
+ TLS_ALERT_DESC_PROTOCOL_VERSION = 70,
+ TLS_ALERT_DESC_INSUFFICIENT_SECURITY = 71,
+ TLS_ALERT_DESC_INTERNAL_ERROR = 80,
+ TLS_ALERT_DESC_INAPPROPRIATE_FALLBACK = 86,
+ TLS_ALERT_DESC_USER_CANCELED = 90,
+ TLS_ALERT_DESC_MISSING_EXTENSION = 109,
+ TLS_ALERT_DESC_UNSUPPORTED_EXTENSION = 110,
+ TLS_ALERT_DESC_UNRECOGNIZED_NAME = 112,
+ TLS_ALERT_DESC_BAD_CERTIFICATE_STATUS_RESPONSE = 113,
+ TLS_ALERT_DESC_UNKNOWN_PSK_IDENTITY = 115,
+ TLS_ALERT_DESC_CERTIFICATE_REQUIRED = 116,
+ TLS_ALERT_DESC_NO_APPLICATION_PROTOCOL = 120,
+};
+
+#endif /* _TLS_PROT_H */
diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h
index d27b1caf3753..1a97e3f32029 100644
--- a/include/net/transp_v6.h
+++ b/include/net/transp_v6.h
@@ -33,8 +33,6 @@ void udplitev6_exit(void);
int tcpv6_init(void);
void tcpv6_exit(void);
-int udpv6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
-
/* this does all the common and the specific ctl work */
void ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg,
struct sk_buff *skb);
diff --git a/include/net/udp.h b/include/net/udp.h
index de4b528522bb..488a6d2babcc 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -21,6 +21,7 @@
#include <linux/list.h>
#include <linux/bug.h>
#include <net/inet_sock.h>
+#include <net/gso.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
@@ -272,18 +273,16 @@ static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
int udp_v4_early_demux(struct sk_buff *skb);
bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
-int udp_get_port(struct sock *sk, unsigned short snum,
- int (*saddr_cmp)(const struct sock *,
- const struct sock *));
int udp_err(struct sk_buff *, u32);
int udp_abort(struct sock *sk, int err);
int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
+void udp_splice_eof(struct socket *sock);
int udp_push_pending_frames(struct sock *sk);
void udp_flush_pending_frames(struct sock *sk);
int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size);
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
int udp_rcv(struct sk_buff *skb);
-int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+int udp_ioctl(struct sock *sk, int cmd, int *karg);
int udp_init_sock(struct sock *sk);
int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int __udp_disconnect(struct sock *sk, int flags);
@@ -437,7 +436,6 @@ struct udp_seq_afinfo {
struct udp_iter_state {
struct seq_net_private p;
int bucket;
- struct udp_seq_afinfo *bpf_seq_afinfo;
};
void *udp_seq_start(struct seq_file *seq, loff_t *pos);
@@ -528,7 +526,6 @@ static inline void udp_post_segment_fix_csum(struct sk_buff *skb)
#ifdef CONFIG_BPF_SYSCALL
struct sk_psock;
-struct proto *udp_bpf_get_proto(struct sock *sk, struct sk_psock *psock);
int udp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
#endif
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index 0ca9b7a11baf..d716214fe03d 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -154,13 +154,30 @@ void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb
int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb,
- struct net_device *dev, struct in6_addr *saddr,
- struct in6_addr *daddr,
+ struct net_device *dev,
+ const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
__u8 prio, __u8 ttl, __be32 label,
__be16 src_port, __be16 dst_port, bool nocheck);
void udp_tunnel_sock_release(struct socket *sock);
+struct rtable *udp_tunnel_dst_lookup(struct sk_buff *skb,
+ struct net_device *dev,
+ struct net *net, int oif,
+ __be32 *saddr,
+ const struct ip_tunnel_key *key,
+ __be16 sport, __be16 dport, u8 tos,
+ struct dst_cache *dst_cache);
+struct dst_entry *udp_tunnel6_dst_lookup(struct sk_buff *skb,
+ struct net_device *dev,
+ struct net *net,
+ struct socket *sock, int oif,
+ struct in6_addr *saddr,
+ const struct ip_tunnel_key *key,
+ __be16 sport, __be16 dport, u8 dsfield,
+ struct dst_cache *dst_cache);
+
struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family,
__be16 flags, __be64 tunnel_id,
int md_size);
@@ -174,16 +191,13 @@ static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum)
}
#endif
-static inline void udp_tunnel_encap_enable(struct socket *sock)
+static inline void udp_tunnel_encap_enable(struct sock *sk)
{
- struct udp_sock *up = udp_sk(sock->sk);
-
- if (up->encap_enabled)
+ if (udp_test_and_set_bit(ENCAP_ENABLED, sk))
return;
- up->encap_enabled = 1;
#if IS_ENABLED(CONFIG_IPV6)
- if (sock->sk->sk_family == PF_INET6)
+ if (READ_ONCE(sk->sk_family) == PF_INET6)
ipv6_stub->udpv6_encap_enable();
#endif
udp_encap_enable();
diff --git a/include/net/udplite.h b/include/net/udplite.h
index 299c14ce2bb9..786919d29f8d 100644
--- a/include/net/udplite.h
+++ b/include/net/udplite.h
@@ -66,14 +66,18 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
/* Fast-path computation of checksum. Socket may not be locked. */
static inline __wsum udplite_csum(struct sk_buff *skb)
{
- const struct udp_sock *up = udp_sk(skb->sk);
const int off = skb_transport_offset(skb);
+ const struct sock *sk = skb->sk;
int len = skb->len - off;
- if ((up->pcflag & UDPLITE_SEND_CC) && up->pcslen < len) {
- if (0 < up->pcslen)
- len = up->pcslen;
- udp_hdr(skb)->len = htons(up->pcslen);
+ if (udp_test_bit(UDPLITE_SEND_CC, sk)) {
+ u16 pcslen = READ_ONCE(udp_sk(sk)->pcslen);
+
+ if (pcslen < len) {
+ if (pcslen > 0)
+ len = pcslen;
+ udp_hdr(skb)->len = htons(pcslen);
+ }
}
skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */
@@ -81,6 +85,4 @@ static inline __wsum udplite_csum(struct sk_buff *skb)
}
void udplite4_register(void);
-int udplite_get_port(struct sock *sk, unsigned short snum,
- int (*scmp)(const struct sock *, const struct sock *));
#endif /* _UDPLITE_H */
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 20bd7d893e10..33ba6fc151cf 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -210,22 +210,23 @@ struct vxlan_rdst {
};
struct vxlan_config {
- union vxlan_addr remote_ip;
- union vxlan_addr saddr;
- __be32 vni;
- int remote_ifindex;
- int mtu;
- __be16 dst_port;
- u16 port_min;
- u16 port_max;
- u8 tos;
- u8 ttl;
- __be32 label;
- u32 flags;
- unsigned long age_interval;
- unsigned int addrmax;
- bool no_share;
- enum ifla_vxlan_df df;
+ union vxlan_addr remote_ip;
+ union vxlan_addr saddr;
+ __be32 vni;
+ int remote_ifindex;
+ int mtu;
+ __be16 dst_port;
+ u16 port_min;
+ u16 port_max;
+ u8 tos;
+ u8 ttl;
+ __be32 label;
+ enum ifla_vxlan_label_policy label_policy;
+ u32 flags;
+ unsigned long age_interval;
+ unsigned int addrmax;
+ bool no_share;
+ enum ifla_vxlan_df df;
};
enum {
@@ -328,6 +329,7 @@ struct vxlan_dev {
#define VXLAN_F_TTL_INHERIT 0x10000
#define VXLAN_F_VNIFILTER 0x20000
#define VXLAN_F_MDB 0x40000
+#define VXLAN_F_LOCALBYPASS 0x80000
/* Flags that are used in the receive path. These flags must match in
* order for a socket to be shareable
@@ -348,7 +350,8 @@ struct vxlan_dev {
VXLAN_F_UDP_ZERO_CSUM6_TX | \
VXLAN_F_UDP_ZERO_CSUM6_RX | \
VXLAN_F_COLLECT_METADATA | \
- VXLAN_F_VNIFILTER)
+ VXLAN_F_VNIFILTER | \
+ VXLAN_F_LOCALBYPASS)
struct net_device *vxlan_dev_create(struct net *net, const char *name,
u8 name_assign_type, struct vxlan_config *conf);
@@ -384,10 +387,15 @@ static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
return features;
}
-/* IP header + UDP + VXLAN + Ethernet header */
-#define VXLAN_HEADROOM (20 + 8 + 8 + 14)
-/* IPv6 header + UDP + VXLAN + Ethernet header */
-#define VXLAN6_HEADROOM (40 + 8 + 8 + 14)
+static inline int vxlan_headroom(u32 flags)
+{
+ /* VXLAN: IP4/6 header + UDP + VXLAN + Ethernet header */
+ /* VXLAN-GPE: IP4/6 header + UDP + VXLAN */
+ return (flags & VXLAN_F_IPV6 ? sizeof(struct ipv6hdr) :
+ sizeof(struct iphdr)) +
+ sizeof(struct udphdr) + sizeof(struct vxlanhdr) +
+ (flags & VXLAN_F_GPE ? 0 : ETH_HLEN);
+}
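A hedged worked example of the arithmetic in vxlan_headroom(); the WARN_ON()s just spell out the header components for three flag combinations (vxlan_headroom_demo() is illustrative):

static void vxlan_headroom_demo(void)
{
	/* IPv4 VXLAN: 20 + 8 + 8 + 14 = 50 bytes */
	WARN_ON(vxlan_headroom(0) != 50);
	/* IPv6 VXLAN: 40 + 8 + 8 + 14 = 64 bytes */
	WARN_ON(vxlan_headroom(VXLAN_F_IPV6) != 64);
	/* IPv4 VXLAN-GPE drops the inner Ethernet header: 20 + 8 + 8 = 36 */
	WARN_ON(vxlan_headroom(VXLAN_F_GPE) != 36);
}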
static inline struct vxlanhdr *vxlan_hdr(struct sk_buff *skb)
{
@@ -549,12 +557,12 @@ static inline void vxlan_flag_attr_error(int attrtype,
}
static inline bool vxlan_fdb_nh_path_select(struct nexthop *nh,
- int hash,
+ u32 hash,
struct vxlan_rdst *rdst)
{
struct fib_nh_common *nhc;
- nhc = nexthop_path_fdb_result(nh, hash);
+ nhc = nexthop_path_fdb_result(nh, hash >> 1);
if (unlikely(!nhc))
return false;
diff --git a/include/net/xdp.h b/include/net/xdp.h
index d1c5381fc95f..e6770dd40c91 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -6,16 +6,17 @@
#ifndef __LINUX_NET_XDP_H__
#define __LINUX_NET_XDP_H__
-#include <linux/skbuff.h> /* skb_shared_info */
-#include <uapi/linux/netdev.h>
#include <linux/bitfield.h>
+#include <linux/filter.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h> /* skb_shared_info */
/**
* DOC: XDP RX-queue information
*
* The XDP RX-queue info (xdp_rxq_info) is associated with the driver
* level RX-ring queues. It is information that is specific to how
- * the driver have configured a given RX-ring queue.
+ * the driver has configured a given RX-ring queue.
*
* Each xdp_buff frame received in the driver carries a (pointer)
* reference to this xdp_rxq_info structure. This provides the XDP
@@ -31,7 +32,7 @@
* The struct is not directly tied to the XDP prog. A new XDP prog
* can be attached as long as it doesn't change the underlying
* RX-ring. If the RX-ring does change significantly, the NIC driver
- * naturally need to stop the RX-ring before purging and reallocating
+ * naturally needs to stop the RX-ring before purging and reallocating
* memory. In that process the driver MUST call unregister (which
* also applies for driver shutdown and unload). The register API is
* also mandatory during RX-ring setup.
@@ -45,8 +46,6 @@ enum xdp_mem_type {
MEM_TYPE_MAX,
};
-typedef u32 xdp_features_t;
-
/* XDP flags for ndo_xdp_xmit */
#define XDP_XMIT_FLUSH (1U << 0) /* doorbell signal consumer */
#define XDP_XMIT_FLAGS_MASK XDP_XMIT_FLUSH
@@ -370,7 +369,12 @@ xdp_data_meta_unsupported(const struct xdp_buff *xdp)
static inline bool xdp_metalen_invalid(unsigned long metalen)
{
- return (metalen & (sizeof(__u32) - 1)) || (metalen > 32);
+ unsigned long meta_max;
+
+ meta_max = type_max(typeof_member(struct skb_shared_info, meta_len));
+ BUILD_BUG_ON(!__builtin_constant_p(meta_max));
+
+ return !IS_ALIGNED(metalen, sizeof(u32)) || metalen > meta_max;
}
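Hedged examples of what xdp_metalen_invalid() rejects, assuming skb_shared_info::meta_len is a u8 (so type_max is 255) in the current layout:

static void xdp_metalen_demo(void)
{
	WARN_ON(xdp_metalen_invalid(0));    /* empty metadata is allowed */
	WARN_ON(!xdp_metalen_invalid(6));   /* not a multiple of sizeof(u32) */
	WARN_ON(!xdp_metalen_invalid(256)); /* exceeds meta_len's type_max */
}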
struct xdp_attachment_info {
@@ -384,14 +388,29 @@ void xdp_attachment_setup(struct xdp_attachment_info *info,
#define DEV_MAP_BULK_SIZE XDP_BULK_QUEUE_SIZE
+/* Define the relationship between xdp-rx-metadata kfunc and
+ * various other entities:
+ * - xdp_rx_metadata enum
+ * - netdev netlink enum (Documentation/netlink/specs/netdev.yaml)
+ * - kfunc name
+ * - xdp_metadata_ops field
+ */
#define XDP_METADATA_KFUNC_xxx \
XDP_METADATA_KFUNC(XDP_METADATA_KFUNC_RX_TIMESTAMP, \
- bpf_xdp_metadata_rx_timestamp) \
+ NETDEV_XDP_RX_METADATA_TIMESTAMP, \
+ bpf_xdp_metadata_rx_timestamp, \
+ xmo_rx_timestamp) \
XDP_METADATA_KFUNC(XDP_METADATA_KFUNC_RX_HASH, \
- bpf_xdp_metadata_rx_hash) \
-
-enum {
-#define XDP_METADATA_KFUNC(name, _) name,
+ NETDEV_XDP_RX_METADATA_HASH, \
+ bpf_xdp_metadata_rx_hash, \
+ xmo_rx_hash) \
+ XDP_METADATA_KFUNC(XDP_METADATA_KFUNC_RX_VLAN_TAG, \
+ NETDEV_XDP_RX_METADATA_VLAN_TAG, \
+ bpf_xdp_metadata_rx_vlan_tag, \
+ xmo_rx_vlan_tag) \
+
+enum xdp_rx_metadata {
+#define XDP_METADATA_KFUNC(name, _, __, ___) name,
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC
MAX_XDP_METADATA_KFUNC,
@@ -417,6 +436,7 @@ enum xdp_rss_hash_type {
XDP_RSS_L4_UDP = BIT(5),
XDP_RSS_L4_SCTP = BIT(6),
XDP_RSS_L4_IPSEC = BIT(7), /* L4 based hash include IPSEC SPI */
+ XDP_RSS_L4_ICMP = BIT(8),
/* Second part: RSS hash type combinations used for driver HW mapping */
XDP_RSS_TYPE_NONE = 0,
@@ -432,17 +452,27 @@ enum xdp_rss_hash_type {
XDP_RSS_TYPE_L4_IPV4_UDP = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_UDP,
XDP_RSS_TYPE_L4_IPV4_SCTP = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_SCTP,
XDP_RSS_TYPE_L4_IPV4_IPSEC = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_IPSEC,
+ XDP_RSS_TYPE_L4_IPV4_ICMP = XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_ICMP,
XDP_RSS_TYPE_L4_IPV6_TCP = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_TCP,
XDP_RSS_TYPE_L4_IPV6_UDP = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_UDP,
XDP_RSS_TYPE_L4_IPV6_SCTP = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_SCTP,
XDP_RSS_TYPE_L4_IPV6_IPSEC = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_IPSEC,
+ XDP_RSS_TYPE_L4_IPV6_ICMP = XDP_RSS_L3_IPV6 | XDP_RSS_L4 | XDP_RSS_L4_ICMP,
XDP_RSS_TYPE_L4_IPV6_TCP_EX = XDP_RSS_TYPE_L4_IPV6_TCP | XDP_RSS_L3_DYNHDR,
XDP_RSS_TYPE_L4_IPV6_UDP_EX = XDP_RSS_TYPE_L4_IPV6_UDP | XDP_RSS_L3_DYNHDR,
XDP_RSS_TYPE_L4_IPV6_SCTP_EX = XDP_RSS_TYPE_L4_IPV6_SCTP | XDP_RSS_L3_DYNHDR,
};
+struct xdp_metadata_ops {
+ int (*xmo_rx_timestamp)(const struct xdp_md *ctx, u64 *timestamp);
+ int (*xmo_rx_hash)(const struct xdp_md *ctx, u32 *hash,
+ enum xdp_rss_hash_type *rss_type);
+ int (*xmo_rx_vlan_tag)(const struct xdp_md *ctx, __be16 *vlan_proto,
+ u16 *vlan_tci);
+};
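A hedged sketch of how a driver might populate xdp_metadata_ops; the mydrv_* names are assumptions and a real implementation would read from its RX descriptors:

static int mydrv_xmo_rx_hash(const struct xdp_md *ctx, u32 *hash,
			     enum xdp_rss_hash_type *rss_type)
{
	/* illustrative constants; a real driver reads its RX descriptor */
	*hash = 0;
	*rss_type = XDP_RSS_TYPE_L4_IPV4_TCP;
	return 0;
}

static const struct xdp_metadata_ops mydrv_xdp_metadata_ops = {
	.xmo_rx_hash = mydrv_xmo_rx_hash,
};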
+
#ifdef CONFIG_NET
u32 bpf_xdp_metadata_kfunc_id(int id);
bool bpf_dev_bound_kfunc_id(u32 btf_id);
@@ -474,4 +504,20 @@ static inline void xdp_clear_features_flag(struct net_device *dev)
xdp_set_features_flag(dev, 0);
}
+static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
+ struct xdp_buff *xdp)
+{
+ /* Driver XDP hooks are invoked within a single NAPI poll cycle and thus
+ * under local_bh_disable(), which provides the needed RCU protection
+ * for accessing map entries.
+ */
+ u32 act = __bpf_prog_run(prog, xdp, BPF_DISPATCHER_FUNC(xdp));
+
+ if (static_branch_unlikely(&bpf_master_redirect_enabled_key)) {
+ if (act == XDP_TX && netif_is_bond_slave(xdp->rxq->dev))
+ act = xdp_master_redirect(xdp);
+ }
+
+ return act;
+}
#endif /* __LINUX_NET_XDP_H__ */
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index e96a1151ec75..3cb4dc9bd70e 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -14,6 +14,8 @@
#include <linux/mm.h>
#include <net/sock.h>
+#define XDP_UMEM_SG_FLAG (1 << 1)
+
struct net_device;
struct xsk_queue;
struct xdp_buff;
@@ -28,6 +30,7 @@ struct xdp_umem {
struct user_struct *user;
refcount_t users;
u8 flags;
+ u8 tx_metadata_len;
bool zc;
struct page **pgs;
int id;
@@ -52,6 +55,7 @@ struct xdp_sock {
struct xsk_buff_pool *pool;
u16 queue_id;
bool zc;
+ bool sg;
enum {
XSK_READY = 0,
XSK_BOUND,
@@ -60,6 +64,13 @@ struct xdp_sock {
struct xsk_queue *tx ____cacheline_aligned_in_smp;
struct list_head tx_list;
+ /* record the number of tx descriptors sent by this xsk and
+ * when it exceeds MAX_PER_SOCKET_BUDGET, an opportunity needs
+ * to be given to other xsks for sending tx descriptors, thereby
+ * preventing other XSKs from being starved.
+ */
+ u32 tx_budget_spent;
+
/* Protects generic receive. */
spinlock_t rx_lock;
@@ -67,6 +78,12 @@ struct xdp_sock {
u64 rx_dropped;
u64 rx_queue_full;
+ /* When __xsk_generic_xmit() must return before it sees the EOP descriptor for the current
+ * packet, the partially built skb is saved here so that packet building can resume in
+ * the next call of __xsk_generic_xmit().
+ */
+ struct sk_buff *skb;
+
struct list_head map_list;
/* Protects map_list */
spinlock_t map_list_lock;
@@ -76,12 +93,105 @@ struct xdp_sock {
struct xsk_queue *cq_tmp; /* Only as tmp storage before bind */
};
+/*
+ * AF_XDP TX metadata hooks for network devices.
+ * The following hooks can be defined; unless noted otherwise, they are
+ * optional and can be filled with a null pointer.
+ *
+ * void (*tmo_request_timestamp)(void *priv)
+ * Called when an AF_XDP frame requests an egress timestamp.
+ *
+ * u64 (*tmo_fill_timestamp)(void *priv)
+ * Called when an AF_XDP frame that had requested an egress timestamp
+ * receives a completion. The hook needs to return the actual HW timestamp.
+ *
+ * void (*tmo_request_checksum)(u16 csum_start, u16 csum_offset, void *priv)
+ * Called when an AF_XDP frame requests HW checksum offload. csum_start
+ * indicates the position where checksumming should start.
+ * csum_offset indicates the position where the checksum should be stored.
+ *
+ */
+struct xsk_tx_metadata_ops {
+ void (*tmo_request_timestamp)(void *priv);
+ u64 (*tmo_fill_timestamp)(void *priv);
+ void (*tmo_request_checksum)(u16 csum_start, u16 csum_offset, void *priv);
+};
+
#ifdef CONFIG_XDP_SOCKETS
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
void __xsk_map_flush(void);
+/**
+ * xsk_tx_metadata_to_compl - Save the metadata needed to perform
+ * tx completion in the future.
+ * @meta: pointer to AF_XDP metadata area
+ * @compl: pointer to output struct xsk_tx_metadata_to_compl
+ *
+ * This function should be called by the networking device when
+ * it prepares AF_XDP egress packet. The value of @compl should be stored
+ * and passed to xsk_tx_metadata_complete upon TX completion.
+ */
+static inline void xsk_tx_metadata_to_compl(struct xsk_tx_metadata *meta,
+ struct xsk_tx_metadata_compl *compl)
+{
+ if (!meta)
+ return;
+
+ if (meta->flags & XDP_TXMD_FLAGS_TIMESTAMP)
+ compl->tx_timestamp = &meta->completion.tx_timestamp;
+ else
+ compl->tx_timestamp = NULL;
+}
+
+/**
+ * xsk_tx_metadata_request - Evaluate AF_XDP TX metadata at submission
+ * and call appropriate xsk_tx_metadata_ops operation.
+ * @meta: pointer to AF_XDP metadata area
+ * @ops: pointer to struct xsk_tx_metadata_ops
+ * @priv: pointer to driver-private area
+ *
+ * This function should be called by the networking device when
+ * it prepares an AF_XDP egress packet.
+ */
+static inline void xsk_tx_metadata_request(const struct xsk_tx_metadata *meta,
+ const struct xsk_tx_metadata_ops *ops,
+ void *priv)
+{
+ if (!meta)
+ return;
+
+ if (ops->tmo_request_timestamp)
+ if (meta->flags & XDP_TXMD_FLAGS_TIMESTAMP)
+ ops->tmo_request_timestamp(priv);
+
+ if (ops->tmo_request_checksum)
+ if (meta->flags & XDP_TXMD_FLAGS_CHECKSUM)
+ ops->tmo_request_checksum(meta->request.csum_start,
+ meta->request.csum_offset, priv);
+}
+
+/**
+ * xsk_tx_metadata_complete - Evaluate AF_XDP TX metadata at completion
+ * and call appropriate xsk_tx_metadata_ops operation.
+ * @compl: pointer to completion metadata produced from xsk_tx_metadata_to_compl
+ * @ops: pointer to struct xsk_tx_metadata_ops
+ * @priv: pointer to driver-private area
+ *
+ * This function should be called by the networking device upon
+ * AF_XDP egress completion.
+ */
+static inline void xsk_tx_metadata_complete(struct xsk_tx_metadata_compl *compl,
+ const struct xsk_tx_metadata_ops *ops,
+ void *priv)
+{
+ if (!compl)
+ return;
+
+ *compl->tx_timestamp = ops->tmo_fill_timestamp(priv);
+}
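A hedged sketch of the driver flow the three helpers above are designed for; the my_dev_* names are assumptions, and the demo presumes the frame requested XDP_TXMD_FLAGS_TIMESTAMP (otherwise xsk_tx_metadata_complete() would dereference a NULL tx_timestamp):

static void my_dev_request_timestamp(void *priv)
{
	/* arm the HW egress timestamp for this descriptor (illustrative) */
}

static u64 my_dev_fill_timestamp(void *priv)
{
	return 0; /* a real driver reads the HW completion timestamp */
}

static const struct xsk_tx_metadata_ops my_dev_tmo = {
	.tmo_request_timestamp = my_dev_request_timestamp,
	.tmo_fill_timestamp = my_dev_fill_timestamp,
};

static void my_dev_xmit_demo(struct xsk_tx_metadata *meta, void *priv)
{
	struct xsk_tx_metadata_compl compl;

	xsk_tx_metadata_request(meta, &my_dev_tmo, priv); /* at submission */
	xsk_tx_metadata_to_compl(meta, &compl); /* stash with the descriptor */
	/* ... hand the descriptor to HW, wait for its completion ... */
	xsk_tx_metadata_complete(&compl, &my_dev_tmo, priv);
}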
+
#else
static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
@@ -98,6 +208,32 @@ static inline void __xsk_map_flush(void)
{
}
+static inline void xsk_tx_metadata_to_compl(struct xsk_tx_metadata *meta,
+ struct xsk_tx_metadata_compl *compl)
+{
+}
+
+static inline void xsk_tx_metadata_request(struct xsk_tx_metadata *meta,
+ const struct xsk_tx_metadata_ops *ops,
+ void *priv)
+{
+}
+
+static inline void xsk_tx_metadata_complete(struct xsk_tx_metadata_compl *compl,
+ const struct xsk_tx_metadata_ops *ops,
+ void *priv)
+{
+}
+
#endif /* CONFIG_XDP_SOCKETS */
+#if defined(CONFIG_XDP_SOCKETS) && defined(CONFIG_DEBUG_NET)
+bool xsk_map_check_flush(void);
+#else
+static inline bool xsk_map_check_flush(void)
+{
+ return false;
+}
+#endif
+
#endif /* _LINUX_XDP_SOCK_H */
diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
index 9c0d860609ba..c9aec9ab6191 100644
--- a/include/net/xdp_sock_drv.h
+++ b/include/net/xdp_sock_drv.h
@@ -12,6 +12,12 @@
#define XDP_UMEM_MIN_CHUNK_SHIFT 11
#define XDP_UMEM_MIN_CHUNK_SIZE (1 << XDP_UMEM_MIN_CHUNK_SHIFT)
+struct xsk_cb_desc {
+ void *src;
+ u8 off;
+ u8 bytes;
+};
+
#ifdef CONFIG_XDP_SOCKETS
void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
@@ -47,6 +53,12 @@ static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
xp_set_rxq_info(pool, rxq);
}
+static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
+ struct xsk_cb_desc *desc)
+{
+ xp_fill_cb(pool, desc);
+}
+
static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
@@ -89,6 +101,11 @@ static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
return xp_alloc(pool);
}
+static inline bool xsk_is_eop_desc(struct xdp_desc *desc)
+{
+ return !xp_mb_desc(desc);
+}
+
/* Returns as many entries as possible up to max. 0 <= N <= max. */
static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
@@ -103,15 +120,68 @@ static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
static inline void xsk_buff_free(struct xdp_buff *xdp)
{
struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
+ struct list_head *xskb_list = &xskb->pool->xskb_list;
+ struct xdp_buff_xsk *pos, *tmp;
+
+ if (likely(!xdp_buff_has_frags(xdp)))
+ goto out;
+ list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) {
+ list_del(&pos->xskb_list_node);
+ xp_free(pos);
+ }
+
+ xdp_get_shared_info_from_buff(xdp)->nr_frags = 0;
+out:
xp_free(xskb);
}
+static inline void xsk_buff_add_frag(struct xdp_buff *xdp)
+{
+ struct xdp_buff_xsk *frag = container_of(xdp, struct xdp_buff_xsk, xdp);
+
+ list_add_tail(&frag->xskb_list_node, &frag->pool->xskb_list);
+}
+
+static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
+{
+ struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
+ struct xdp_buff *ret = NULL;
+ struct xdp_buff_xsk *frag;
+
+ frag = list_first_entry_or_null(&xskb->pool->xskb_list,
+ struct xdp_buff_xsk, xskb_list_node);
+ if (frag) {
+ list_del(&frag->xskb_list_node);
+ ret = &frag->xdp;
+ }
+
+ return ret;
+}
+
+static inline void xsk_buff_del_tail(struct xdp_buff *tail)
+{
+ struct xdp_buff_xsk *xskb = container_of(tail, struct xdp_buff_xsk, xdp);
+
+ list_del(&xskb->xskb_list_node);
+}
+
+static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
+{
+ struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
+ struct xdp_buff_xsk *frag;
+
+ frag = list_last_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk,
+ xskb_list_node);
+ return &frag->xdp;
+}
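A hedged sketch of a zero-copy RX path collecting frags with the helpers above; demo_rx_multibuf() is illustrative and elides descriptor handling:

static void demo_rx_multibuf(struct xsk_buff_pool *pool,
			     struct xdp_buff *first, u32 nfrags)
{
	while (nfrags--) {
		struct xdp_buff *frag = xsk_buff_alloc(pool);

		if (!frag)
			break;
		/* queue onto pool->xskb_list; consumed by xsk_buff_get_frag()
		 * or released wholesale by xsk_buff_free(first)
		 */
		xsk_buff_add_frag(frag);
	}
}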
+
static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
xdp->data_meta = xdp->data;
xdp->data_end = xdp->data + size;
+ xdp->flags = 0;
}
static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
@@ -125,6 +195,30 @@ static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
return xp_raw_get_data(pool, addr);
}
+#define XDP_TXMD_FLAGS_VALID ( \
+ XDP_TXMD_FLAGS_TIMESTAMP | \
+ XDP_TXMD_FLAGS_CHECKSUM | \
+ 0)
+
+static inline bool xsk_buff_valid_tx_metadata(struct xsk_tx_metadata *meta)
+{
+ return !(meta->flags & ~XDP_TXMD_FLAGS_VALID);
+}
+
+static inline struct xsk_tx_metadata *xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
+{
+ struct xsk_tx_metadata *meta;
+
+ if (!pool->tx_metadata_len)
+ return NULL;
+
+ meta = xp_raw_get_data(pool, addr) - pool->tx_metadata_len;
+ if (unlikely(!xsk_buff_valid_tx_metadata(meta)))
+ return NULL; /* no way to signal the error to the user */
+
+ return meta;
+}
+
static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
{
struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
@@ -210,6 +304,11 @@ static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
{
}
+static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
+ struct xsk_cb_desc *desc)
+{
+}
+
static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool)
{
return 0;
@@ -241,6 +340,11 @@ static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
return NULL;
}
+static inline bool xsk_is_eop_desc(struct xdp_desc *desc)
+{
+ return false;
+}
+
static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
return 0;
@@ -255,10 +359,24 @@ static inline void xsk_buff_free(struct xdp_buff *xdp)
{
}
-static inline void xsk_buff_discard(struct xdp_buff *xdp)
+static inline void xsk_buff_add_frag(struct xdp_buff *xdp)
{
}
+static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
+{
+ return NULL;
+}
+
+static inline void xsk_buff_del_tail(struct xdp_buff *tail)
+{
+}
+
+static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
+{
+ return NULL;
+}
+
static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
}
@@ -274,6 +392,16 @@ static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
return NULL;
}
+static inline bool xsk_buff_valid_tx_metadata(struct xsk_tx_metadata *meta)
+{
+ return false;
+}
+
+static inline struct xsk_tx_metadata *xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
+{
+ return NULL;
+}
+
static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
{
}
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 151ca95dd08d..1d107241b901 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1207,20 +1207,20 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
return __xfrm_policy_check2(sk, dir, skb, AF_INET6, 1);
}
-int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
+int __xfrm_decode_session(struct net *net, struct sk_buff *skb, struct flowi *fl,
unsigned int family, int reverse);
-static inline int xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
+static inline int xfrm_decode_session(struct net *net, struct sk_buff *skb, struct flowi *fl,
unsigned int family)
{
- return __xfrm_decode_session(skb, fl, family, 0);
+ return __xfrm_decode_session(net, skb, fl, family, 0);
}
-static inline int xfrm_decode_session_reverse(struct sk_buff *skb,
+static inline int xfrm_decode_session_reverse(struct net *net, struct sk_buff *skb,
struct flowi *fl,
unsigned int family)
{
- return __xfrm_decode_session(skb, fl, family, 1);
+ return __xfrm_decode_session(net, skb, fl, family, 1);
}
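/*
 * Illustrative sketch, not part of this header: with the new signature,
 * callers pass the netns explicitly instead of having it derived inside
 * the helper; a typical source is the skb's device. mydrv_decode() is
 * hypothetical.
 */
static int mydrv_decode(struct sk_buff *skb)
{
	struct flowi fl;

	return xfrm_decode_session(dev_net(skb->dev), skb, &fl, AF_INET);
}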
int __xfrm_route_forward(struct sk_buff *skb, unsigned short family);
@@ -1296,7 +1296,7 @@ static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *sk
{
return 1;
}
-static inline int xfrm_decode_session_reverse(struct sk_buff *skb,
+static inline int xfrm_decode_session_reverse(struct net *net, struct sk_buff *skb,
struct flowi *fl,
unsigned int family)
{
@@ -1669,7 +1669,6 @@ int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb);
#endif
void xfrm_local_error(struct sk_buff *skb, int mtu);
-int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb);
int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
int encap_type);
int xfrm4_transport_finish(struct sk_buff *skb, int async);
@@ -1689,7 +1688,6 @@ int xfrm4_protocol_deregister(struct xfrm4_protocol *handler, unsigned char prot
int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
-int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi,
struct ip6_tnl *t);
int xfrm6_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
@@ -1712,6 +1710,10 @@ int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb);
void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu);
int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
int xfrm6_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
+struct sk_buff *xfrm4_gro_udp_encap_rcv(struct sock *sk, struct list_head *head,
+ struct sk_buff *skb);
+struct sk_buff *xfrm6_gro_udp_encap_rcv(struct sock *sk, struct list_head *head,
+ struct sk_buff *skb);
int xfrm_user_policy(struct sock *sk, int optname, sockptr_t optval,
int optlen);
#else
@@ -1984,6 +1986,7 @@ static inline void xfrm_dev_state_free(struct xfrm_state *x)
if (dev->xfrmdev_ops->xdo_dev_state_free)
dev->xfrmdev_ops->xdo_dev_state_free(x);
xso->dev = NULL;
+ xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
netdev_put(dev, &xso->dev_tracker);
}
}
@@ -2165,7 +2168,7 @@ static inline bool xfrm6_local_dontfrag(const struct sock *sk)
proto = sk->sk_protocol;
if (proto == IPPROTO_UDP || proto == IPPROTO_RAW)
- return inet6_sk(sk)->dontfrag;
+ return inet6_test_bit(DONTFRAG, sk);
return false;
}
@@ -2187,4 +2190,13 @@ static inline int register_xfrm_interface_bpf(void)
#endif
+#if IS_ENABLED(CONFIG_DEBUG_INFO_BTF)
+int register_xfrm_state_bpf(void);
+#else
+static inline int register_xfrm_state_bpf(void)
+{
+ return 0;
+}
+#endif
+
#endif /* _NET_XFRM_H */
diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
index d318c769b445..99dd7376df6a 100644
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -12,6 +12,7 @@
struct xsk_buff_pool;
struct xdp_rxq_info;
+struct xsk_cb_desc;
struct xsk_queue;
struct xdp_desc;
struct xdp_umem;
@@ -29,9 +30,11 @@ struct xdp_buff_xsk {
struct xsk_buff_pool *pool;
u64 orig_addr;
struct list_head free_list_node;
+ struct list_head xskb_list_node;
};
#define XSK_CHECK_PRIV_TYPE(t) BUILD_BUG_ON(sizeof(t) > offsetofend(struct xdp_buff_xsk, cb))
+#define XSK_TX_COMPL_FITS(t) BUILD_BUG_ON(sizeof(struct xsk_tx_metadata_compl) > sizeof(t))
struct xsk_dma_map {
dma_addr_t *dma_pages;
@@ -54,6 +57,7 @@ struct xsk_buff_pool {
struct xdp_umem *umem;
struct work_struct work;
struct list_head free_list;
+ struct list_head xskb_list;
u32 heads_cnt;
u16 queue_id;
@@ -75,10 +79,12 @@ struct xsk_buff_pool {
u32 chunk_size;
u32 chunk_shift;
u32 frame_len;
+ u8 tx_metadata_len; /* inherited from umem */
u8 cached_need_wakeup;
bool uses_need_wakeup;
bool dma_need_sync;
bool unaligned;
+ bool tx_sw_csum;
void *addrs;
/* Mutual exclusion of the completion ring in the SKB mode. Two cases to protect:
* NAPI TX thread and sendmsg error paths in the SKB destructor callback and when
@@ -130,6 +136,7 @@ static inline void xp_init_xskb_dma(struct xdp_buff_xsk *xskb, struct xsk_buff_p
/* AF_XDP ZC drivers, via xdp_sock_buff.h */
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq);
+void xp_fill_cb(struct xsk_buff_pool *pool, struct xsk_cb_desc *desc);
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
unsigned long attrs, struct page **pages, u32 nr_pages);
void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs);
@@ -180,10 +187,15 @@ static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool,
if (likely(!cross_pg))
return false;
- return pool->dma_pages_cnt &&
+ return pool->dma_pages &&
!(pool->dma_pages[addr >> PAGE_SHIFT] & XSK_NEXT_PG_CONTIG_MASK);
}
+static inline bool xp_mb_desc(struct xdp_desc *desc)
+{
+ return desc->options & XDP_PKT_CONTD;
+}
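/*
 * Illustrative sketch, not part of this header: XDP_PKT_CONTD in
 * desc->options marks a descriptor whose frame continues in the next
 * one, so an end-of-packet test is simply its negation. The mydrv_*
 * name is hypothetical.
 */
static bool mydrv_desc_is_eop(struct xdp_desc *desc)
{
	return !xp_mb_desc(desc);
}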
+
static inline u64 xp_aligned_extract_addr(struct xsk_buff_pool *pool, u64 addr)
{
return addr & pool->chunk_mask;
@@ -226,4 +238,9 @@ static inline u64 xp_get_handle(struct xdp_buff_xsk *xskb)
return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}
+static inline bool xp_tx_metadata_enabled(const struct xsk_buff_pool *pool)
+{
+ return pool->tx_metadata_len > 0;
+}
+
#endif /* XSK_BUFF_POOL_H_ */
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index 2e3843b761e8..3f1b58d8b4bf 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -277,6 +277,8 @@ enum ib_port_capability_mask2_bits {
IB_PORT_LINK_WIDTH_2X_SUP = 1 << 4,
IB_PORT_LINK_SPEED_HDR_SUP = 1 << 5,
IB_PORT_LINK_SPEED_NDR_SUP = 1 << 10,
+ IB_PORT_EXTENDED_SPEEDS2_SUP = 1 << 11,
+ IB_PORT_LINK_SPEED_XDR_SUP = 1 << 12,
};
#define OPA_CLASS_PORT_INFO_PR_SUPPORT BIT(26)
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index 95896472a82b..565a85044541 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -77,6 +77,13 @@ static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
{
__rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl,
umem->sgt_append.sgt.nents, pgsz);
+ biter->__sg_advance = ib_umem_offset(umem) & ~(pgsz - 1);
+ biter->__sg_numblocks = ib_umem_num_dma_blocks(umem, pgsz);
+}
+
+static inline bool __rdma_umem_block_iter_next(struct ib_block_iter *biter)
+{
+ return __rdma_block_iter_next(biter) && biter->__sg_numblocks--;
}
/**
@@ -92,7 +99,7 @@ static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
*/
#define rdma_umem_for_each_dma_block(umem, biter, pgsz) \
for (__rdma_umem_block_iter_start(biter, umem, pgsz); \
- __rdma_block_iter_next(biter);)
+ __rdma_umem_block_iter_next(biter);)
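/*
 * Illustrative sketch, not part of this header: with the bounded
 * iterator, the loop now stops after ib_umem_num_dma_blocks() blocks
 * even when the final scatterlist entry is rounded up past the end of
 * the umem. This hypothetical helper just counts the blocks it visits.
 */
static inline unsigned long mydrv_count_dma_blocks(struct ib_umem *umem,
						   unsigned long pgsz)
{
	struct ib_block_iter biter;
	unsigned long n = 0;

	rdma_umem_for_each_dma_block(umem, &biter, pgsz)
		n++;
	return n;
}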
#ifdef CONFIG_INFINIBAND_USER_MEM
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 1e7774ac808f..b7b6b58dd348 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -561,6 +561,7 @@ enum ib_port_speed {
IB_SPEED_EDR = 32,
IB_SPEED_HDR = 64,
IB_SPEED_NDR = 128,
+ IB_SPEED_XDR = 256,
};
enum ib_stat_flag {
@@ -607,7 +608,7 @@ struct rdma_hw_stats {
const struct rdma_stat_desc *descs;
unsigned long *is_disabled;
int num_counters;
- u64 value[];
+ u64 value[] __counted_by(num_counters);
};
#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
@@ -840,6 +841,7 @@ enum ib_rate {
IB_RATE_50_GBPS = 20,
IB_RATE_400_GBPS = 21,
IB_RATE_600_GBPS = 22,
+ IB_RATE_800_GBPS = 23,
};
/**
@@ -1094,7 +1096,7 @@ struct ib_qp_cap {
/*
* Maximum number of rdma_rw_ctx structures in flight at a time.
- * ib_create_qp() will calculate the right amount of neededed WRs
+ * ib_create_qp() will calculate the right amount of needed WRs
* and MRs based on this.
*/
u32 max_rdma_ctxs;
@@ -2608,6 +2610,8 @@ struct ib_device_ops {
int (*fill_res_qp_entry)(struct sk_buff *msg, struct ib_qp *ibqp);
int (*fill_res_qp_entry_raw)(struct sk_buff *msg, struct ib_qp *ibqp);
int (*fill_res_cm_id_entry)(struct sk_buff *msg, struct rdma_cm_id *id);
+ int (*fill_res_srq_entry)(struct sk_buff *msg, struct ib_srq *ib_srq);
+ int (*fill_res_srq_entry_raw)(struct sk_buff *msg, struct ib_srq *ib_srq);
/* Device lifecycle callbacks */
/*
@@ -2846,6 +2850,7 @@ struct ib_block_iter {
/* internal states */
struct scatterlist *__sg; /* sg holding the current aligned block */
dma_addr_t __dma_addr; /* unaligned DMA address of this block */
+ size_t __sg_numblocks; /* ib_umem_num_dma_blocks() */
unsigned int __sg_nents; /* number of SG entries */
unsigned int __sg_advance; /* number of bytes to advance in sg in next step */
unsigned int __pg_bit; /* alignment of current block */
@@ -4440,8 +4445,6 @@ struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u32 port,
const struct sockaddr *addr);
int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
unsigned int port);
-struct net_device *ib_device_netdev(struct ib_device *dev, u32 port);
-
struct ib_wq *ib_create_wq(struct ib_pd *pd,
struct ib_wq_init_attr *init_attr);
int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata);
diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
index 03abd30e6c8c..2b22f153ef63 100644
--- a/include/rdma/iw_cm.h
+++ b/include/rdma/iw_cm.h
@@ -115,27 +115,6 @@ struct iw_cm_id *iw_create_cm_id(struct ib_device *device,
void iw_destroy_cm_id(struct iw_cm_id *cm_id);
/**
- * iw_cm_bind_qp - Unbind the specified IW CM identifier and QP
- *
- * @cm_id: The IW CM idenfier to unbind from the QP.
- * @qp: The QP
- *
- * This is called by the provider when destroying the QP to ensure
- * that any references held by the IWCM are released. It may also
- * be called by the IWCM when destroying a CM_ID to that any
- * references held by the provider are released.
- */
-void iw_cm_unbind_qp(struct iw_cm_id *cm_id, struct ib_qp *qp);
-
-/**
- * iw_cm_get_qp - Return the ib_qp associated with a QPN
- *
- * @ib_device: The IB device
- * @qpn: The queue pair number
- */
-struct ib_qp *iw_cm_get_qp(struct ib_device *device, int qpn);
-
-/**
* iw_cm_listen - Listen for incoming connection requests on the
* specified IW CM id.
*
diff --git a/include/rdma/uverbs_ioctl.h b/include/rdma/uverbs_ioctl.h
index 9d45a5b20316..06287de69cd2 100644
--- a/include/rdma/uverbs_ioctl.h
+++ b/include/rdma/uverbs_ioctl.h
@@ -436,8 +436,10 @@ struct uapi_definition {
}, \
##__VA_ARGS__
#define UAPI_DEF_CHAIN_OBJ_TREE_NAMED(_object_enum, ...) \
- UAPI_DEF_CHAIN_OBJ_TREE(_object_enum, &UVERBS_OBJECT(_object_enum), \
- ##__VA_ARGS__)
+ UAPI_DEF_CHAIN_OBJ_TREE(_object_enum, \
+ PTR_IF(IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS), \
+ &UVERBS_OBJECT(_object_enum)), \
+ ##__VA_ARGS__)
/*
* =======================================
diff --git a/include/rv/da_monitor.h b/include/rv/da_monitor.h
index 9eb75683e012..9705b2a98e49 100644
--- a/include/rv/da_monitor.h
+++ b/include/rv/da_monitor.h
@@ -262,7 +262,7 @@ static inline void da_monitor_destroy_##name(void) \
/* \
* per-cpu monitor variables \
*/ \
-DEFINE_PER_CPU(struct da_monitor, da_mon_##name); \
+static DEFINE_PER_CPU(struct da_monitor, da_mon_##name); \
\
/* \
* da_get_monitor_##name - return current CPU monitor address \
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 159823e0afbf..f5257103fdb6 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -23,22 +23,12 @@
struct block_device;
-enum sas_class {
- SAS,
- EXPANDER
-};
-
enum sas_phy_role {
PHY_ROLE_NONE = 0,
PHY_ROLE_TARGET = 0x40,
PHY_ROLE_INITIATOR = 0x80,
};
-enum sas_phy_type {
- PHY_TYPE_PHYSICAL,
- PHY_TYPE_VIRTUAL
-};
-
/* The events are mnemonically described in sas_dump.c
* so when updating/adding events here, please also
* update the other file too.
@@ -258,7 +248,6 @@ struct asd_sas_port {
/* public: */
int id;
- enum sas_class class;
u8 sas_addr[SAS_ADDR_SIZE];
u8 attached_sas_addr[SAS_ADDR_SIZE];
enum sas_protocol iproto;
@@ -319,11 +308,9 @@ struct asd_sas_phy {
int enabled; /* must be set */
int id; /* must be set */
- enum sas_class class;
enum sas_protocol iproto;
enum sas_protocol tproto;
- enum sas_phy_type type;
enum sas_phy_role role;
enum sas_oob_mode oob_mode;
enum sas_linkrate linkrate;
@@ -346,11 +333,6 @@ struct asd_sas_phy {
void *lldd_phy; /* not touched by the sas_class_code */
};
-struct scsi_core {
- struct Scsi_Host *shost;
-
-};
-
enum sas_ha_state {
SAS_HA_REGISTERED,
SAS_HA_DRAINING,
@@ -371,12 +353,11 @@ struct sas_ha_struct {
struct mutex disco_mutex;
- struct scsi_core core;
+ struct Scsi_Host *shost;
/* public: */
char *sas_ha_name;
struct device *dev; /* should be set */
- struct module *lldd_module; /* should be set */
struct workqueue_struct *event_q;
struct workqueue_struct *disco_q;
@@ -423,8 +404,6 @@ cmd_to_domain_dev(struct scsi_cmnd *cmd)
return sdev_to_domain_dev(cmd->device);
}
-void sas_hash_addr(u8 *hashed, const u8 *sas_addr);
-
/* Before calling a notify event, LLDD should use this function
* when the link is severed (possibly from its tasklet).
* The idea is that the Class only reads those, while the LLDD,
@@ -544,12 +523,9 @@ struct sas_ata_task {
struct host_to_dev_fis fis;
u8 atapi_packet[16]; /* 0 if not ATAPI task */
- u8 retry_count; /* hardware retry, should be > 0 */
-
u8 dma_xfer:1; /* PIO:0 or DMA:1 */
u8 use_ncq:1;
- u8 set_affil_pol:1;
- u8 stp_affil_pol:1;
+ u8 return_fis_on_success:1;
u8 device_control_reg_update:1;
@@ -582,12 +558,8 @@ enum task_attribute {
};
struct sas_ssp_task {
- u8 retry_count; /* hardware retry, should be > 0 */
-
u8 LUN[8];
- u8 enable_first_burst:1;
enum task_attribute task_attr;
- u8 task_prio;
struct scsi_cmnd *cmd;
};
@@ -707,7 +679,6 @@ extern void sas_resume_ha(struct sas_ha_struct *sas_ha);
extern void sas_resume_ha_no_sync(struct sas_ha_struct *sas_ha);
extern void sas_suspend_ha(struct sas_ha_struct *sas_ha);
-int sas_set_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates);
int sas_phy_reset(struct sas_phy *phy, int hard_reset);
int sas_phy_enable(struct sas_phy *phy, int enable);
extern int sas_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
@@ -725,22 +696,6 @@ extern struct scsi_transport_template *
sas_domain_attach_transport(struct sas_domain_function_template *);
extern struct device_attribute dev_attr_phy_event_threshold;
-int sas_discover_root_expander(struct domain_device *);
-
-void sas_init_ex_attr(void);
-
-int sas_ex_revalidate_domain(struct domain_device *);
-
-void sas_unregister_domain_devices(struct asd_sas_port *port, int gone);
-void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *);
-void sas_discover_event(struct asd_sas_port *, enum discover_event ev);
-
-int sas_discover_end_dev(struct domain_device *);
-
-void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *);
-
-void sas_init_dev(struct domain_device *);
-
void sas_task_abort(struct sas_task *);
int sas_eh_abort_handler(struct scsi_cmnd *cmd);
int sas_eh_device_reset_handler(struct scsi_cmnd *cmd);
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index ec093594ba53..4498f845b112 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -157,6 +157,9 @@ enum scsi_disposition {
#define SCSI_3 4 /* SPC */
#define SCSI_SPC_2 5
#define SCSI_SPC_3 6
+#define SCSI_SPC_4 7
+#define SCSI_SPC_5 8
+#define SCSI_SPC_6 14
/*
* INQ PERIPHERAL QUALIFIERS
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index c2cb5f69635c..526def14e7fb 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -52,6 +52,11 @@ struct scsi_pointer {
#define SCMD_TAGGED (1 << 0)
#define SCMD_INITIALIZED (1 << 1)
#define SCMD_LAST (1 << 2)
+/*
+ * libata uses SCSI EH to fetch sense data for successful commands.
+ * SCSI EH should not overwrite scmd->result when SCMD_FORCE_EH_SUCCESS is set.
+ */
+#define SCMD_FORCE_EH_SUCCESS (1 << 3)
#define SCMD_FAIL_IF_RECOVERING (1 << 4)
/* flags preserved across unprep / reprep */
#define SCMD_PRESERVED_FLAGS (SCMD_INITIALIZED | SCMD_FAIL_IF_RECOVERING)
diff --git a/include/scsi/scsi_common.h b/include/scsi/scsi_common.h
index 5b567b43e1b1..fb58715fac86 100644
--- a/include/scsi/scsi_common.h
+++ b/include/scsi/scsi_common.h
@@ -7,8 +7,21 @@
#define _SCSI_COMMON_H_
#include <linux/types.h>
+#include <uapi/linux/pr.h>
#include <scsi/scsi_proto.h>
+enum scsi_pr_type {
+ SCSI_PR_WRITE_EXCLUSIVE = 0x01,
+ SCSI_PR_EXCLUSIVE_ACCESS = 0x03,
+ SCSI_PR_WRITE_EXCLUSIVE_REG_ONLY = 0x05,
+ SCSI_PR_EXCLUSIVE_ACCESS_REG_ONLY = 0x06,
+ SCSI_PR_WRITE_EXCLUSIVE_ALL_REGS = 0x07,
+ SCSI_PR_EXCLUSIVE_ACCESS_ALL_REGS = 0x08,
+};
+
+enum scsi_pr_type block_pr_type_to_scsi(enum pr_type type);
+enum pr_type scsi_pr_type_to_block(enum scsi_pr_type type);
+
static inline unsigned
scsi_varlen_cdb_length(const void *hdr)
{
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index f10a008e5bfa..c38f4fe5e64c 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -100,10 +100,6 @@ struct scsi_vpd {
unsigned char data[];
};
-enum scsi_vpd_parameters {
- SCSI_VPD_HEADER_SIZE = 4,
-};
-
struct scsi_device {
struct Scsi_Host *host;
struct request_queue *request_queue;
@@ -161,6 +157,32 @@ struct scsi_device {
* pass settings from slave_alloc to scsi
* core. */
unsigned int eh_timeout; /* Error handling timeout */
+
+ /*
+ * If true, let the high-level device driver (sd) manage the device
+ * power state for system suspend/resume (suspend to RAM and
+ * hibernation) operations.
+ */
+ unsigned manage_system_start_stop:1;
+
+ /*
+ * If true, let the high-level device driver (sd) manage the device
+ * power state for runtime device suspend and resume operations.
+ */
+ unsigned manage_runtime_start_stop:1;
+
+ /*
+ * If true, let the high-level device driver (sd) manage the device
+ * power state for system shutdown (power off) operations.
+ */
+ unsigned manage_shutdown:1;
+
+ /*
+ * If set and if the device is runtime suspended, ask the high-level
+ * device driver (sd) to force a runtime resume of the device.
+ */
+ unsigned force_runtime_start_on_system_start:1;
+
unsigned removable:1;
unsigned changed:1; /* Data invalid due to media change */
unsigned busy:1; /* Used to prevent races */
@@ -182,6 +204,7 @@ struct scsi_device {
unsigned use_10_for_rw:1; /* first try 10-byte read / write */
unsigned use_10_for_ms:1; /* first try 10-byte mode sense/select */
unsigned set_dbd_for_ms:1; /* Set "DBD" field in mode sense */
+ unsigned read_before_ms:1; /* perform a READ before MODE SENSE */
unsigned no_report_opcodes:1; /* no REPORT SUPPORTED OPERATION CODES */
unsigned no_write_same:1; /* no WRITE SAME command */
unsigned use_16_for_rw:1; /* Use read/write(16) over read/write(10) */
@@ -193,7 +216,6 @@ struct scsi_device {
unsigned use_192_bytes_for_3f:1; /* ask for 192 bytes from page 0x3f */
unsigned no_start_on_add:1; /* do not issue start on add */
unsigned allow_restart:1; /* issue START_UNIT in error handler */
- unsigned manage_start_stop:1; /* Let HLD (sd) manage start/stop */
unsigned start_stop_pwr_cond:1; /* Set power cond. in START_STOP_UNIT */
unsigned no_uld_attach:1; /* disable connecting to upper level drivers */
unsigned select_no_atn:1;
@@ -218,6 +240,9 @@ struct scsi_device {
unsigned silence_suspend:1; /* Do not print runtime PM related messages */
unsigned no_vpd_size:1; /* No VPD size reported in header */
+ unsigned cdl_supported:1; /* Command duration limits supported */
+ unsigned cdl_enable:1; /* Enable/disable Command duration limits */
+
unsigned int queue_stopped; /* request queue is quiesced */
bool offline_already; /* Device offline message logged */
@@ -364,6 +389,8 @@ extern int scsi_register_device_handler(struct scsi_device_handler *scsi_dh);
extern void scsi_remove_device(struct scsi_device *);
extern int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh);
void scsi_attach_vpd(struct scsi_device *sdev);
+void scsi_cdl_check(struct scsi_device *sdev);
+int scsi_cdl_enable(struct scsi_device *sdev, bool enable);
extern struct scsi_device *scsi_device_from_queue(struct request_queue *q);
extern int __must_check scsi_device_get(struct scsi_device *);
@@ -421,10 +448,10 @@ extern int scsi_track_queue_full(struct scsi_device *, int);
extern int scsi_set_medium_removal(struct scsi_device *, char);
-extern int scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
- unsigned char *buffer, int len, int timeout,
- int retries, struct scsi_mode_data *data,
- struct scsi_sense_hdr *);
+int scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
+ int subpage, unsigned char *buffer, int len, int timeout,
+ int retries, struct scsi_mode_data *data,
+ struct scsi_sense_hdr *);
extern int scsi_mode_select(struct scsi_device *sdev, int pf, int sp,
unsigned char *buffer, int len, int timeout,
int retries, struct scsi_mode_data *data,
@@ -433,8 +460,9 @@ extern int scsi_test_unit_ready(struct scsi_device *sdev, int timeout,
int retries, struct scsi_sense_hdr *sshdr);
extern int scsi_get_vpd_page(struct scsi_device *, u8 page, unsigned char *buf,
int buf_len);
-extern int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
- unsigned int len, unsigned char opcode);
+int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
+ unsigned int len, unsigned char opcode,
+ unsigned short sa);
extern int scsi_device_set_state(struct scsi_device *sdev,
enum scsi_device_state state);
extern struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
@@ -450,7 +478,7 @@ extern void scsi_scan_target(struct device *parent, unsigned int channel,
unsigned int id, u64 lun,
enum scsi_scan_mode rescan);
extern void scsi_target_reap(struct scsi_target *);
-extern void scsi_target_block(struct device *);
+void scsi_block_targets(struct Scsi_Host *shost, struct device *dev);
extern void scsi_target_unblock(struct device *, enum scsi_device_state);
extern void scsi_remove_target(struct device *);
extern const char *scsi_device_state_name(enum scsi_device_state);
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 0f29799efa02..3b907fc2ef08 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -245,6 +245,9 @@ struct scsi_host_template {
* midlayer calls this point so that the driver may deallocate
* and terminate any references to the target.
*
+ * Note: This callback is called with the host lock held and hence
+ * must not sleep.
+ *
* Status: OPTIONAL
*/
void (* target_destroy)(struct scsi_target *);
@@ -458,6 +461,9 @@ struct scsi_host_template {
/* True if the host uses host-wide tagspace */
unsigned host_tagset:1;
+ /* The queuecommand callback may block. See also BLK_MQ_F_BLOCKING. */
+ unsigned queuecommand_may_block:1;
+
/*
* Countdown for host blocking with no commands outstanding.
*/
@@ -653,6 +659,9 @@ struct Scsi_Host {
/* True if the host uses host-wide tagspace */
unsigned host_tagset:1;
+ /* The queuecommand callback may block. See also BLK_MQ_F_BLOCKING. */
+ unsigned queuecommand_may_block:1;
+
/* Host responded with short (<36 bytes) INQUIRY result */
unsigned short_inquiry:1;
@@ -758,12 +767,12 @@ scsi_template_proc_dir(const struct scsi_host_template *sht);
#define scsi_template_proc_dir(sht) NULL
#endif
extern void scsi_scan_host(struct Scsi_Host *);
-extern void scsi_rescan_device(struct device *);
+extern int scsi_rescan_device(struct scsi_device *sdev);
extern void scsi_remove_host(struct Scsi_Host *);
extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
extern int scsi_host_busy(struct Scsi_Host *shost);
extern void scsi_host_put(struct Scsi_Host *t);
-extern struct Scsi_Host *scsi_host_lookup(unsigned short);
+extern struct Scsi_Host *scsi_host_lookup(unsigned int hostnum);
extern const char *scsi_host_state_name(enum scsi_host_state);
extern void scsi_host_complete_all_commands(struct Scsi_Host *shost,
enum scsi_host_status status);
diff --git a/include/scsi/scsi_ioctl.h b/include/scsi/scsi_ioctl.h
index beac64e38b87..a207c07da9d2 100644
--- a/include/scsi/scsi_ioctl.h
+++ b/include/scsi/scsi_ioctl.h
@@ -45,11 +45,11 @@ typedef struct scsi_fctargaddress {
int scsi_ioctl_block_when_processing_errors(struct scsi_device *sdev,
int cmd, bool ndelay);
-int scsi_ioctl(struct scsi_device *sdev, fmode_t mode, int cmd,
+int scsi_ioctl(struct scsi_device *sdev, bool open_for_write, int cmd,
void __user *arg);
int get_sg_io_hdr(struct sg_io_hdr *hdr, const void __user *argp);
int put_sg_io_hdr(const struct sg_io_hdr *hdr, void __user *argp);
-bool scsi_cmd_allowed(unsigned char *cmd, fmode_t mode);
+bool scsi_cmd_allowed(unsigned char *cmd, bool open_for_write);
#endif /* __KERNEL__ */
#endif /* _SCSI_IOCTL_H */
diff --git a/include/scsi/scsi_proto.h b/include/scsi/scsi_proto.h
index fbe5bdfe4d6e..07d65c1f59db 100644
--- a/include/scsi/scsi_proto.h
+++ b/include/scsi/scsi_proto.h
@@ -151,6 +151,11 @@
#define ZO_FINISH_ZONE 0x02
#define ZO_OPEN_ZONE 0x03
#define ZO_RESET_WRITE_POINTER 0x04
+/* values for PERSISTENT RESERVE IN service action */
+#define READ_KEYS 0x00
+#define READ_RESERVATION 0x01
+#define REPORT_CAPABILITES 0x02
+#define READ_FULL_STATUS 0x03
/* values for variable length command */
#define XDREAD_32 0x03
#define XDWRITE_32 0x04
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 34c03707fb6e..fb3399e4cd29 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -472,7 +472,6 @@ extern struct iscsi_iface *iscsi_create_iface(struct Scsi_Host *shost,
uint32_t iface_type,
uint32_t iface_num, int dd_size);
extern void iscsi_destroy_iface(struct iscsi_iface *iface);
-extern struct iscsi_iface *iscsi_lookup_iface(int handle);
extern char *iscsi_get_port_speed_name(struct Scsi_Host *shost);
extern char *iscsi_get_port_state_name(struct Scsi_Host *shost);
extern int iscsi_is_session_dev(const struct device *dev);
diff --git a/include/soc/at91/atmel_tcb.h b/include/soc/at91/atmel_tcb.h
index 1d7071dc0bca..26b56a07bd1f 100644
--- a/include/soc/at91/atmel_tcb.h
+++ b/include/soc/at91/atmel_tcb.h
@@ -77,9 +77,6 @@ struct atmel_tc {
bool allocated;
};
-extern struct atmel_tc *atmel_tc_alloc(unsigned block);
-extern void atmel_tc_free(struct atmel_tc *tc);
-
/* platform-specific ATMEL_TC_TIMER_CLOCKx divisors (0 means 32KiHz) */
extern const u8 atmel_tc_divisors[5];
diff --git a/include/soc/fsl/qe/qe.h b/include/soc/fsl/qe/qe.h
index eb5079904cc8..af793f2a0ec4 100644
--- a/include/soc/fsl/qe/qe.h
+++ b/include/soc/fsl/qe/qe.h
@@ -258,7 +258,7 @@ static inline int qe_alive_during_sleep(void)
/* Structure that defines QE firmware binary files.
*
- * See Documentation/powerpc/qe_firmware.rst for a description of these
+ * See Documentation/arch/powerpc/qe_firmware.rst for a description of these
* fields.
*/
struct qe_firmware {
diff --git a/include/soc/fsl/qe/qmc.h b/include/soc/fsl/qe/qmc.h
index 3c61a50d2ae2..2a333fc1ea81 100644
--- a/include/soc/fsl/qe/qmc.h
+++ b/include/soc/fsl/qe/qmc.h
@@ -9,6 +9,7 @@
#ifndef __SOC_FSL_QMC_H__
#define __SOC_FSL_QMC_H__
+#include <linux/bits.h>
#include <linux/types.h>
struct device_node;
@@ -16,9 +17,11 @@ struct device;
struct qmc_chan;
struct qmc_chan *qmc_chan_get_byphandle(struct device_node *np, const char *phandle_name);
+struct qmc_chan *qmc_chan_get_bychild(struct device_node *np);
void qmc_chan_put(struct qmc_chan *chan);
struct qmc_chan *devm_qmc_chan_get_byphandle(struct device *dev, struct device_node *np,
const char *phandle_name);
+struct qmc_chan *devm_qmc_chan_get_bychild(struct device *dev, struct device_node *np);
enum qmc_mode {
QMC_TRANSPARENT,
@@ -37,6 +40,16 @@ struct qmc_chan_info {
int qmc_chan_get_info(struct qmc_chan *chan, struct qmc_chan_info *info);
+struct qmc_chan_ts_info {
+ u64 rx_ts_mask_avail;
+ u64 tx_ts_mask_avail;
+ u64 rx_ts_mask;
+ u64 tx_ts_mask;
+};
+
+int qmc_chan_get_ts_info(struct qmc_chan *chan, struct qmc_chan_ts_info *ts_info);
+int qmc_chan_set_ts_info(struct qmc_chan *chan, const struct qmc_chan_ts_info *ts_info);
+
struct qmc_chan_param {
enum qmc_mode mode;
union {
@@ -56,8 +69,20 @@ int qmc_chan_set_param(struct qmc_chan *chan, const struct qmc_chan_param *param
int qmc_chan_write_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
void (*complete)(void *context), void *context);
+/* Flags available (ORed) for read complete() flags parameter in HDLC mode.
+ * No flags are available in transparent mode and the read complete() flags
+ * parameter has no meaning in transparent mode.
+ */
+#define QMC_RX_FLAG_HDLC_LAST BIT(11) /* Last in frame */
+#define QMC_RX_FLAG_HDLC_FIRST BIT(10) /* First in frame */
+#define QMC_RX_FLAG_HDLC_OVF BIT(5) /* Data overflow */
+#define QMC_RX_FLAG_HDLC_UNA BIT(4) /* Unaligned (i.e. bits received not a multiple of 8) */
+#define QMC_RX_FLAG_HDLC_ABORT BIT(3) /* Received an abort sequence (seven consecutive ones) */
+#define QMC_RX_FLAG_HDLC_CRC BIT(2) /* CRC error */
+
int qmc_chan_read_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
- void (*complete)(void *context, size_t length),
+ void (*complete)(void *context, size_t length,
+ unsigned int flags),
void *context);
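/*
 * Illustrative sketch, not part of this header: an HDLC-mode read
 * completion callback matching the new prototype and inspecting the
 * QMC_RX_FLAG_HDLC_* bits above. The mydrv_* name is hypothetical.
 */
static void mydrv_read_complete(void *context, size_t length,
				unsigned int flags)
{
	if (flags & (QMC_RX_FLAG_HDLC_OVF | QMC_RX_FLAG_HDLC_UNA |
		     QMC_RX_FLAG_HDLC_ABORT | QMC_RX_FLAG_HDLC_CRC)) {
		/* drop the frame and account the RX error ... */
		return;
	}
	/* hand the 'length' good bytes up the stack ... */
}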
#define QMC_CHAN_READ (1<<0)
diff --git a/include/soc/imx/revision.h b/include/soc/imx/revision.h
index b2a55dafaf0a..b122d2fc8881 100644
--- a/include/soc/imx/revision.h
+++ b/include/soc/imx/revision.h
@@ -22,6 +22,7 @@
#define IMX_CHIP_REVISION_3_3 0x33
#define IMX_CHIP_REVISION_UNKNOWN 0xff
+int mx25_revision(void);
int mx27_revision(void);
int mx31_revision(void);
int mx35_revision(void);
diff --git a/include/soc/imx/timer.h b/include/soc/imx/timer.h
deleted file mode 100644
index 25f29c6bbd0b..000000000000
--- a/include/soc/imx/timer.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2015 Linaro Ltd.
- */
-
-#ifndef __SOC_IMX_TIMER_H__
-#define __SOC_IMX_TIMER_H__
-
-enum imx_gpt_type {
- GPT_TYPE_IMX1, /* i.MX1 */
- GPT_TYPE_IMX21, /* i.MX21/27 */
- GPT_TYPE_IMX31, /* i.MX31/35/25/37/51/6Q */
- GPT_TYPE_IMX6DL, /* i.MX6DL/SX/SL */
-};
-
-#endif /* __SOC_IMX_TIMER_H__ */
diff --git a/include/soc/mediatek/smi.h b/include/soc/mediatek/smi.h
index dfd8efca5e60..000eb1cf68b7 100644
--- a/include/soc/mediatek/smi.h
+++ b/include/soc/mediatek/smi.h
@@ -13,6 +13,7 @@
enum iommu_atf_cmd {
IOMMU_ATF_CMD_CONFIG_SMI_LARB, /* For mm master to en/disable iommu */
+ IOMMU_ATF_CMD_CONFIG_INFRA_IOMMU, /* For infra master to enable iommu */
IOMMU_ATF_CMD_MAX,
};
diff --git a/include/soc/microchip/mpfs.h b/include/soc/microchip/mpfs.h
index f916dcde457f..09722f83b0ca 100644
--- a/include/soc/microchip/mpfs.h
+++ b/include/soc/microchip/mpfs.h
@@ -38,6 +38,8 @@ int mpfs_blocking_transaction(struct mpfs_sys_controller *mpfs_client, struct mp
struct mpfs_sys_controller *mpfs_sys_controller_get(struct device *dev);
+struct mtd_info *mpfs_sys_controller_get_flash(struct mpfs_sys_controller *mpfs_client);
+
#endif /* if IS_ENABLED(CONFIG_POLARFIRE_SOC_SYS_CTRL) */
#if IS_ENABLED(CONFIG_MCHP_CLK_MPFS)
diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h
index cb8fbb241879..1e1b40f4e664 100644
--- a/include/soc/mscc/ocelot.h
+++ b/include/soc/mscc/ocelot.h
@@ -663,6 +663,7 @@ struct ocelot_ops {
struct flow_stats *stats);
void (*cut_through_fwd)(struct ocelot *ocelot);
void (*tas_clock_adjust)(struct ocelot *ocelot);
+ void (*tas_guard_bands_update)(struct ocelot *ocelot, int port);
void (*update_stats)(struct ocelot *ocelot);
};
@@ -730,6 +731,11 @@ enum macaccess_entry_type {
ENTRYTYPE_MACv6,
};
+enum ocelot_proto {
+ OCELOT_PROTO_PTP_L2 = BIT(0),
+ OCELOT_PROTO_PTP_L4 = BIT(1),
+};
+
#define OCELOT_QUIRK_PCS_PERFORMS_RATE_ADAPTATION BIT(0)
#define OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP BIT(1)
@@ -775,6 +781,8 @@ struct ocelot_port {
unsigned int ptp_skbs_in_flight;
struct sk_buff_head tx_skbs;
+ unsigned int trap_proto;
+
u16 mrp_ring_id;
u8 ptp_cmd;
@@ -856,24 +864,21 @@ struct ocelot {
struct mutex stat_view_lock;
/* Lock for serializing access to the MAC table */
struct mutex mact_lock;
- /* Lock for serializing forwarding domain changes */
+ /* Lock for serializing forwarding domain changes, including the
+ * configuration of the Time-Aware Shaper, MAC Merge layer and
+ * cut-through forwarding, on which it depends
+ */
struct mutex fwd_domain_lock;
- /* Lock for serializing Time-Aware Shaper changes */
- struct mutex tas_lock;
-
struct workqueue_struct *owq;
u8 ptp:1;
u8 mm_supported:1;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_info;
- struct hwtstamp_config hwtstamp_config;
unsigned int ptp_skbs_in_flight;
/* Protects the 2-step TX timestamp ID logic */
spinlock_t ts_id_lock;
- /* Protects the PTP interface state */
- struct mutex ptp_lock;
/* Protects the PTP clock */
spinlock_t ptp_clock_lock;
struct ptp_pin_desc ptp_pins[OCELOT_PTP_PINS_NUM];
@@ -1160,7 +1165,6 @@ int ocelot_port_get_mm(struct ocelot *ocelot, int port,
struct ethtool_mm_state *state);
int ocelot_port_mqprio(struct ocelot *ocelot, int port,
struct tc_mqprio_qopt_offload *mqprio);
-void ocelot_port_update_preemptible_tcs(struct ocelot *ocelot, int port);
#if IS_ENABLED(CONFIG_BRIDGE_MRP)
int ocelot_mrp_add(struct ocelot *ocelot, int port,
diff --git a/include/soc/qcom/qcom-spmi-pmic.h b/include/soc/qcom/qcom-spmi-pmic.h
index c47cc71a999e..17a0a8c3d656 100644
--- a/include/soc/qcom/qcom-spmi-pmic.h
+++ b/include/soc/qcom/qcom-spmi-pmic.h
@@ -31,6 +31,7 @@
#define PM8998_SUBTYPE 0x14
#define PMI8998_SUBTYPE 0x15
#define PM8005_SUBTYPE 0x18
+#define PM8937_SUBTYPE 0x19
#define PM660L_SUBTYPE 0x1a
#define PM660_SUBTYPE 0x1b
#define PM8150_SUBTYPE 0x1e
diff --git a/include/soc/rockchip/rk3399_grf.h b/include/soc/rockchip/rk3399_grf.h
index 3eebabcb2812..39cd44cec982 100644
--- a/include/soc/rockchip/rk3399_grf.h
+++ b/include/soc/rockchip/rk3399_grf.h
@@ -11,11 +11,8 @@
/* PMU GRF Registers */
#define RK3399_PMUGRF_OS_REG2 0x308
-#define RK3399_PMUGRF_DDRTYPE_SHIFT 13
-#define RK3399_PMUGRF_DDRTYPE_MASK 7
-#define RK3399_PMUGRF_DDRTYPE_DDR3 3
-#define RK3399_PMUGRF_DDRTYPE_LPDDR2 5
-#define RK3399_PMUGRF_DDRTYPE_LPDDR3 6
-#define RK3399_PMUGRF_DDRTYPE_LPDDR4 7
+#define RK3399_PMUGRF_OS_REG2_DDRTYPE GENMASK(15, 13)
+#define RK3399_PMUGRF_OS_REG2_BW_CH0 GENMASK(3, 2)
+#define RK3399_PMUGRF_OS_REG2_BW_CH1 GENMASK(19, 18)
#endif
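/*
 * Illustrative sketch, not part of this header: with the shift/mask
 * pairs collapsed into GENMASK()s, a caller decodes OS_REG2 with
 * FIELD_GET(), assuming <linux/bitfield.h> is included; the result
 * matches the ROCKCHIP_DDRTYPE_* values introduced in rockchip_grf.h
 * further below.
 */
static inline unsigned int rk3399_ddrtype(u32 os_reg2)
{
	return FIELD_GET(RK3399_PMUGRF_OS_REG2_DDRTYPE, os_reg2);
}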
diff --git a/include/soc/rockchip/rk3568_grf.h b/include/soc/rockchip/rk3568_grf.h
new file mode 100644
index 000000000000..52853efd6720
--- /dev/null
+++ b/include/soc/rockchip/rk3568_grf.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef __SOC_RK3568_GRF_H
+#define __SOC_RK3568_GRF_H
+
+#define RK3568_PMUGRF_OS_REG2 0x208
+#define RK3568_PMUGRF_OS_REG2_DRAMTYPE_INFO GENMASK(15, 13)
+#define RK3568_PMUGRF_OS_REG2_BW_CH0 GENMASK(3, 2)
+
+#define RK3568_PMUGRF_OS_REG3 0x20c
+#define RK3568_PMUGRF_OS_REG3_DRAMTYPE_INFO_V3 GENMASK(13, 12)
+#define RK3568_PMUGRF_OS_REG3_SYSREG_VERSION GENMASK(31, 28)
+
+#endif /* __SOC_RK3568_GRF_H */
diff --git a/include/soc/rockchip/rk3588_grf.h b/include/soc/rockchip/rk3588_grf.h
new file mode 100644
index 000000000000..630b35a55064
--- /dev/null
+++ b/include/soc/rockchip/rk3588_grf.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef __SOC_RK3588_GRF_H
+#define __SOC_RK3588_GRF_H
+
+#define RK3588_PMUGRF_OS_REG2 0x208
+#define RK3588_PMUGRF_OS_REG2_DRAMTYPE_INFO GENMASK(15, 13)
+#define RK3588_PMUGRF_OS_REG2_BW_CH0 GENMASK(3, 2)
+#define RK3588_PMUGRF_OS_REG2_BW_CH1 GENMASK(19, 18)
+#define RK3588_PMUGRF_OS_REG2_CH_INFO GENMASK(29, 28)
+
+#define RK3588_PMUGRF_OS_REG3 0x20c
+#define RK3588_PMUGRF_OS_REG3_DRAMTYPE_INFO_V3 GENMASK(13, 12)
+#define RK3588_PMUGRF_OS_REG3_SYSREG_VERSION GENMASK(31, 28)
+
+#define RK3588_PMUGRF_OS_REG4 0x210
+#define RK3588_PMUGRF_OS_REG5 0x214
+
+#endif /* __SOC_RK3588_GRF_H */
diff --git a/include/soc/rockchip/rockchip_grf.h b/include/soc/rockchip/rockchip_grf.h
new file mode 100644
index 000000000000..e46fd72aea8d
--- /dev/null
+++ b/include/soc/rockchip/rockchip_grf.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Rockchip General Register Files definitions
+ */
+
+#ifndef __SOC_ROCKCHIP_GRF_H
+#define __SOC_ROCKCHIP_GRF_H
+
+/* Rockchip DDRTYPE defines */
+enum {
+ ROCKCHIP_DDRTYPE_DDR3 = 3,
+ ROCKCHIP_DDRTYPE_LPDDR2 = 5,
+ ROCKCHIP_DDRTYPE_LPDDR3 = 6,
+ ROCKCHIP_DDRTYPE_LPDDR4 = 7,
+ ROCKCHIP_DDRTYPE_LPDDR4X = 8,
+};
+
+#endif /* __SOC_ROCKCHIP_GRF_H */
diff --git a/include/soc/tegra/bpmp-abi.h b/include/soc/tegra/bpmp-abi.h
index ecefcaec7e66..6b995a8f0f6d 100644
--- a/include/soc/tegra/bpmp-abi.h
+++ b/include/soc/tegra/bpmp-abi.h
@@ -1194,7 +1194,7 @@ struct cmd_clk_is_enabled_request {
*/
struct cmd_clk_is_enabled_response {
/**
- * @brief The state of the clock that has been succesfully
+ * @brief The state of the clock that has been successfully
* requested with CMD_CLK_ENABLE or CMD_CLK_DISABLE by the
* master invoking the command earlier.
*
diff --git a/include/soc/tegra/bpmp.h b/include/soc/tegra/bpmp.h
index 5842e38bb288..f5e4ac5b8cce 100644
--- a/include/soc/tegra/bpmp.h
+++ b/include/soc/tegra/bpmp.h
@@ -102,8 +102,12 @@ struct tegra_bpmp {
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs_mirror;
#endif
+
+ bool suspended;
};
+#define TEGRA_BPMP_MESSAGE_RESET BIT(0)
+
struct tegra_bpmp_message {
unsigned int mrq;
@@ -117,6 +121,8 @@ struct tegra_bpmp_message {
size_t size;
int ret;
} rx;
+
+ unsigned long flags;
};
#if IS_ENABLED(CONFIG_TEGRA_BPMP)
diff --git a/include/soc/tegra/fuse.h b/include/soc/tegra/fuse.h
index a63de5da8124..3a513be50243 100644
--- a/include/soc/tegra/fuse.h
+++ b/include/soc/tegra/fuse.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2012-2023, NVIDIA CORPORATION. All rights reserved.
*/
#ifndef __SOC_TEGRA_FUSE_H__
@@ -17,6 +17,7 @@
#define TEGRA186 0x18
#define TEGRA194 0x19
#define TEGRA234 0x23
+#define TEGRA264 0x26
#define TEGRA_FUSE_SKU_CALIB_0 0xf0
#define TEGRA30_FUSE_SATA_CALIB 0x124
diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h
index 51a2263e1bc5..af1d73a7f0cd 100644
--- a/include/soc/tegra/mc.h
+++ b/include/soc/tegra/mc.h
@@ -13,6 +13,7 @@
#include <linux/irq.h>
#include <linux/reset-controller.h>
#include <linux/types.h>
+#include <linux/tegra-icc.h>
struct clk;
struct device;
@@ -26,6 +27,8 @@ struct tegra_mc_timing {
struct tegra_mc_client {
unsigned int id;
+ unsigned int bpmp_id;
+ enum tegra_icc_client_type type;
const char *name;
/*
* For Tegra210 and earlier, this is the SWGROUP ID used for IOVA translations in the
@@ -93,7 +96,6 @@ struct tegra_smmu_soc {
struct tegra_mc;
struct tegra_smmu;
-struct gart_device;
#ifdef CONFIG_TEGRA_IOMMU_SMMU
struct tegra_smmu *tegra_smmu_probe(struct device *dev,
@@ -113,28 +115,6 @@ static inline void tegra_smmu_remove(struct tegra_smmu *smmu)
}
#endif
-#ifdef CONFIG_TEGRA_IOMMU_GART
-struct gart_device *tegra_gart_probe(struct device *dev, struct tegra_mc *mc);
-int tegra_gart_suspend(struct gart_device *gart);
-int tegra_gart_resume(struct gart_device *gart);
-#else
-static inline struct gart_device *
-tegra_gart_probe(struct device *dev, struct tegra_mc *mc)
-{
- return ERR_PTR(-ENODEV);
-}
-
-static inline int tegra_gart_suspend(struct gart_device *gart)
-{
- return -ENODEV;
-}
-
-static inline int tegra_gart_resume(struct gart_device *gart)
-{
- return -ENODEV;
-}
-#endif
-
struct tegra_mc_reset {
const char *name;
unsigned long id;
@@ -166,10 +146,15 @@ struct tegra_mc_icc_ops {
int (*set)(struct icc_node *src, struct icc_node *dst);
int (*aggregate)(struct icc_node *node, u32 tag, u32 avg_bw,
u32 peak_bw, u32 *agg_avg, u32 *agg_peak);
+ struct icc_node* (*xlate)(struct of_phandle_args *spec, void *data);
struct icc_node_data *(*xlate_extended)(struct of_phandle_args *spec,
void *data);
+ int (*get_bw)(struct icc_node *node, u32 *avg, u32 *peak);
};
+struct icc_node *tegra_mc_icc_xlate(struct of_phandle_args *spec, void *data);
+extern const struct tegra_mc_icc_ops tegra_mc_icc_ops;
+
struct tegra_mc_ops {
/*
* @probe: Callback to set up SoC-specific bits of the memory controller. This is called
@@ -177,7 +162,6 @@ struct tegra_mc_ops {
*/
int (*probe)(struct tegra_mc *mc);
void (*remove)(struct tegra_mc *mc);
- int (*suspend)(struct tegra_mc *mc);
int (*resume)(struct tegra_mc *mc);
irqreturn_t (*handle_irq)(int irq, void *data);
int (*probe_device)(struct tegra_mc *mc, struct device *dev);
@@ -214,9 +198,9 @@ struct tegra_mc_soc {
};
struct tegra_mc {
+ struct tegra_bpmp *bpmp;
struct device *dev;
struct tegra_smmu *smmu;
- struct gart_device *gart;
void __iomem *regs;
void __iomem *bcast_ch_regs;
void __iomem **ch_regs;
@@ -228,7 +212,9 @@ struct tegra_mc {
struct tegra_mc_timing *timings;
unsigned int num_timings;
+ unsigned int num_channels;
+ bool bwmgr_mrq_supported;
struct reset_controller_dev reset;
struct icc_provider provider;
diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
index c495c6d5fbe0..4bd3be3a3192 100644
--- a/include/sound/ac97_codec.h
+++ b/include/sound/ac97_codec.h
@@ -410,7 +410,7 @@ int snd_ac97_pcm_close(struct ac97_pcm *pcm);
int snd_ac97_pcm_double_rate_rules(struct snd_pcm_runtime *runtime);
/* ad hoc AC97 device driver access */
-extern struct bus_type ac97_bus_type;
+extern const struct bus_type ac97_bus_type;
/* AC97 platform_data adding function */
static inline void snd_ac97_dev_add_pdata(struct snd_ac97 *ac97, void *data)
diff --git a/include/sound/asequencer.h b/include/sound/asequencer.h
index 18d4bc3ee0b7..ddbb6bf801bb 100644
--- a/include/sound/asequencer.h
+++ b/include/sound/asequencer.h
@@ -65,6 +65,10 @@
#define snd_seq_ev_is_abstime(ev) (snd_seq_ev_timemode_type(ev) == SNDRV_SEQ_TIME_MODE_ABS)
#define snd_seq_ev_is_reltime(ev) (snd_seq_ev_timemode_type(ev) == SNDRV_SEQ_TIME_MODE_REL)
+/* check whether the given event is a UMP event */
+#define snd_seq_ev_is_ump(ev) \
+ (IS_ENABLED(CONFIG_SND_SEQ_UMP) && ((ev)->flags & SNDRV_SEQ_EVENT_UMP))
+
/* queue sync port */
#define snd_seq_queue_sync_port(q) ((q) + 16)
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
index d91289c6f00e..bcf872c17dd3 100644
--- a/include/sound/compress_driver.h
+++ b/include/sound/compress_driver.h
@@ -148,7 +148,7 @@ struct snd_compr_ops {
*/
struct snd_compr {
const char *name;
- struct device dev;
+ struct device *dev;
struct snd_compr_ops *ops;
void *private_data;
struct snd_card *card;
diff --git a/include/sound/control.h b/include/sound/control.h
index cc3dcc6cfb0f..9a4f4f7138da 100644
--- a/include/sound/control.h
+++ b/include/sound/control.h
@@ -140,8 +140,32 @@ int snd_ctl_remove_id(struct snd_card * card, struct snd_ctl_elem_id *id);
int snd_ctl_rename_id(struct snd_card * card, struct snd_ctl_elem_id *src_id, struct snd_ctl_elem_id *dst_id);
void snd_ctl_rename(struct snd_card *card, struct snd_kcontrol *kctl, const char *name);
int snd_ctl_activate_id(struct snd_card *card, struct snd_ctl_elem_id *id, int active);
-struct snd_kcontrol *snd_ctl_find_numid(struct snd_card * card, unsigned int numid);
-struct snd_kcontrol *snd_ctl_find_id(struct snd_card * card, struct snd_ctl_elem_id *id);
+struct snd_kcontrol *snd_ctl_find_numid_locked(struct snd_card *card, unsigned int numid);
+struct snd_kcontrol *snd_ctl_find_numid(struct snd_card *card, unsigned int numid);
+struct snd_kcontrol *snd_ctl_find_id_locked(struct snd_card *card, const struct snd_ctl_elem_id *id);
+struct snd_kcontrol *snd_ctl_find_id(struct snd_card *card, const struct snd_ctl_elem_id *id);
+
+/**
+ * snd_ctl_find_id_mixer - find the control instance with the given name string
+ * @card: the card instance
+ * @name: the name string
+ *
+ * Finds the control instance with the given name and
+ * @SNDRV_CTL_ELEM_IFACE_MIXER. Other fields are set to zero.
+ *
+ * This is merely a wrapper to snd_ctl_find_id().
+ *
+ * Return: The pointer of the instance if found, or %NULL if not.
+ */
+static inline struct snd_kcontrol *
+snd_ctl_find_id_mixer(struct snd_card *card, const char *name)
+{
+ struct snd_ctl_elem_id id = {};
+
+ id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+ strscpy(id.name, name, sizeof(id.name));
+ return snd_ctl_find_id(card, &id);
+}
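/*
 * Illustrative usage, not part of this header (the control name is an
 * example and controls_rwsem must not already be held, since
 * snd_ctl_find_id() takes it internally):
 *
 *   struct snd_kcontrol *kctl =
 *           snd_ctl_find_id_mixer(card, "Master Playback Volume");
 */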
int snd_ctl_create(struct snd_card *card);
@@ -238,6 +262,9 @@ snd_ctl_add_follower(struct snd_kcontrol *master, struct snd_kcontrol *follower)
return _snd_ctl_add_follower(master, follower, 0);
}
+int snd_ctl_add_followers(struct snd_card *card, struct snd_kcontrol *master,
+ const char * const *list);
+
/**
* snd_ctl_add_follower_uncached - Add a virtual follower control
* @master: vmaster element
diff --git a/include/sound/core.h b/include/sound/core.h
index 3edc4ab08774..dfef0c9d4b9f 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -96,9 +96,9 @@ struct snd_card {
private data */
struct list_head devices; /* devices */
- struct device ctl_dev; /* control device */
+ struct device *ctl_dev; /* control device */
unsigned int last_numid; /* last used numeric ID */
- struct rw_semaphore controls_rwsem; /* controls list lock */
+ struct rw_semaphore controls_rwsem; /* controls lock (list and values) */
rwlock_t ctl_files_rwlock; /* ctl_files list lock */
int controls_count; /* count of all controls */
size_t user_ctl_alloc_size; // current memory allocation by user controls.
@@ -232,14 +232,14 @@ static inline struct device *snd_card_get_device_link(struct snd_card *card)
extern int snd_major;
extern int snd_ecards_limit;
-extern struct class *sound_class;
+extern const struct class sound_class;
#ifdef CONFIG_SND_DEBUG
extern struct dentry *sound_debugfs_root;
#endif
void snd_request_card(int card);
-void snd_device_initialize(struct device *dev, struct snd_card *card);
+int snd_device_alloc(struct device **dev_p, struct snd_card *card);
int snd_register_device(int type, struct snd_card *card, int dev,
const struct file_operations *f_ops,
diff --git a/include/sound/cs35l41.h b/include/sound/cs35l41.h
index 7239d943942c..68e053fe7340 100644
--- a/include/sound/cs35l41.h
+++ b/include/sound/cs35l41.h
@@ -11,7 +11,6 @@
#define __CS35L41_H
#include <linux/regmap.h>
-#include <linux/completion.h>
#include <linux/firmware/cirrus/cs_dsp.h>
#define CS35L41_FIRSTREG 0x00000000
@@ -736,6 +735,7 @@
#define CS35L41_REVID_B2 0xB2
#define CS35L41_HALO_CORE_RESET 0x00000200
+#define CS35L41_SOFTWARE_RESET 0x5A000000
#define CS35L41_FS1_WINDOW_MASK 0x000007FF
#define CS35L41_FS2_WINDOW_MASK 0x00FFF800
@@ -816,6 +816,8 @@ struct cs35l41_otp_map_element_t {
};
enum cs35l41_cspl_mbox_status {
+ CSPL_MBOX_STS_ERROR = U32_MAX,
+ CSPL_MBOX_STS_ERROR2 = 0x00ffffff, // the firmware does not always sign-extend the 24-bit value
CSPL_MBOX_STS_RUNNING = 0,
CSPL_MBOX_STS_PAUSED = 1,
CSPL_MBOX_STS_RDY_FOR_REINIT = 2,
@@ -829,6 +831,7 @@ enum cs35l41_cspl_mbox_cmd {
CSPL_MBOX_CMD_STOP_PRE_REINIT = 4,
CSPL_MBOX_CMD_HIBERNATE = 5,
CSPL_MBOX_CMD_OUT_OF_HIBERNATE = 6,
+ CSPL_MBOX_CMD_SPK_OUT_ENABLE = 7,
CSPL_MBOX_CMD_UNKNOWN_CMD = -1,
CSPL_MBOX_CMD_INVALID_SEQUENCE = -2,
};
@@ -901,7 +904,8 @@ int cs35l41_exit_hibernate(struct device *dev, struct regmap *regmap);
int cs35l41_init_boost(struct device *dev, struct regmap *regmap,
struct cs35l41_hw_cfg *hw_cfg);
bool cs35l41_safe_reset(struct regmap *regmap, enum cs35l41_boost_type b_type);
-int cs35l41_global_enable(struct regmap *regmap, enum cs35l41_boost_type b_type, int enable,
- struct completion *pll_lock);
+int cs35l41_mdsync_up(struct regmap *regmap);
+int cs35l41_global_enable(struct device *dev, struct regmap *regmap, enum cs35l41_boost_type b_type,
+ int enable, struct cs_dsp *dsp);
#endif /* __CS35L41_H */
diff --git a/include/sound/cs35l56.h b/include/sound/cs35l56.h
index 002042b1c73c..b24716ab2750 100644
--- a/include/sound/cs35l56.h
+++ b/include/sound/cs35l56.h
@@ -75,6 +75,7 @@
#define CS35L56_DSP1_AHBM_WINDOW_DEBUG_0 0x25E2040
#define CS35L56_DSP1_AHBM_WINDOW_DEBUG_1 0x25E2044
#define CS35L56_DSP1_XMEM_UNPACKED24_0 0x2800000
+#define CS35L56_DSP1_FW_VER 0x2800010
#define CS35L56_DSP1_HALO_STATE_A1 0x2801E58
#define CS35L56_DSP1_HALO_STATE 0x28021E0
#define CS35L56_DSP1_PM_CUR_STATE_A1 0x2804000
@@ -223,6 +224,7 @@
#define CS35L56_MBOX_CMD_AUDIO_PLAY 0x0B000001
#define CS35L56_MBOX_CMD_AUDIO_PAUSE 0x0B000002
+#define CS35L56_MBOX_CMD_AUDIO_REINIT 0x0B000003
#define CS35L56_MBOX_CMD_HIBERNATE_NOW 0x02000001
#define CS35L56_MBOX_CMD_WAKEUP 0x02000002
#define CS35L56_MBOX_CMD_PREVENT_AUTO_HIBERNATE 0x02000003
@@ -240,10 +242,9 @@
#define CS35L56_CONTROL_PORT_READY_US 2200
#define CS35L56_HALO_STATE_POLL_US 1000
-#define CS35L56_HALO_STATE_TIMEOUT_US 50000
-#define CS35L56_HIBERNATE_WAKE_POLL_US 500
-#define CS35L56_HIBERNATE_WAKE_TIMEOUT_US 5000
+#define CS35L56_HALO_STATE_TIMEOUT_US 250000
#define CS35L56_RESET_PULSE_MIN_US 1100
+#define CS35L56_WAKE_HOLD_TIME_US 1000
#define CS35L56_SDW1_PLAYBACK_PORT 1
#define CS35L56_SDW1_CAPTURE_PORT 3
@@ -251,15 +252,44 @@
#define CS35L56_NUM_BULK_SUPPLIES 3
#define CS35L56_NUM_DSP_REGIONS 5
+struct cs35l56_base {
+ struct device *dev;
+ struct regmap *regmap;
+ int irq;
+ struct mutex irq_lock;
+ u8 rev;
+ bool init_done;
+ bool fw_patched;
+ bool secured;
+ bool can_hibernate;
+ struct gpio_desc *reset_gpio;
+};
+
extern struct regmap_config cs35l56_regmap_i2c;
extern struct regmap_config cs35l56_regmap_spi;
extern struct regmap_config cs35l56_regmap_sdw;
-extern const struct cs_dsp_region cs35l56_dsp1_regions[CS35L56_NUM_DSP_REGIONS];
extern const char * const cs35l56_tx_input_texts[CS35L56_NUM_INPUT_SRC];
extern const unsigned int cs35l56_tx_input_values[CS35L56_NUM_INPUT_SRC];
-void cs35l56_reread_firmware_registers(struct device *dev, struct regmap *regmap);
+int cs35l56_set_patch(struct cs35l56_base *cs35l56_base);
+int cs35l56_force_sync_asp1_registers_from_cache(struct cs35l56_base *cs35l56_base);
+int cs35l56_mbox_send(struct cs35l56_base *cs35l56_base, unsigned int command);
+int cs35l56_firmware_shutdown(struct cs35l56_base *cs35l56_base);
+int cs35l56_wait_for_firmware_boot(struct cs35l56_base *cs35l56_base);
+void cs35l56_wait_control_port_ready(void);
+void cs35l56_wait_min_reset_pulse(void);
+void cs35l56_system_reset(struct cs35l56_base *cs35l56_base, bool is_soundwire);
+int cs35l56_irq_request(struct cs35l56_base *cs35l56_base, int irq);
+irqreturn_t cs35l56_irq(int irq, void *data);
+int cs35l56_is_fw_reload_needed(struct cs35l56_base *cs35l56_base);
+int cs35l56_runtime_suspend_common(struct cs35l56_base *cs35l56_base);
+int cs35l56_runtime_resume_common(struct cs35l56_base *cs35l56_base, bool is_soundwire);
+void cs35l56_init_cs_dsp(struct cs35l56_base *cs35l56_base, struct cs_dsp *cs_dsp);
+int cs35l56_read_prot_status(struct cs35l56_base *cs35l56_base,
+ bool *fw_missing, unsigned int *fw_version);
+int cs35l56_hw_init(struct cs35l56_base *cs35l56_base);
+int cs35l56_get_speaker_id(struct cs35l56_base *cs35l56_base);
int cs35l56_get_bclk_freq_id(unsigned int freq);
void cs35l56_fill_supply_names(struct regulator_bulk_data *data);
diff --git a/include/sound/cs4271.h b/include/sound/cs4271.h
index 6ff23ab48894..5a55d135bdea 100644
--- a/include/sound/cs4271.h
+++ b/include/sound/cs4271.h
@@ -9,7 +9,6 @@
#define __CS4271_H
struct cs4271_platform_data {
- int gpio_nreset; /* GPIO driving Reset pin, if any */
bool amutec_eq_bmutec; /* flag to enable AMUTEC=BMUTEC */
/*
diff --git a/include/sound/cs42l43.h b/include/sound/cs42l43.h
new file mode 100644
index 000000000000..deb337fc4e8c
--- /dev/null
+++ b/include/sound/cs42l43.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CS42L43 CODEC driver external data
+ *
+ * Copyright (C) 2022-2023 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#ifndef CS42L43_ASOC_EXT_H
+#define CS42L43_ASOC_EXT_H
+
+#define CS42L43_SYSCLK 0
+
+#define CS42L43_SYSCLK_MCLK 0
+#define CS42L43_SYSCLK_SDW 1
+
+#endif /* CS42L43_ASOC_EXT_H */
diff --git a/include/sound/da7219-aad.h b/include/sound/da7219-aad.h
index 24ee7baa2589..41320522daa2 100644
--- a/include/sound/da7219-aad.h
+++ b/include/sound/da7219-aad.h
@@ -44,6 +44,11 @@ enum da7219_aad_jack_ins_deb {
DA7219_AAD_JACK_INS_DEB_1S,
};
+enum da7219_aad_jack_ins_det_pty {
+ DA7219_AAD_JACK_INS_DET_PTY_LOW = 0,
+ DA7219_AAD_JACK_INS_DET_PTY_HIGH,
+};
+
enum da7219_aad_jack_det_rate {
DA7219_AAD_JACK_DET_RATE_32_64MS = 0,
DA7219_AAD_JACK_DET_RATE_64_128MS,
@@ -80,6 +85,7 @@ struct da7219_aad_pdata {
enum da7219_aad_btn_cfg btn_cfg;
enum da7219_aad_mic_det_thr mic_det_thr;
enum da7219_aad_jack_ins_deb jack_ins_deb;
+ enum da7219_aad_jack_ins_det_pty jack_ins_det_pty;
enum da7219_aad_jack_det_rate jack_det_rate;
enum da7219_aad_jack_rem_deb jack_rem_deb;
diff --git a/include/sound/designware_i2s.h b/include/sound/designware_i2s.h
index 80d275b9ae0d..f6803205a9fb 100644
--- a/include/sound/designware_i2s.h
+++ b/include/sound/designware_i2s.h
@@ -21,6 +21,8 @@ struct i2s_clk_config_data {
u32 sample_rate;
};
+struct dw_i2s_dev;
+
struct i2s_platform_data {
#define DWC_I2S_PLAY (1 << 0)
#define DWC_I2S_RECORD (1 << 1)
@@ -42,6 +44,7 @@ struct i2s_platform_data {
void *capture_dma_data;
bool (*filter)(struct dma_chan *chan, void *slave);
int (*i2s_clk_cfg)(struct i2s_clk_config_data *config);
+ int (*i2s_pd_init)(struct dw_i2s_dev *dev);
};
struct i2s_dma_data {
diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h
index 2df54cf02cb3..d70c55f17df7 100644
--- a/include/sound/dmaengine_pcm.h
+++ b/include/sound/dmaengine_pcm.h
@@ -142,7 +142,7 @@ struct snd_dmaengine_pcm_config {
struct snd_pcm_substream *substream);
int (*process)(struct snd_pcm_substream *substream,
int channel, unsigned long hwoff,
- void *buf, unsigned long bytes);
+ unsigned long bytes);
dma_filter_fn compat_filter_fn;
struct device *dma_dev;
const char *chan_names[SNDRV_PCM_STREAM_LAST + 1];
diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h
index 8fe80dcee71b..1af9e6819392 100644
--- a/include/sound/emu10k1.h
+++ b/include/sound/emu10k1.h
@@ -38,6 +38,35 @@
#define IP_TO_CP(ip) ((ip == 0) ? 0 : (((0x00001000uL | (ip & 0x00000FFFL)) << (((ip >> 12) & 0x000FL) + 4)) & 0xFFFF0000uL))
+// This is used to define hardware bit-fields (sub-registers) by combining
+// the bit shift and count with the actual register address. The passed
+// mask must represent a single run of adjacent bits.
+// The non-concatenating (_NC) variant should be used directly only for
+// sub-registers that do not follow the <register>_<field> naming pattern.
+#define SUB_REG_NC(reg, field, mask) \
+ enum { \
+ field ## _MASK = mask, \
+ field = reg | \
+ (__builtin_ctz(mask) << 16) | \
+ (__builtin_popcount(mask) << 24), \
+ };
+#define SUB_REG(reg, field, mask) SUB_REG_NC(reg, reg ## _ ## field, mask)
+
+// Macros for manipulating values of bit-fields declared using the above macros.
+// Best used with constant register addresses, as otherwise a fair amount of code is
+// generated. The actual register read/write functions handle combined addresses
+// automatically, so use of these macros conveys no advantage when accessing a
+// single sub-register at a time.
+#define REG_SHIFT(r) (((r) >> 16) & 0x1f)
+#define REG_SIZE(r) (((r) >> 24) & 0x1f)
+#define REG_MASK0(r) ((1U << REG_SIZE(r)) - 1U)
+#define REG_MASK(r) (REG_MASK0(r) << REG_SHIFT(r))
+#define REG_VAL_GET(r, v) ((v & REG_MASK(r)) >> REG_SHIFT(r))
+#define REG_VAL_PUT(r, v) ((v) << REG_SHIFT(r))
+
+// List terminator for snd_emu10k1_ptr_write_multiple()
+#define REGLIST_END ~0
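// Worked example, not part of this header: the SUB_REG(WC, SAMPLECOUNTER,
// 0x03FFFFC0) use further below defines WC_SAMPLECOUNTER_MASK = 0x03FFFFC0
// and packs into WC_SAMPLECOUNTER the register address (WC == 0x10) in the
// low bits, the field's shift (ctz(0x03FFFFC0) == 6) at bit 16, and its
// width (popcount(0x03FFFFC0) == 20) at bit 24. Consequently:
//   REG_SHIFT(WC_SAMPLECOUNTER) == 6
//   REG_SIZE(WC_SAMPLECOUNTER) == 20
//   REG_MASK(WC_SAMPLECOUNTER) == 0x03FFFFC0
//   REG_VAL_GET(WC_SAMPLECOUNTER, v) == ((v) & 0x03FFFFC0) >> 6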
+
// Audigy-specific registers are prefixed with 'A_'
/************************************************************************************************/
@@ -90,6 +119,10 @@
#define IPR_MIDITRANSBUFEMPTY 0x00000100 /* MIDI UART transmit buffer empty */
#define IPR_MIDIRECVBUFEMPTY 0x00000080 /* MIDI UART receive buffer empty */
#define IPR_CHANNELLOOP 0x00000040 /* Channel (half) loop interrupt(s) pending */
+ /* The interrupt is triggered shortly after */
+ /* CCR_READADDRESS has crossed the boundary; */
+ /* due to the cache, this runs ahead of the */
+ /* actual playback position. */
#define IPR_CHANNELNUMBERMASK 0x0000003f /* When IPR_CHANNELLOOP is set, indicates the */
/* highest set channel in CLIPL, CLIPH, HLIPL, */
/* or HLIPH. When IPR is written with CL set, */
@@ -148,12 +181,10 @@
#define INTE_MIDIRXENABLE 0x00000001 /* Enable MIDI receive-buffer-empty interrupts */
#define WC 0x10 /* Wall Clock register */
-#define WC_SAMPLECOUNTER_MASK 0x03FFFFC0 /* Sample periods elapsed since reset */
-#define WC_SAMPLECOUNTER 0x14060010
-#define WC_CURRENTCHANNEL_MASK 0x0000003F /* Channel [0..63] currently being serviced */
+SUB_REG(WC, SAMPLECOUNTER, 0x03FFFFC0) /* Sample periods elapsed since reset */
+SUB_REG(WC, CURRENTCHANNEL, 0x0000003F) /* Channel [0..63] currently being serviced */
/* NOTE: Each channel takes 1/64th of a sample */
/* period to be serviced. */
-#define WC_CURRENTCHANNEL 0x06000010
#define HCFG 0x14 /* Hardware config register */
/* NOTE: There is no reason to use the legacy */
@@ -225,9 +256,8 @@
/* async audio source */
#define HCFG_LOCKSOUNDCACHE	0x00000008	/* 1 = Cancel busmaster accesses to soundcache */
/* NOTE: This should generally never be used. */
-#define HCFG_LOCKTANKCACHE_MASK 0x00000004 /* 1 = Cancel bustmaster accesses to tankcache */
+SUB_REG(HCFG, LOCKTANKCACHE, 0x00000004) /* 1 = Cancel busmaster accesses to tankcache */
/* NOTE: This should generally never be used. */
-#define HCFG_LOCKTANKCACHE 0x01020014
#define HCFG_MUTEBUTTONENABLE 0x00000002 /* 1 = Master mute button sets AUDIOENABLE = 0. */
/* NOTE: This is a 'cheap' way to implement a */
/* master mute function on the mute button, and */
@@ -381,56 +411,49 @@
// distortion), the modulation engine sets the target registers, towards
// which the current registers "swerve" gradually.
+// For the odd channel in a stereo pair, these registers are meaningless:
+// CPF_STEREO, CPF_CURRENTPITCH, PTRX_PITCHTARGET, CCR_CACHEINVALIDSIZE,
+// PSST_LOOPSTARTADDR, DSL_LOOPENDADDR, CCCA_CURRADDR
+// Somewhat non-obviously, the following remain meaningful:
+// CPF_STOP, CPF_FRACADDRESS, CCR_READADDRESS (!),
+// CCCA_INTERPROM, CCCA_8BITSELECT (!)
+// (The envelope engine is ignored here, as stereo matters only for verbatim playback.)
+
#define CPF 0x00 /* Current pitch and fraction register */
-#define CPF_CURRENTPITCH_MASK 0xffff0000 /* Current pitch (linear, 0x4000 == unity pitch shift) */
-#define CPF_CURRENTPITCH 0x10100000
+SUB_REG(CPF, CURRENTPITCH, 0xffff0000) /* Current pitch (linear, 0x4000 == unity pitch shift) */
#define CPF_STEREO_MASK 0x00008000 /* 1 = Even channel interleave, odd channel locked */
-#define CPF_STOP_MASK 0x00004000 /* 1 = Current pitch forced to 0 */
+SUB_REG(CPF, STOP, 0x00004000) /* 1 = Current pitch forced to 0 */
+						/* Can be set only while the matching bit in SOLEx is 1 */
#define CPF_FRACADDRESS_MASK 0x00003fff /* Linear fractional address of the current channel */
#define PTRX 0x01 /* Pitch target and send A/B amounts register */
-#define PTRX_PITCHTARGET_MASK 0xffff0000 /* Pitch target of specified channel */
-#define PTRX_PITCHTARGET 0x10100001
-#define PTRX_FXSENDAMOUNT_A_MASK 0x0000ff00 /* Linear level of channel output sent to FX send bus A */
-#define PTRX_FXSENDAMOUNT_A 0x08080001
-#define PTRX_FXSENDAMOUNT_B_MASK 0x000000ff /* Linear level of channel output sent to FX send bus B */
-#define PTRX_FXSENDAMOUNT_B 0x08000001
+SUB_REG(PTRX, PITCHTARGET, 0xffff0000) /* Pitch target of specified channel */
+SUB_REG(PTRX, FXSENDAMOUNT_A, 0x0000ff00) /* Linear level of channel output sent to FX send bus A */
+SUB_REG(PTRX, FXSENDAMOUNT_B, 0x000000ff) /* Linear level of channel output sent to FX send bus B */
+// Note: the volumes are raw multipliers, so real 100% is impossible.
#define CVCF 0x02 /* Current volume and filter cutoff register */
-#define CVCF_CURRENTVOL_MASK 0xffff0000 /* Current linear volume of specified channel */
-#define CVCF_CURRENTVOL 0x10100002
-#define CVCF_CURRENTFILTER_MASK 0x0000ffff /* Current filter cutoff frequency of specified channel */
-#define CVCF_CURRENTFILTER 0x10000002
+SUB_REG(CVCF, CURRENTVOL, 0xffff0000) /* Current linear volume of specified channel */
+SUB_REG(CVCF, CURRENTFILTER, 0x0000ffff) /* Current filter cutoff frequency of specified channel */
#define VTFT 0x03 /* Volume target and filter cutoff target register */
-#define VTFT_VOLUMETARGET_MASK 0xffff0000 /* Volume target of specified channel */
-#define VTFT_VOLUMETARGET 0x10100003
-#define VTFT_FILTERTARGET_MASK 0x0000ffff /* Filter cutoff target of specified channel */
-#define VTFT_FILTERTARGET 0x10000003
+SUB_REG(VTFT, VOLUMETARGET, 0xffff0000) /* Volume target of specified channel */
+SUB_REG(VTFT, FILTERTARGET, 0x0000ffff) /* Filter cutoff target of specified channel */
#define Z1 0x05 /* Filter delay memory 1 register */
#define Z2 0x04 /* Filter delay memory 2 register */
#define PSST 0x06 /* Send C amount and loop start address register */
-#define PSST_FXSENDAMOUNT_C_MASK 0xff000000 /* Linear level of channel output sent to FX send bus C */
-
-#define PSST_FXSENDAMOUNT_C 0x08180006
-
-#define PSST_LOOPSTARTADDR_MASK 0x00ffffff /* Loop start address of the specified channel */
-#define PSST_LOOPSTARTADDR 0x18000006
+SUB_REG(PSST, FXSENDAMOUNT_C, 0xff000000) /* Linear level of channel output sent to FX send bus C */
+SUB_REG(PSST, LOOPSTARTADDR, 0x00ffffff) /* Loop start address of the specified channel */
#define DSL 0x07 /* Send D amount and loop end address register */
-#define DSL_FXSENDAMOUNT_D_MASK 0xff000000 /* Linear level of channel output sent to FX send bus D */
-
-#define DSL_FXSENDAMOUNT_D 0x08180007
-
-#define DSL_LOOPENDADDR_MASK 0x00ffffff /* Loop end address of the specified channel */
-#define DSL_LOOPENDADDR 0x18000007
+SUB_REG(DSL, FXSENDAMOUNT_D, 0xff000000) /* Linear level of channel output sent to FX send bus D */
+SUB_REG(DSL, LOOPENDADDR, 0x00ffffff) /* Loop end address of the specified channel */
#define CCCA 0x08 /* Filter Q, interp. ROM, byte size, cur. addr register */
-#define CCCA_RESONANCE_MASK 0xf0000000 /* Lowpass filter resonance (Q) height */
-#define CCCA_RESONANCE 0x041c0008
+SUB_REG(CCCA, RESONANCE, 0xf0000000) /* Lowpass filter resonance (Q) height */
#define CCCA_INTERPROM_MASK 0x0e000000 /* Selects passband of interpolation ROM */
/* 1 == full band, 7 == lowpass */
/* ROM 0 is used when pitch shifting downward or less */
@@ -447,27 +470,24 @@
#define CCCA_INTERPROM_7 0x0e000000 /* Select interpolation ROM 7 */
#define CCCA_8BITSELECT 0x01000000 /* 1 = Sound memory for this channel uses 8-bit samples */
/* 8-bit samples are unsigned, 16-bit ones signed */
-#define CCCA_CURRADDR_MASK 0x00ffffff /* Current address of the selected channel */
-#define CCCA_CURRADDR 0x18000008
+SUB_REG(CCCA, CURRADDR, 0x00ffffff) /* Current address of the selected channel */
#define CCR 0x09 /* Cache control register */
-#define CCR_CACHEINVALIDSIZE 0x07190009
-#define CCR_CACHEINVALIDSIZE_MASK 0xfe000000 /* Number of invalid samples before the read address */
+SUB_REG(CCR, CACHEINVALIDSIZE, 0xfe000000) /* Number of invalid samples before the read address */
#define CCR_CACHELOOPFLAG 0x01000000 /* 1 = Cache has a loop service pending */
#define CCR_INTERLEAVEDSAMPLES 0x00800000 /* 1 = A cache service will fetch interleaved samples */
/* Auto-set from CPF_STEREO_MASK */
#define CCR_WORDSIZEDSAMPLES 0x00400000 /* 1 = A cache service will fetch word sized samples */
/* Auto-set from CCCA_8BITSELECT */
-#define CCR_READADDRESS 0x06100009
-#define CCR_READADDRESS_MASK 0x003f0000 /* Next cached sample to play */
-#define CCR_LOOPINVALSIZE 0x0000fe00 /* Number of invalid samples in cache prior to loop */
+SUB_REG(CCR, READADDRESS, 0x003f0000) /* Next cached sample to play */
+SUB_REG(CCR, LOOPINVALSIZE, 0x0000fe00) /* Number of invalid samples in cache prior to loop */
/* NOTE: This is valid only if CACHELOOPFLAG is set */
#define CCR_LOOPFLAG 0x00000100 /* Set for a single sample period when a loop occurs */
-#define CCR_CACHELOOPADDRHI 0x000000ff /* CLP_LOOPSTARTADDR's hi byte if CACHELOOPFLAG is set */
+SUB_REG(CCR, CACHELOOPADDRHI, 0x000000ff) /* CLP_LOOPSTARTADDR's hi byte if CACHELOOPFLAG is set */
#define CLP 0x0a /* Cache loop register (valid if CCR_CACHELOOPFLAG = 1) */
/* NOTE: This register is normally not used */
-#define CLP_CACHELOOPADDR 0x0000ffff /* Cache loop address low word */
+SUB_REG(CLP, CACHELOOPADDR, 0x0000ffff) /* Cache loop address low word */
#define FXRT 0x0b /* Effects send routing register */
/* NOTE: It is illegal to assign the same routing to */
@@ -537,20 +557,17 @@
#define IP_UNITY 0x0000e000 /* Unity pitch shift */
#define IFATN 0x19 /* Initial filter cutoff and attenuation register */
-#define IFATN_FILTERCUTOFF_MASK 0x0000ff00 /* Initial filter cutoff frequency in exponential units */
+SUB_REG(IFATN, FILTERCUTOFF, 0x0000ff00) /* Initial filter cutoff frequency in exponential units */
/* 6 most significant bits are semitones */
/* 2 least significant bits are fractions */
-#define IFATN_FILTERCUTOFF 0x08080019
-#define IFATN_ATTENUATION_MASK 0x000000ff /* Initial attenuation in 0.375dB steps */
-#define IFATN_ATTENUATION 0x08000019
+SUB_REG(IFATN, ATTENUATION, 0x000000ff) /* Initial attenuation in 0.375dB steps */
#define PEFE 0x1a /* Pitch envelope and filter envelope amount register */
-#define PEFE_PITCHAMOUNT_MASK 0x0000ff00 /* Pitch envlope amount */
+SUB_REG(PEFE, PITCHAMOUNT, 0x0000ff00) /* Pitch envelope amount */
/* Signed 2's complement, +/- one octave peak extremes */
-#define PEFE_PITCHAMOUNT 0x0808001a
-#define PEFE_FILTERAMOUNT_MASK 0x000000ff /* Filter envlope amount */
+SUB_REG(PEFE, FILTERAMOUNT, 0x000000ff) /* Filter envelope amount */
/* Signed 2's complement, +/- six octaves peak extremes */
-#define PEFE_FILTERAMOUNT 0x0800001a
+
#define FMMOD 0x1b /* Vibrato/filter modulation from LFO register */
#define FMMOD_MODVIBRATO 0x0000ff00 /* Vibrato LFO modulation depth */
@@ -577,24 +594,22 @@
/* 0x1f: not used */
-#define CD0 0x20 /* Cache data 0 register */
-#define CD1 0x21 /* Cache data 1 register */
-#define CD2 0x22 /* Cache data 2 register */
-#define CD3 0x23 /* Cache data 3 register */
-#define CD4 0x24 /* Cache data 4 register */
-#define CD5 0x25 /* Cache data 5 register */
-#define CD6 0x26 /* Cache data 6 register */
-#define CD7 0x27 /* Cache data 7 register */
-#define CD8 0x28 /* Cache data 8 register */
-#define CD9 0x29 /* Cache data 9 register */
-#define CDA 0x2a /* Cache data A register */
-#define CDB 0x2b /* Cache data B register */
-#define CDC 0x2c /* Cache data C register */
-#define CDD 0x2d /* Cache data D register */
-#define CDE 0x2e /* Cache data E register */
-#define CDF 0x2f /* Cache data F register */
-
-/* 0x30-3f seem to be the same as 0x20-2f */
+// 32 cache registers (== 128 bytes) per channel follow.
+// In stereo mode, the two channels' caches are concatenated into one,
+// and hold the interleaved frames.
+// The cache holds 64 frames, so the upper half is not used in 8-bit mode.
+// All registers mentioned below count in frames.
+// The cache is a ring buffer; CCR_READADDRESS operates modulo 64.
+// The cache is filled from (CCCA_CURRADDR - CCR_CACHEINVALIDSIZE)
+// into (CCR_READADDRESS - CCR_CACHEINVALIDSIZE).
+// The engine has a fetch threshold of 32 bytes, so it tries to keep
+// CCR_CACHEINVALIDSIZE below 8 (16-bit stereo), 16 (16-bit mono,
+// 8-bit stereo), or 32 (8-bit mono). The actual transfers are pretty
+// unpredictable, especially if several voices are running.
+// Frames are consumed at CCR_READADDRESS, which is incremented afterwards,
+// along with CCCA_CURRADDR and CCR_CACHEINVALIDSIZE. This implies that the
+// actual playback position always lags CCCA_CURRADDR by exactly 64 frames.
+#define CD0 0x20 /* Cache data registers 0 .. 0x1f */
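Given the 64-frame lag described above, a driver could compensate when deriving the audible playback position. A sketch under that assumption (helper and parameter names are illustrative, the buffer is assumed to hold more than 64 frames, and snd_emu10k1_ptr_read() is declared further down and accepts combined sub-register constants):

static unsigned int emu_audible_pos(struct snd_emu10k1 *emu, unsigned int voice,
				    unsigned int start_addr,
				    unsigned int buffer_frames)
{
	unsigned int cur = snd_emu10k1_ptr_read(emu, CCCA_CURRADDR, voice);

	/* CCCA_CURRADDR runs exactly 64 frames ahead of what is audible */
	return (cur - start_addr + buffer_frames - 64) % buffer_frames;
}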
#define PTB 0x40 /* Page table base register */
#define PTB_MASK 0xfffff000 /* Physical address of the page table in host memory */
@@ -695,6 +710,8 @@
#define ADCBS_BUFSIZE_57344 0x0000001e
#define ADCBS_BUFSIZE_65536 0x0000001f
+// On Audigy, the FX send amounts are not applied instantly, but determine
+// targets towards which the following registers swerve gradually.
#define A_CSBA 0x4c /* FX send B & A current amounts */
#define A_CSDC 0x4d /* FX send D & C current amounts */
#define A_CSFE 0x4e /* FX send F & E current amounts */
@@ -755,6 +772,9 @@
#define CLIPL 0x5a /* Channel loop interrupt pending low register */
#define CLIPH 0x5b /* Channel loop interrupt pending high register */
+// These cause CPF_STOP_MASK to be set shortly after CCCA_CURRADDR passes DSL_LOOPENDADDR.
+// Subsequent changes to the address registers don't resume playback; clearing the bit here or in CPF does.
+// The registers are NOT synchronized; the next serviced channel picks up immediately.
#define SOLEL 0x5c /* Stop on loop enable low register */
#define SOLEH 0x5d /* Stop on loop enable high register */
@@ -793,22 +813,19 @@
#define SRCS_SPDIFRATE_96 0x00080000
#define MICIDX 0x63 /* Microphone recording buffer index register */
-#define MICIDX_MASK 0x0000ffff /* 16-bit value */
-#define MICIDX_IDX 0x10000063
+SUB_REG(MICIDX, IDX, 0x0000ffff)
#define ADCIDX 0x64 /* ADC recording buffer index register */
-#define ADCIDX_MASK 0x0000ffff /* 16 bit index field */
-#define ADCIDX_IDX 0x10000064
+SUB_REG(ADCIDX, IDX, 0x0000ffff)
#define A_ADCIDX 0x63
-#define A_ADCIDX_IDX 0x10000063
+SUB_REG(A_ADCIDX, IDX, 0x0000ffff)
#define A_MICIDX 0x64
-#define A_MICIDX_IDX 0x10000064
+SUB_REG(A_MICIDX, IDX, 0x0000ffff)
#define FXIDX 0x65 /* FX recording buffer index register */
-#define FXIDX_MASK 0x0000ffff /* 16-bit value */
-#define FXIDX_IDX 0x10000065
+SUB_REG(FXIDX, IDX, 0x0000ffff)
/* The 32-bit HLIEx and HLIPx registers all have one bit per channel control/status */
#define HLIEL 0x66 /* Channel half loop interrupt enable low register */
@@ -852,8 +869,8 @@
#define A_SPDIF_44100 0x00000080
#define A_SPDIF_MUTED 0x000000c0
-#define A_I2S_CAPTURE_RATE_MASK 0x00000e00 /* This sets the capture PCM rate, but it is */
-#define A_I2S_CAPTURE_RATE 0x03090076 /* unclear if this sets the ADC rate as well. */
+SUB_REG_NC(A_EHC, A_I2S_CAPTURE_RATE, 0x00000e00) /* This sets the capture PCM rate, but it is */
+ /* unclear if this sets the ADC rate as well. */
#define A_I2S_CAPTURE_48000 0x0
#define A_I2S_CAPTURE_192000 0x1
#define A_I2S_CAPTURE_96000 0x2
@@ -885,6 +902,11 @@
#define A_TTDA 0x7a /* Tank Table DMA Address */
#define A_TTDD 0x7b /* Tank Table DMA Data */
+// In A_FXRT1 & A_FXRT2, the 0x80 bit of each byte completely disables the
+// filter (CVCF_CURRENTFILTER) for the corresponding channel. There is no
+// effect on the volume (CVCF_CURRENTVOLUME) or the interpolator's filter
+// (CCCA_INTERPROM_MASK).
+
#define A_FXRT2 0x7c
#define A_FXRT_CHANNELE 0x0000003f /* Effects send bus number for channel's effects send E */
#define A_FXRT_CHANNELF 0x00003f00 /* Effects send bus number for channel's effects send F */
@@ -897,8 +919,6 @@
#define A_FXSENDAMOUNT_G_MASK 0x0000FF00
#define A_FXSENDAMOUNT_H_MASK 0x000000FF
-/* 0x7c, 0x7e "high bit is used for filtering" */
-
/* The send amounts for this one are the same as used with the emu10k1 */
#define A_FXRT1 0x7e
#define A_FXRT_CHANNELA 0x0000003f
@@ -975,6 +995,9 @@
#define EMU_HANA_WCLOCK_4X 0x10
#define EMU_HANA_WCLOCK_MULT_RESERVED 0x18
+// If the selected external clock source is/becomes invalid or incompatible
+// with the clock multiplier, the clock source is reset to this value, and
+// a WCLK_CHANGED interrupt is raised.
#define EMU_HANA_DEFCLOCK 0x06 /* 000000x 1 bits Default Word Clock */
#define EMU_HANA_DEFCLOCK_48K 0x00
#define EMU_HANA_DEFCLOCK_44_1K 0x01
@@ -1093,6 +1116,9 @@
#define EMU_DOCK_BOARD_ID0 0x00 /* ID bit 0 */
#define EMU_DOCK_BOARD_ID1 0x03 /* ID bit 1 */
+// The actual code disagrees with the bit widths annotated below -
+// the formula used is freq = 0x1770000 / (((X_HI << 5) | X_LO) + 1)
+
#define EMU_HANA_WC_SPDIF_HI 0x28 /* 0xxxxxx 6 bit SPDIF IN Word clock, upper 6 bits */
#define EMU_HANA_WC_SPDIF_LO 0x29 /* 0xxxxxx 6 bit SPDIF IN Word clock, lower 6 bits */
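Following the quoted formula, a sketch of deriving the measured rate from the two registers; the new snd_emu1010_get_raw_rate() declared below presumably does something similar, so this version is only an illustration:

static int emu_hana_spdif_in_rate(struct snd_emu10k1 *emu)
{
	u32 hi, lo;

	snd_emu1010_fpga_read(emu, EMU_HANA_WC_SPDIF_HI, &hi);
	snd_emu1010_fpga_read(emu, EMU_HANA_WC_SPDIF_LO, &lo);
	/* 0x1770000 == 24.576 MHz; e.g. a divisor of 512 yields 48000 */
	return 0x1770000 / (((hi << 5) | lo) + 1);
}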
@@ -1189,9 +1215,10 @@
* physical outputs of Hana, or outputs going to Alice2/Tina for capture -
* 16 x EMU_DST_ALICE2_EMU32_X (2x on rev2 boards). Which data is fed into
* a channel depends on the mixer control setting for each destination - see
- * emumixer.c - snd_emu1010_output_enum_ctls[], snd_emu1010_input_enum_ctls[]
+ * the register arrays in emumixer.c.
*/
#define EMU_DST_ALICE2_EMU32_0 0x000f /* 16 EMU32 channels to Alice2 +0 to +0xf */
+ /* This channel is delayed by one sample. */
#define EMU_DST_ALICE2_EMU32_1 0x0000 /* 16 EMU32 channels to Alice2 +0 to +0xf */
#define EMU_DST_ALICE2_EMU32_2 0x0001 /* 16 EMU32 channels to Alice2 +0 to +0xf */
#define EMU_DST_ALICE2_EMU32_3 0x0002 /* 16 EMU32 channels to Alice2 +0 to +0xf */
@@ -1422,24 +1449,35 @@
/* 0x600 and 0x700 not used */
+
+/* ------------------- CONSTANTS -------------------- */
+
+extern const char * const snd_emu10k1_fxbus[32];
+extern const char * const snd_emu10k1_sblive_ins[16];
+extern const char * const snd_emu10k1_audigy_ins[16];
+extern const char * const snd_emu10k1_sblive_outs[32];
+extern const char * const snd_emu10k1_audigy_outs[32];
+extern const s8 snd_emu10k1_sblive51_fxbus2_map[16];
+
/* ------------------- STRUCTURES -------------------- */
enum {
+ EMU10K1_UNUSED, // This must be zero
EMU10K1_EFX,
+ EMU10K1_EFX_IRQ,
EMU10K1_PCM,
+ EMU10K1_PCM_IRQ,
EMU10K1_SYNTH,
- EMU10K1_MIDI
+ EMU10K1_NUM_TYPES
};
struct snd_emu10k1;
struct snd_emu10k1_voice {
- int number;
- unsigned int use: 1,
- pcm: 1,
- efx: 1,
- synth: 1,
- midi: 1;
+ unsigned char number;
+ unsigned char use;
+ unsigned char dirty;
+ unsigned char last;
void (*interrupt)(struct snd_emu10k1 *emu, struct snd_emu10k1_voice *pvoice);
struct snd_emu10k1_pcm *epcm;
@@ -1461,7 +1499,9 @@ struct snd_emu10k1_pcm {
struct snd_emu10k1_voice *extra;
unsigned short running;
unsigned short first_ptr;
+ snd_pcm_uframes_t resume_pos;
struct snd_util_memblk *memblk;
+ unsigned int pitch_target;
unsigned int start_addr;
unsigned int ccca_start_addr;
unsigned int capture_ipr; /* interrupt acknowledge mask */
@@ -1479,6 +1519,8 @@ struct snd_emu10k1_pcm_mixer {
/* mono, left, right x 8 sends (4 on emu10k1) */
unsigned char send_routing[3][8];
unsigned char send_volume[3][8];
+ // 0x8000 is neutral. The mixer code rescales it to 0xffff to maintain
+ // backwards compatibility with user space.
unsigned short attn[3];
struct snd_emu10k1_pcm *epcm;
};
@@ -1487,10 +1529,13 @@ struct snd_emu10k1_pcm_mixer {
((route[0] | (route[1] << 4) | (route[2] << 8) | (route[3] << 12)) << 16)
#define snd_emu10k1_compose_audigy_fxrt1(route) \
-((unsigned int)route[0] | ((unsigned int)route[1] << 8) | ((unsigned int)route[2] << 16) | ((unsigned int)route[3] << 24))
+((unsigned int)route[0] | ((unsigned int)route[1] << 8) | ((unsigned int)route[2] << 16) | ((unsigned int)route[3] << 24) | 0x80808080)
#define snd_emu10k1_compose_audigy_fxrt2(route) \
-((unsigned int)route[4] | ((unsigned int)route[5] << 8) | ((unsigned int)route[6] << 16) | ((unsigned int)route[7] << 24))
+((unsigned int)route[4] | ((unsigned int)route[5] << 8) | ((unsigned int)route[6] << 16) | ((unsigned int)route[7] << 24) | 0x80808080)
+
+#define snd_emu10k1_compose_audigy_sendamounts(vol) \
+(((unsigned int)vol[4] << 24) | ((unsigned int)vol[5] << 16) | ((unsigned int)vol[6] << 8) | (unsigned int)vol[7])
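A hedged sketch of using these compose helpers together with the mixer state above (variable names are illustrative; note that the OR-ed 0x80808080 sets the 0x80 bit of each byte, which, per the A_FXRT comment earlier, disables the channel filter):

/* program the Audigy send routing for one voice from the mono set */
unsigned char *route = &mix->send_routing[0][0];

snd_emu10k1_ptr_write(emu, A_FXRT1, voice,
		      snd_emu10k1_compose_audigy_fxrt1(route));
snd_emu10k1_ptr_write(emu, A_FXRT2, voice,
		      snd_emu10k1_compose_audigy_fxrt2(route));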
struct snd_emu10k1_memblk {
struct snd_util_memblk mem;
@@ -1510,9 +1555,9 @@ struct snd_emu10k1_fx8010_ctl {
unsigned int vcount;
unsigned int count; /* count of GPR (1..16) */
unsigned short gpr[32]; /* GPR number(s) */
- unsigned int value[32];
- unsigned int min; /* minimum range */
- unsigned int max; /* maximum range */
+ int value[32];
+ int min; /* minimum range */
+ int max; /* maximum range */
unsigned int translation; /* translation type (EMU10K1_GPR_TRANSLATION*) */
struct snd_kcontrol *kcontrol;
};
@@ -1600,39 +1645,47 @@ struct snd_emu_chip_details {
u32 device;
u32 subsystem;
unsigned char revision;
- unsigned char emu10k1_chip; /* Original SB Live. Not SB Live 24bit. */
- /* Redundant with emu10k2_chip being unset. */
- unsigned char emu10k2_chip; /* Audigy 1 or Audigy 2. */
- unsigned char ca0102_chip; /* Audigy 1 or Audigy 2. Not SB Audigy 2 Value. */
- /* Redundant with ca0108_chip being unset. */
- unsigned char ca0108_chip; /* Audigy 2 Value */
- unsigned char ca_cardbus_chip; /* Audigy 2 ZS Notebook */
- unsigned char ca0151_chip; /* P16V */
- unsigned char spk71; /* Has 7.1 speakers */
- unsigned char sblive51; /* SBLive! 5.1 - extout 0x11 -> center, 0x12 -> lfe */
- unsigned char spdif_bug; /* Has Spdif phasing bug */
- unsigned char ac97_chip; /* Has an AC97 chip: 1 = mandatory, 2 = optional */
- unsigned char ecard; /* APS EEPROM */
- unsigned char emu_model; /* EMU model type */
- unsigned char spi_dac; /* SPI interface for DAC; requires ca0108_chip */
- unsigned char i2c_adc; /* I2C interface for ADC; requires ca0108_chip */
- unsigned char adc_1361t; /* Use Philips 1361T ADC */
- unsigned char invert_shared_spdif; /* analog/digital switch inverted */
+ unsigned char emu_model; /* EMU model type */
+ unsigned int emu10k1_chip:1; /* Original SB Live. Not SB Live 24bit. */
+ /* Redundant with emu10k2_chip being unset. */
+ unsigned int emu10k2_chip:1; /* Audigy 1 or Audigy 2. */
+ unsigned int ca0102_chip:1; /* Audigy 1 or Audigy 2. Not SB Audigy 2 Value. */
+ /* Redundant with ca0108_chip being unset. */
+ unsigned int ca0108_chip:1; /* Audigy 2 Value */
+ unsigned int ca_cardbus_chip:1; /* Audigy 2 ZS Notebook */
+ unsigned int ca0151_chip:1; /* P16V */
+ unsigned int spk20:1; /* Stereo only */
+ unsigned int spk71:1; /* Has 7.1 speakers */
+ unsigned int no_adat:1; /* Has no ADAT, only SPDIF */
+ unsigned int sblive51:1; /* SBLive! 5.1 - extout 0x11 -> center, 0x12 -> lfe */
+ unsigned int spdif_bug:1; /* Has Spdif phasing bug */
+ unsigned int ac97_chip:2; /* Has an AC97 chip: 1 = mandatory, 2 = optional */
+ unsigned int ecard:1; /* APS EEPROM */
+ unsigned int spi_dac:1; /* SPI interface for DAC; requires ca0108_chip */
+ unsigned int i2c_adc:1; /* I2C interface for ADC; requires ca0108_chip */
+ unsigned int adc_1361t:1; /* Use Philips 1361T ADC */
+ unsigned int invert_shared_spdif:1; /* analog/digital switch inverted */
const char *driver;
const char *name;
const char *id; /* for backward compatibility - can be NULL if not needed */
};
+#define NUM_OUTPUT_DESTS 28
+#define NUM_INPUT_DESTS 22
+
struct snd_emu1010 {
- unsigned int output_source[64];
- unsigned int input_source[64];
+ unsigned char output_source[NUM_OUTPUT_DESTS];
+ unsigned char input_source[NUM_INPUT_DESTS];
unsigned int adc_pads; /* bit mask */
unsigned int dac_pads; /* bit mask */
- unsigned int internal_clock; /* 44100 or 48000 */
+ unsigned int wclock; /* Cached register value */
+ unsigned int word_clock; /* Cached effective value */
+ unsigned int clock_source;
+ unsigned int clock_fallback;
unsigned int optical_in; /* 0:SPDIF, 1:ADAT */
unsigned int optical_out; /* 0:SPDIF, 1:ADAT */
- struct delayed_work firmware_work;
- u32 last_reg;
+ struct work_struct firmware_work;
+ struct work_struct clock_work;
};
struct snd_emu10k1 {
@@ -1653,7 +1706,6 @@ struct snd_emu10k1 {
unsigned int address_mode; /* address mode */
unsigned long dma_mask; /* PCI DMA mask */
bool iommu_workaround; /* IOMMU workaround needed */
- unsigned int delay_pcm_irq; /* in samples */
int max_cache_pages; /* max memory size / PAGE_SIZE */
struct snd_dma_buffer silent_page; /* silent page */
struct snd_dma_buffer ptb_pages; /* page table pages */
@@ -1708,6 +1760,7 @@ struct snd_emu10k1 {
struct snd_kcontrol *ctl_efx_send_routing;
struct snd_kcontrol *ctl_efx_send_volume;
struct snd_kcontrol *ctl_efx_attn;
+ struct snd_kcontrol *ctl_clock_source;
void (*hwvol_interrupt)(struct snd_emu10k1 *emu, unsigned int status);
void (*capture_interrupt)(struct snd_emu10k1 *emu, unsigned int status);
@@ -1715,6 +1768,7 @@ struct snd_emu10k1 {
void (*capture_efx_interrupt)(struct snd_emu10k1 *emu, unsigned int status);
void (*spdif_interrupt)(struct snd_emu10k1 *emu, unsigned int status);
void (*dsp_interrupt)(struct snd_emu10k1 *emu);
+ void (*gpio_interrupt)(struct snd_emu10k1 *emu);
void (*p16v_interrupt)(struct snd_emu10k1 *emu);
struct snd_pcm_substream *pcm_capture_substream;
@@ -1775,6 +1829,7 @@ int snd_emu10k1_done(struct snd_emu10k1 * emu);
/* I/O functions */
unsigned int snd_emu10k1_ptr_read(struct snd_emu10k1 * emu, unsigned int reg, unsigned int chn);
void snd_emu10k1_ptr_write(struct snd_emu10k1 *emu, unsigned int reg, unsigned int chn, unsigned int data);
+void snd_emu10k1_ptr_write_multiple(struct snd_emu10k1 *emu, unsigned int chn, ...);
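A usage sketch for the vararg writer, assuming (based on the REGLIST_END terminator defined earlier in this header) that it takes register/value pairs; the registers and values are illustrative:

/* reset pitch and volume state of one voice in a single call */
snd_emu10k1_ptr_write_multiple(emu, voice,
			       CPF, 0,
			       PTRX, 0,
			       CVCF, 0,
			       VTFT, 0,
			       REGLIST_END);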
unsigned int snd_emu10k1_ptr20_read(struct snd_emu10k1 * emu, unsigned int reg, unsigned int chn);
void snd_emu10k1_ptr20_write(struct snd_emu10k1 *emu, unsigned int reg, unsigned int chn, unsigned int data);
int snd_emu10k1_spi_write(struct snd_emu10k1 * emu, unsigned int data);
@@ -1782,6 +1837,9 @@ int snd_emu10k1_i2c_write(struct snd_emu10k1 *emu, u32 reg, u32 value);
void snd_emu1010_fpga_write(struct snd_emu10k1 *emu, u32 reg, u32 value);
void snd_emu1010_fpga_read(struct snd_emu10k1 *emu, u32 reg, u32 *value);
void snd_emu1010_fpga_link_dst_src_write(struct snd_emu10k1 *emu, u32 dst, u32 src);
+u32 snd_emu1010_fpga_link_dst_src_read(struct snd_emu10k1 *emu, u32 dst);
+int snd_emu1010_get_raw_rate(struct snd_emu10k1 *emu, u8 src);
+void snd_emu1010_update_clock(struct snd_emu10k1 *emu);
unsigned int snd_emu10k1_efx_read(struct snd_emu10k1 *emu, unsigned int pc);
void snd_emu10k1_intr_enable(struct snd_emu10k1 *emu, unsigned int intrenb);
void snd_emu10k1_intr_disable(struct snd_emu10k1 *emu, unsigned int intrenb);
@@ -1791,13 +1849,17 @@ void snd_emu10k1_voice_intr_ack(struct snd_emu10k1 *emu, unsigned int voicenum);
void snd_emu10k1_voice_half_loop_intr_enable(struct snd_emu10k1 *emu, unsigned int voicenum);
void snd_emu10k1_voice_half_loop_intr_disable(struct snd_emu10k1 *emu, unsigned int voicenum);
void snd_emu10k1_voice_half_loop_intr_ack(struct snd_emu10k1 *emu, unsigned int voicenum);
+#if 0
void snd_emu10k1_voice_set_loop_stop(struct snd_emu10k1 *emu, unsigned int voicenum);
void snd_emu10k1_voice_clear_loop_stop(struct snd_emu10k1 *emu, unsigned int voicenum);
+#endif
+void snd_emu10k1_voice_set_loop_stop_multiple(struct snd_emu10k1 *emu, u64 voices);
+void snd_emu10k1_voice_clear_loop_stop_multiple(struct snd_emu10k1 *emu, u64 voices);
+int snd_emu10k1_voice_clear_loop_stop_multiple_atomic(struct snd_emu10k1 *emu, u64 voices);
void snd_emu10k1_wait(struct snd_emu10k1 *emu, unsigned int wait);
static inline unsigned int snd_emu10k1_wc(struct snd_emu10k1 *emu) { return (inl(emu->port + WC) >> 6) & 0xfffff; }
unsigned short snd_emu10k1_ac97_read(struct snd_ac97 *ac97, unsigned short reg);
void snd_emu10k1_ac97_write(struct snd_ac97 *ac97, unsigned short reg, unsigned short data);
-unsigned int snd_emu10k1_rate_to_pitch(unsigned int rate);
#ifdef CONFIG_PM_SLEEP
void snd_emu10k1_suspend_regs(struct snd_emu10k1 *emu);
@@ -1825,7 +1887,8 @@ int snd_emu10k1_synth_copy_from_user(struct snd_emu10k1 *emu, struct snd_util_me
int snd_emu10k1_memblk_map(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk);
/* voice allocation */
-int snd_emu10k1_voice_alloc(struct snd_emu10k1 *emu, int type, int pair, struct snd_emu10k1_voice **rvoice);
+int snd_emu10k1_voice_alloc(struct snd_emu10k1 *emu, int type, int count, int channels,
+ struct snd_emu10k1_pcm *epcm, struct snd_emu10k1_voice **rvoice);
int snd_emu10k1_voice_free(struct snd_emu10k1 *emu, struct snd_emu10k1_voice *pvoice);
/* MIDI uart */
diff --git a/include/sound/emux_synth.h b/include/sound/emux_synth.h
index d499b68122a3..1cc530434b97 100644
--- a/include/sound/emux_synth.h
+++ b/include/sound/emux_synth.h
@@ -54,6 +54,7 @@ struct snd_emux_operators {
#if IS_ENABLED(CONFIG_SND_SEQUENCER_OSS)
int (*oss_ioctl)(struct snd_emux *emu, int cmd, int p1, int p2);
#endif
+ int (*get_pitch_shift)(struct snd_emux *emu);
};
@@ -82,7 +83,6 @@ struct snd_emux {
int max_voices; /* Number of voices */
int mem_size; /* memory size (in byte) */
int num_ports; /* number of ports to be created */
- int pitch_shift; /* pitch shift value (for Emu10k1) */
struct snd_emux_operators ops; /* operators */
void *hw; /* hardware */
unsigned long flags; /* other conditions */
diff --git a/include/sound/graph_card.h b/include/sound/graph_card.h
index 4c8b94c77b8e..8e2e15dfcb1e 100644
--- a/include/sound/graph_card.h
+++ b/include/sound/graph_card.h
@@ -9,27 +9,27 @@
#include <sound/simple_card_utils.h>
-typedef int (*GRAPH2_CUSTOM)(struct asoc_simple_priv *priv,
+typedef int (*GRAPH2_CUSTOM)(struct simple_util_priv *priv,
struct device_node *lnk,
struct link_info *li);
struct graph2_custom_hooks {
- int (*hook_pre)(struct asoc_simple_priv *priv);
- int (*hook_post)(struct asoc_simple_priv *priv);
+ int (*hook_pre)(struct simple_util_priv *priv);
+ int (*hook_post)(struct simple_util_priv *priv);
GRAPH2_CUSTOM custom_normal;
GRAPH2_CUSTOM custom_dpcm;
GRAPH2_CUSTOM custom_c2c;
};
-int audio_graph_parse_of(struct asoc_simple_priv *priv, struct device *dev);
-int audio_graph2_parse_of(struct asoc_simple_priv *priv, struct device *dev,
+int audio_graph_parse_of(struct simple_util_priv *priv, struct device *dev);
+int audio_graph2_parse_of(struct simple_util_priv *priv, struct device *dev,
struct graph2_custom_hooks *hooks);
-int audio_graph2_link_normal(struct asoc_simple_priv *priv,
+int audio_graph2_link_normal(struct simple_util_priv *priv,
struct device_node *lnk, struct link_info *li);
-int audio_graph2_link_dpcm(struct asoc_simple_priv *priv,
+int audio_graph2_link_dpcm(struct simple_util_priv *priv,
struct device_node *lnk, struct link_info *li);
-int audio_graph2_link_c2c(struct asoc_simple_priv *priv,
+int audio_graph2_link_c2c(struct simple_util_priv *priv,
struct device_node *lnk, struct link_info *li);
#endif /* __GRAPH_CARD_H */
diff --git a/include/sound/hda-mlink.h b/include/sound/hda-mlink.h
index 4f44f0bd5388..228114aca415 100644
--- a/include/sound/hda-mlink.h
+++ b/include/sound/hda-mlink.h
@@ -42,6 +42,7 @@ int hdac_bus_eml_power_down_unlocked(struct hdac_bus *bus, bool alt, int elid, i
int hdac_bus_eml_sdw_power_up_unlocked(struct hdac_bus *bus, int sublink);
int hdac_bus_eml_sdw_power_down_unlocked(struct hdac_bus *bus, int sublink);
+int hdac_bus_eml_sdw_get_lsdiid_unlocked(struct hdac_bus *bus, int sublink, u16 *lsdiid);
int hdac_bus_eml_sdw_set_lsdiid(struct hdac_bus *bus, int sublink, int dev_num);
int hdac_bus_eml_sdw_map_stream_ch(struct hdac_bus *bus, int sublink, int y,
@@ -146,6 +147,9 @@ static inline int
hdac_bus_eml_sdw_power_down_unlocked(struct hdac_bus *bus, int sublink) { return 0; }
static inline int
+hdac_bus_eml_sdw_get_lsdiid_unlocked(struct hdac_bus *bus, int sublink, u16 *lsdiid) { return 0; }
+
+static inline int
hdac_bus_eml_sdw_set_lsdiid(struct hdac_bus *bus, int sublink, int dev_num) { return 0; }
static inline int
diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
index bbb7805e85d8..9c94ba7c183d 100644
--- a/include/sound/hda_codec.h
+++ b/include/sound/hda_codec.h
@@ -18,9 +18,6 @@
#include <sound/hda_verbs.h>
#include <sound/hda_regmap.h>
-#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
-#define IS_CFL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa348)
-
/*
* Structures
*/
@@ -144,6 +141,7 @@ struct hda_pcm_stream {
hda_nid_t nid; /* default NID to query rates/formats/bps, or set up */
u32 rates; /* supported rates */
u64 formats; /* supported formats (SNDRV_PCM_FMTBIT_) */
+ u32 subformats; /* for S32_LE format, SNDRV_PCM_SUBFMTBIT_* */
unsigned int maxbps; /* supported max. bit per sample */
const struct snd_pcm_chmap_elem *chmap; /* chmap to override */
struct hda_pcm_ops ops;
@@ -451,8 +449,8 @@ void __snd_hda_codec_cleanup_stream(struct hda_codec *codec, hda_nid_t nid,
#define snd_hda_codec_cleanup_stream(codec, nid) \
__snd_hda_codec_cleanup_stream(codec, nid, 0)
-#define snd_hda_query_supported_pcm(codec, nid, ratesp, fmtsp, bpsp) \
- snd_hdac_query_supported_pcm(&(codec)->core, nid, ratesp, fmtsp, bpsp)
+#define snd_hda_query_supported_pcm(codec, nid, ratesp, fmtsp, subfmtp, bpsp) \
+ snd_hdac_query_supported_pcm(&(codec)->core, nid, ratesp, fmtsp, subfmtp, bpsp)
#define snd_hda_is_supported_format(codec, nid, fmt) \
snd_hdac_is_supported_format(&(codec)->core, nid, fmt)
diff --git a/include/sound/hda_register.h b/include/sound/hda_register.h
index 9c7872c0ca79..55958711d697 100644
--- a/include/sound/hda_register.h
+++ b/include/sound/hda_register.h
@@ -91,6 +91,8 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
#define AZX_REG_SD_BDLPL 0x18
#define AZX_REG_SD_BDLPU 0x1c
+#define AZX_SD_FIFOSIZE_MASK GENMASK(15, 0)
+
/* GTS registers */
#define AZX_REG_LLCH 0x14
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index 97f09acae302..a73d7f34f4e5 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -11,6 +11,7 @@
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/iopoll.h>
+#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/timecounter.h>
#include <sound/core.h>
@@ -32,7 +33,7 @@ struct hda_device_id;
/*
* exported bus type
*/
-extern struct bus_type snd_hda_bus_type;
+extern const struct bus_type snd_hda_bus_type;
/*
* generic arrays
@@ -139,13 +140,14 @@ int snd_hdac_get_connections(struct hdac_device *codec, hda_nid_t nid,
hda_nid_t *conn_list, int max_conns);
int snd_hdac_get_sub_nodes(struct hdac_device *codec, hda_nid_t nid,
hda_nid_t *start_id);
-unsigned int snd_hdac_calc_stream_format(unsigned int rate,
- unsigned int channels,
- snd_pcm_format_t format,
- unsigned int maxbps,
- unsigned short spdif_ctls);
+unsigned int snd_hdac_stream_format_bits(snd_pcm_format_t format, snd_pcm_subformat_t subformat,
+ unsigned int maxbits);
+unsigned int snd_hdac_stream_format(unsigned int channels, unsigned int bits, unsigned int rate);
+unsigned int snd_hdac_spdif_stream_format(unsigned int channels, unsigned int bits,
+ unsigned int rate, unsigned short spdif_ctls);
int snd_hdac_query_supported_pcm(struct hdac_device *codec, hda_nid_t nid,
- u32 *ratesp, u64 *formatsp, unsigned int *bpsp);
+ u32 *ratesp, u64 *formatsp, u32 *subformatsp,
+ unsigned int *bpsp);
bool snd_hdac_is_supported_format(struct hdac_device *codec, hda_nid_t nid,
unsigned int format);
@@ -347,6 +349,8 @@ struct hdac_bus {
bool corbrp_self_clear:1; /* CORBRP clears itself after reset */
bool polling_mode:1;
bool needs_damn_long_delay:1;
+ bool not_use_interrupts:1; /* prohibiting the RIRB IRQ */
+ bool access_sdnctl_in_dword:1; /* accessing the sdnctl register by dword */
int poll_count;
@@ -570,7 +574,7 @@ void snd_hdac_stream_release(struct hdac_stream *azx_dev);
struct hdac_stream *snd_hdac_get_stream(struct hdac_bus *bus,
int dir, int stream_tag);
-int snd_hdac_stream_setup(struct hdac_stream *azx_dev);
+int snd_hdac_stream_setup(struct hdac_stream *azx_dev, bool code_loading);
void snd_hdac_stream_cleanup(struct hdac_stream *azx_dev);
int snd_hdac_stream_setup_periods(struct hdac_stream *azx_dev);
int snd_hdac_stream_set_params(struct hdac_stream *azx_dev,
@@ -621,6 +625,9 @@ int snd_hdac_stream_set_lpib(struct hdac_stream *azx_dev, u32 value);
#define snd_hdac_stream_readb_poll(dev, reg, val, cond, delay_us, timeout_us) \
read_poll_timeout_atomic(snd_hdac_reg_readb, val, cond, delay_us, timeout_us, \
false, (dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg)
+#define snd_hdac_stream_readw_poll(dev, reg, val, cond, delay_us, timeout_us) \
+ read_poll_timeout_atomic(snd_hdac_reg_readw, val, cond, delay_us, timeout_us, \
+ false, (dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg)
#define snd_hdac_stream_readl_poll(dev, reg, val, cond, delay_us, timeout_us) \
read_poll_timeout_atomic(snd_hdac_reg_readl, val, cond, delay_us, timeout_us, \
false, (dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg)
@@ -702,4 +709,29 @@ static inline unsigned int snd_array_index(struct snd_array *array, void *ptr)
for ((idx) = 0, (ptr) = (array)->list; (idx) < (array)->used; \
(ptr) = snd_array_elem(array, ++(idx)))
+/*
+ * Device matching
+ */
+
+#define HDA_CONTROLLER_IS_HSW(pci) (pci_match_id((struct pci_device_id []){ \
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HDA_HSW_0) }, \
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HDA_HSW_2) }, \
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HDA_HSW_3) }, \
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HDA_BDW) }, \
+ { } \
+ }, pci))
+
+#define HDA_CONTROLLER_IS_APL(pci) (pci_match_id((struct pci_device_id []){ \
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HDA_APL) }, \
+ { } \
+ }, pci))
+
+#define HDA_CONTROLLER_IN_GPU(pci) (pci_match_id((struct pci_device_id []){ \
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HDA_DG1) }, \
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HDA_DG2_0) }, \
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HDA_DG2_1) }, \
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HDA_DG2_2) }, \
+ { } \
+ }, pci) || HDA_CONTROLLER_IS_HSW(pci))
+
#endif /* __SOUND_HDAUDIO_H */
diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h
index 511211f4a2b6..a8bebac1e4b2 100644
--- a/include/sound/hdaudio_ext.h
+++ b/include/sound/hdaudio_ext.h
@@ -60,6 +60,8 @@ struct hdac_ext_stream {
bool link_locked:1;
bool link_prepared;
+ int (*host_setup)(struct hdac_stream *, bool);
+
struct snd_pcm_substream *link_substream;
};
@@ -86,6 +88,7 @@ void snd_hdac_ext_stream_start(struct hdac_ext_stream *hext_stream);
void snd_hdac_ext_stream_clear(struct hdac_ext_stream *hext_stream);
void snd_hdac_ext_stream_reset(struct hdac_ext_stream *hext_stream);
int snd_hdac_ext_stream_setup(struct hdac_ext_stream *hext_stream, int fmt);
+int snd_hdac_ext_host_stream_setup(struct hdac_ext_stream *hext_stream, bool code_loading);
struct hdac_ext_link {
struct hdac_bus *bus;
diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
index 8d6cdb254039..b0da633184cd 100644
--- a/include/sound/hwdep.h
+++ b/include/sound/hwdep.h
@@ -53,7 +53,7 @@ struct snd_hwdep {
wait_queue_head_t open_wait;
void *private_data;
void (*private_free) (struct snd_hwdep *hwdep);
- struct device dev;
+ struct device *dev;
struct mutex open_mutex;
int used; /* reference counter */
diff --git a/include/sound/info.h b/include/sound/info.h
index 7c13bf52cc81..adbc506860d6 100644
--- a/include/sound/info.h
+++ b/include/sound/info.h
@@ -118,8 +118,6 @@ struct snd_info_entry *snd_info_create_card_entry(struct snd_card *card,
const char *name,
struct snd_info_entry *parent);
void snd_info_free_entry(struct snd_info_entry *entry);
-int snd_info_store_text(struct snd_info_entry *entry);
-int snd_info_restore_text(struct snd_info_entry *entry);
int snd_info_card_create(struct snd_card *card);
int snd_info_card_register(struct snd_card *card);
diff --git a/include/sound/l3.h b/include/sound/l3.h
deleted file mode 100644
index b6f58072237a..000000000000
--- a/include/sound/l3.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _L3_H_
-#define _L3_H_ 1
-
-struct l3_pins {
- void (*setdat)(struct l3_pins *, int);
- void (*setclk)(struct l3_pins *, int);
- void (*setmode)(struct l3_pins *, int);
-
- int gpio_data;
- int gpio_clk;
- int gpio_mode;
- int use_gpios;
-
- int data_hold;
- int data_setup;
- int clock_high;
- int mode_hold;
- int mode;
- int mode_setup;
-};
-
-struct device;
-
-int l3_write(struct l3_pins *adap, u8 addr, u8 *data, int len);
-int l3_set_gpio_ops(struct device *dev, struct l3_pins *adap);
-
-#endif
diff --git a/include/sound/max9768.h b/include/sound/max9768.h
index 0f78b41d030e..8509ba0079b0 100644
--- a/include/sound/max9768.h
+++ b/include/sound/max9768.h
@@ -9,14 +9,10 @@
/**
* struct max9768_pdata - optional platform specific MAX9768 configuration
- * @shdn_gpio: GPIO to SHDN pin. If not valid, pin must be hardwired HIGH
- * @mute_gpio: GPIO to MUTE pin. If not valid, control for mute won't be added
* @flags: configuration flags, e.g. set classic PWM mode (check datasheet
* regarding "filterless modulation" which is default).
*/
struct max9768_pdata {
- int shdn_gpio;
- int mute_gpio;
unsigned flags;
#define MAX9768_FLAG_CLASSIC_PWM (1 << 0)
};
diff --git a/include/sound/opl3.h b/include/sound/opl3.h
index ebf3852da9fe..052395a2f876 100644
--- a/include/sound/opl3.h
+++ b/include/sound/opl3.h
@@ -229,7 +229,7 @@ struct fm_operator {
unsigned char attack_decay;
unsigned char sustain_release;
unsigned char wave_select;
-} __attribute__((packed));
+} __packed;
/* Instrument data */
struct fm_instrument {
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 19f564606ac4..cc175c623dac 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -16,6 +16,7 @@
#include <linux/bitops.h>
#include <linux/pm_qos.h>
#include <linux/refcount.h>
+#include <linux/uio.h>
#define snd_pcm_substream_chip(substream) ((substream)->private_data)
#define snd_pcm_chip(pcm) ((pcm)->private_data)
@@ -31,6 +32,7 @@
struct snd_pcm_hardware {
unsigned int info; /* SNDRV_PCM_INFO_* */
u64 formats; /* SNDRV_PCM_FMTBIT_* */
+ u32 subformats; /* for S32_LE, SNDRV_PCM_SUBFMTBIT_* */
unsigned int rates; /* SNDRV_PCM_RATE_* */
unsigned int rate_min; /* min rate */
unsigned int rate_max; /* max rate */
@@ -68,11 +70,8 @@ struct snd_pcm_ops {
struct snd_pcm_audio_tstamp_report *audio_tstamp_report);
int (*fill_silence)(struct snd_pcm_substream *substream, int channel,
unsigned long pos, unsigned long bytes);
- int (*copy_user)(struct snd_pcm_substream *substream, int channel,
- unsigned long pos, void __user *buf,
- unsigned long bytes);
- int (*copy_kernel)(struct snd_pcm_substream *substream, int channel,
- unsigned long pos, void *buf, unsigned long bytes);
+ int (*copy)(struct snd_pcm_substream *substream, int channel,
+ unsigned long pos, struct iov_iter *iter, unsigned long bytes);
struct page *(*page)(struct snd_pcm_substream *substream,
unsigned long offset);
int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
@@ -219,6 +218,12 @@ struct snd_pcm_ops {
#define SNDRV_PCM_FMTBIT_U20 SNDRV_PCM_FMTBIT_U20_BE
#endif
+#define _SNDRV_PCM_SUBFMTBIT(fmt) BIT((__force int)SNDRV_PCM_SUBFORMAT_##fmt)
+#define SNDRV_PCM_SUBFMTBIT_STD _SNDRV_PCM_SUBFMTBIT(STD)
+#define SNDRV_PCM_SUBFMTBIT_MSBITS_MAX _SNDRV_PCM_SUBFMTBIT(MSBITS_MAX)
+#define SNDRV_PCM_SUBFMTBIT_MSBITS_20 _SNDRV_PCM_SUBFMTBIT(MSBITS_20)
+#define SNDRV_PCM_SUBFMTBIT_MSBITS_24 _SNDRV_PCM_SUBFMTBIT(MSBITS_24)
+
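A sketch of advertising the new subformat bits from a driver's hardware description; the values are illustrative, and the subformats field is the one added to struct snd_pcm_hardware above:

static const struct snd_pcm_hardware example_hw = {
	.info       = SNDRV_PCM_INFO_INTERLEAVED,
	.formats    = SNDRV_PCM_FMTBIT_S32_LE,
	/* the 32-bit container carries at most 24 significant bits */
	.subformats = SNDRV_PCM_SUBFMTBIT_STD | SNDRV_PCM_SUBFMTBIT_MSBITS_24,
	/* rates, channels and buffer constraints elided */
};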
struct snd_pcm_file {
struct snd_pcm_substream *substream;
int no_compat_mmap;
@@ -510,7 +515,7 @@ struct snd_pcm_str {
#endif
#endif
struct snd_kcontrol *chmap_kctl; /* channel-mapping controls */
- struct device dev;
+ struct device *dev;
};
struct snd_pcm {
@@ -1556,6 +1561,11 @@ static inline u64 pcm_format_to_bits(snd_pcm_format_t pcm_format)
#define pcm_dbg(pcm, fmt, args...) \
dev_dbg((pcm)->card->dev, fmt, ##args)
+/* helpers for copying between iov_iter and iomem */
+int copy_to_iter_fromio(struct iov_iter *iter, const void __iomem *src,
+ size_t count);
+int copy_from_iter_toio(void __iomem *dst, struct iov_iter *iter, size_t count);
+
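A sketch of a capture-side .copy callback built on these helpers, replacing the old copy_user/copy_kernel pair; struct example_chip and its mmio field are hypothetical:

static int example_copy(struct snd_pcm_substream *substream, int channel,
			unsigned long pos, struct iov_iter *iter,
			unsigned long bytes)
{
	struct example_chip *chip = snd_pcm_substream_chip(substream);

	/* one iov_iter path now serves both user and kernel buffers */
	return copy_to_iter_fromio(iter, chip->mmio + pos, bytes);
}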
struct snd_pcm_status64 {
snd_pcm_state_t state; /* stream state */
u8 rsvd[4];
diff --git a/include/sound/pcm_params.h b/include/sound/pcm_params.h
index ba184f49f7e1..fbf35df6e5cf 100644
--- a/include/sound/pcm_params.h
+++ b/include/sound/pcm_params.h
@@ -362,6 +362,8 @@ static inline int params_physical_width(const struct snd_pcm_hw_params *p)
return snd_pcm_format_physical_width(params_format(p));
}
+int snd_pcm_hw_params_bits(const struct snd_pcm_hw_params *p);
+
static inline void
params_set_format(struct snd_pcm_hw_params *p, snd_pcm_format_t fmt)
{
diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
index e1f59b2940af..f31cabf0158c 100644
--- a/include/sound/rawmidi.h
+++ b/include/sound/rawmidi.h
@@ -18,6 +18,7 @@
#if IS_ENABLED(CONFIG_SND_SEQUENCER)
#include <sound/seq_device.h>
#endif
+#include <sound/info.h>
/*
* Raw MIDI interface
@@ -47,6 +48,10 @@ struct snd_rawmidi_global_ops {
int (*dev_unregister) (struct snd_rawmidi * rmidi);
void (*get_port_info)(struct snd_rawmidi *rmidi, int number,
struct snd_seq_port_info *info);
+ long (*ioctl)(struct snd_rawmidi *rmidi, unsigned int cmd,
+ void __user *argp);
+ void (*proc_read)(struct snd_info_entry *entry,
+ struct snd_info_buffer *buf);
};
struct snd_rawmidi_runtime {
@@ -61,6 +66,7 @@ struct snd_rawmidi_runtime {
size_t avail_min; /* min avail for wakeup */
size_t avail; /* max used buffer for wakeup */
size_t xruns; /* over/underruns counter */
+ size_t align; /* alignment (0 = byte stream, 3 = UMP) */
int buffer_ref; /* buffer reference count */
/* misc */
wait_queue_head_t sleep;
@@ -129,7 +135,7 @@ struct snd_rawmidi {
struct mutex open_mutex;
wait_queue_head_t open_wait;
- struct device dev;
+ struct device *dev;
struct snd_info_entry *proc_entry;
@@ -146,6 +152,13 @@ int snd_rawmidi_new(struct snd_card *card, char *id, int device,
void snd_rawmidi_set_ops(struct snd_rawmidi *rmidi, int stream,
const struct snd_rawmidi_ops *ops);
+/* internal */
+int snd_rawmidi_init(struct snd_rawmidi *rmidi,
+ struct snd_card *card, char *id, int device,
+ int output_count, int input_count,
+ unsigned int info_flags);
+int snd_rawmidi_free(struct snd_rawmidi *rmidi);
+
/* callbacks */
int snd_rawmidi_receive(struct snd_rawmidi_substream *substream,
@@ -161,7 +174,7 @@ int snd_rawmidi_proceed(struct snd_rawmidi_substream *substream);
/* main midi functions */
int snd_rawmidi_info_select(struct snd_card *card, struct snd_rawmidi_info *info);
-int snd_rawmidi_kernel_open(struct snd_card *card, int device, int subdevice,
+int snd_rawmidi_kernel_open(struct snd_rawmidi *rmidi, int subdevice,
int mode, struct snd_rawmidi_file *rfile);
int snd_rawmidi_kernel_release(struct snd_rawmidi_file *rfile);
int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream,
diff --git a/include/sound/rt5665.h b/include/sound/rt5665.h
index 3b3d6a19ca49..e865f041929b 100644
--- a/include/sound/rt5665.h
+++ b/include/sound/rt5665.h
@@ -31,8 +31,6 @@ struct rt5665_platform_data {
bool in3_diff;
bool in4_diff;
- int ldo1_en; /* GPIO for LDO1_EN */
-
enum rt5665_dmic1_data_pin dmic1_data_pin;
enum rt5665_dmic2_data_pin dmic2_data_pin;
enum rt5665_jd_src jd_src;
diff --git a/include/sound/rt5668.h b/include/sound/rt5668.h
index 182edfbc9e7a..b682418c6cd6 100644
--- a/include/sound/rt5668.h
+++ b/include/sound/rt5668.h
@@ -25,9 +25,6 @@ enum rt5668_jd_src {
};
struct rt5668_platform_data {
-
- int ldo1_en; /* GPIO for LDO1_EN */
-
enum rt5668_dmic1_data_pin dmic1_data_pin;
enum rt5668_dmic1_clk_pin dmic1_clk_pin;
enum rt5668_jd_src jd_src;
diff --git a/include/sound/rt5682.h b/include/sound/rt5682.h
index 3900a07e3935..4256df721e3a 100644
--- a/include/sound/rt5682.h
+++ b/include/sound/rt5682.h
@@ -31,9 +31,6 @@ enum rt5682_dai_clks {
};
struct rt5682_platform_data {
-
- int ldo1_en; /* GPIO for LDO1_EN */
-
enum rt5682_dmic1_data_pin dmic1_data_pin;
enum rt5682_dmic1_clk_pin dmic1_clk_pin;
enum rt5682_jd_src jd_src;
diff --git a/include/sound/rt5682s.h b/include/sound/rt5682s.h
index f18d91308b9a..006e6003d11c 100644
--- a/include/sound/rt5682s.h
+++ b/include/sound/rt5682s.h
@@ -31,16 +31,21 @@ enum rt5682s_dai_clks {
RT5682S_DAI_NUM_CLKS,
};
-struct rt5682s_platform_data {
-
- int ldo1_en; /* GPIO for LDO1_EN */
+enum {
+ RT5682S_LDO_1_607V,
+ RT5682S_LDO_1_5V,
+ RT5682S_LDO_1_406V,
+ RT5682S_LDO_1_731V,
+};
+struct rt5682s_platform_data {
enum rt5682s_dmic1_data_pin dmic1_data_pin;
enum rt5682s_dmic1_clk_pin dmic1_clk_pin;
enum rt5682s_jd_src jd_src;
unsigned int dmic_clk_rate;
unsigned int dmic_delay;
unsigned int amic_delay;
+ unsigned int ldo_dacref;
bool dmic_clk_driving_high;
const char *dai_clk_names[RT5682S_DAI_NUM_CLKS];
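A sketch of board code selecting one of the new LDO DAC-reference levels via platform data; the values chosen here are illustrative:

static const struct rt5682s_platform_data example_pdata = {
	.ldo_dacref    = RT5682S_LDO_1_5V,	/* 1.5 V DAC reference */
	.dmic_clk_rate = 2048000,		/* 2.048 MHz DMIC clock */
};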
diff --git a/include/sound/seq_device.h b/include/sound/seq_device.h
index 8899affe9155..dead74b022f4 100644
--- a/include/sound/seq_device.h
+++ b/include/sound/seq_device.h
@@ -78,5 +78,6 @@ void snd_seq_driver_unregister(struct snd_seq_driver *drv);
*/
#define SNDRV_SEQ_DEV_ID_MIDISYNTH "seq-midi"
#define SNDRV_SEQ_DEV_ID_OPL3 "opl3-synth"
+#define SNDRV_SEQ_DEV_ID_UMP "seq-ump-client"
#endif /* __SOUND_SEQ_DEVICE_H */
diff --git a/include/sound/seq_kernel.h b/include/sound/seq_kernel.h
index 658911926f3a..c8621671fa70 100644
--- a/include/sound/seq_kernel.h
+++ b/include/sound/seq_kernel.h
@@ -70,9 +70,19 @@ int snd_seq_kernel_client_ctl(int client, unsigned int cmd, void *arg);
typedef int (*snd_seq_dump_func_t)(void *ptr, void *buf, int count);
int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char *buf,
int in_kernel, int size_aligned);
+int snd_seq_expand_var_event_at(const struct snd_seq_event *event, int count,
+ char *buf, int offset);
int snd_seq_dump_var_event(const struct snd_seq_event *event,
snd_seq_dump_func_t func, void *private_data);
+/* size of the event packet; it can be greater than snd_seq_event size */
+static inline size_t snd_seq_event_packet_size(struct snd_seq_event *ev)
+{
+ if (snd_seq_ev_is_ump(ev))
+ return sizeof(struct snd_seq_ump_event);
+ return sizeof(struct snd_seq_event);
+}
+
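A sketch of the intended use: sizing copies by the packet rather than by sizeof(*ev), so UMP events are not truncated (buf is a hypothetical destination large enough for either packet type):

memcpy(buf, ev, snd_seq_event_packet_size(ev));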
/* interface for OSS emulation */
int snd_seq_set_queue_tempo(int client, struct snd_seq_queue_tempo *tempo);
diff --git a/include/sound/simple_card.h b/include/sound/simple_card.h
index d264e5463f22..2e999916dbd7 100644
--- a/include/sound/simple_card.h
+++ b/include/sound/simple_card.h
@@ -12,15 +12,15 @@
#include <sound/soc.h>
#include <sound/simple_card_utils.h>
-struct asoc_simple_card_info {
+struct simple_util_info {
const char *name;
const char *card;
const char *codec;
const char *platform;
unsigned int daifmt;
- struct asoc_simple_dai cpu_dai;
- struct asoc_simple_dai codec_dai;
+ struct simple_util_dai cpu_dai;
+ struct simple_util_dai codec_dai;
};
#endif /* __SIMPLE_CARD_H */
diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h
index a3f3f3aa9e6e..ad67957b7b48 100644
--- a/include/sound/simple_card_utils.h
+++ b/include/sound/simple_card_utils.h
@@ -11,18 +11,18 @@
#include <linux/clk.h>
#include <sound/soc.h>
-#define asoc_simple_init_hp(card, sjack, prefix) \
- asoc_simple_init_jack(card, sjack, 1, prefix, NULL)
-#define asoc_simple_init_mic(card, sjack, prefix) \
- asoc_simple_init_jack(card, sjack, 0, prefix, NULL)
+#define simple_util_init_hp(card, sjack, prefix) \
+ simple_util_init_jack(card, sjack, 1, prefix, NULL)
+#define simple_util_init_mic(card, sjack, prefix) \
+ simple_util_init_jack(card, sjack, 0, prefix, NULL)
-struct asoc_simple_tdm_width_map {
+struct simple_util_tdm_width_map {
u8 sample_bits;
u8 slot_count;
u16 slot_width;
};
-struct asoc_simple_dai {
+struct simple_util_dai {
const char *name;
unsigned int sysclk;
int clk_direction;
@@ -32,17 +32,17 @@ struct asoc_simple_dai {
unsigned int rx_slot_mask;
struct clk *clk;
bool clk_fixed;
- struct asoc_simple_tdm_width_map *tdm_width_map;
+ struct simple_util_tdm_width_map *tdm_width_map;
int n_tdm_widths;
};
-struct asoc_simple_data {
+struct simple_util_data {
u32 convert_rate;
u32 convert_channels;
const char *convert_sample_format;
};
-struct asoc_simple_jack {
+struct simple_util_jack {
struct snd_soc_jack jack;
struct snd_soc_jack_pin pin;
struct snd_soc_jack_gpio gpio;
@@ -54,26 +54,22 @@ struct prop_nums {
int platforms;
};
-struct asoc_simple_priv {
+struct simple_util_priv {
struct snd_soc_card snd_card;
struct simple_dai_props {
- struct asoc_simple_dai *cpu_dai;
- struct asoc_simple_dai *codec_dai;
- struct snd_soc_dai_link_component *cpus;
- struct snd_soc_dai_link_component *codecs;
- struct snd_soc_dai_link_component *platforms;
- struct asoc_simple_data adata;
+ struct simple_util_dai *cpu_dai;
+ struct simple_util_dai *codec_dai;
+ struct simple_util_data adata;
struct snd_soc_codec_conf *codec_conf;
struct prop_nums num;
unsigned int mclk_fs;
} *dai_props;
- struct asoc_simple_jack hp_jack;
- struct asoc_simple_jack mic_jack;
+ struct simple_util_jack hp_jack;
+ struct simple_util_jack mic_jack;
struct snd_soc_jack *aux_jacks;
struct snd_soc_dai_link *dai_link;
- struct asoc_simple_dai *dais;
+ struct simple_util_dai *dais;
struct snd_soc_dai_link_component *dlcs;
- struct snd_soc_dai_link_component dummy;
struct snd_soc_codec_conf *codec_conf;
struct gpio_desc *pa_gpio;
const struct snd_soc_ops *ops;
@@ -134,73 +130,78 @@ struct link_info {
struct prop_nums num[SNDRV_MAX_LINKS];
};
-int asoc_simple_parse_daifmt(struct device *dev,
+int simple_util_parse_daifmt(struct device *dev,
struct device_node *node,
struct device_node *codec,
char *prefix,
unsigned int *retfmt);
-int asoc_simple_parse_tdm_width_map(struct device *dev, struct device_node *np,
- struct asoc_simple_dai *dai);
+int simple_util_parse_tdm_width_map(struct device *dev, struct device_node *np,
+ struct simple_util_dai *dai);
__printf(3, 4)
-int asoc_simple_set_dailink_name(struct device *dev,
+int simple_util_set_dailink_name(struct device *dev,
struct snd_soc_dai_link *dai_link,
const char *fmt, ...);
-int asoc_simple_parse_card_name(struct snd_soc_card *card,
+int simple_util_parse_card_name(struct snd_soc_card *card,
char *prefix);
-int asoc_simple_parse_clk(struct device *dev,
+int simple_util_parse_clk(struct device *dev,
struct device_node *node,
- struct asoc_simple_dai *simple_dai,
+ struct simple_util_dai *simple_dai,
struct snd_soc_dai_link_component *dlc);
-int asoc_simple_startup(struct snd_pcm_substream *substream);
-void asoc_simple_shutdown(struct snd_pcm_substream *substream);
-int asoc_simple_hw_params(struct snd_pcm_substream *substream,
+int simple_util_startup(struct snd_pcm_substream *substream);
+void simple_util_shutdown(struct snd_pcm_substream *substream);
+int simple_util_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params);
-int asoc_simple_dai_init(struct snd_soc_pcm_runtime *rtd);
-int asoc_simple_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+int simple_util_dai_init(struct snd_soc_pcm_runtime *rtd);
+int simple_util_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
struct snd_pcm_hw_params *params);
-#define asoc_simple_parse_tdm(np, dai) \
+#define simple_util_parse_tdm(np, dai) \
snd_soc_of_parse_tdm_slot(np, &(dai)->tx_slot_mask, \
&(dai)->rx_slot_mask, \
&(dai)->slots, \
&(dai)->slot_width);
-void asoc_simple_canonicalize_platform(struct snd_soc_dai_link_component *platforms,
+void simple_util_canonicalize_platform(struct snd_soc_dai_link_component *platforms,
struct snd_soc_dai_link_component *cpus);
-void asoc_simple_canonicalize_cpu(struct snd_soc_dai_link_component *cpus,
+void simple_util_canonicalize_cpu(struct snd_soc_dai_link_component *cpus,
int is_single_links);
-void asoc_simple_clean_reference(struct snd_soc_card *card);
+void simple_util_clean_reference(struct snd_soc_card *card);
-void asoc_simple_parse_convert(struct device_node *np, char *prefix,
- struct asoc_simple_data *data);
-bool asoc_simple_is_convert_required(const struct asoc_simple_data *data);
+void simple_util_parse_convert(struct device_node *np, char *prefix,
+ struct simple_util_data *data);
+bool simple_util_is_convert_required(const struct simple_util_data *data);
-int asoc_simple_parse_routing(struct snd_soc_card *card,
+int simple_util_parse_routing(struct snd_soc_card *card,
char *prefix);
-int asoc_simple_parse_widgets(struct snd_soc_card *card,
+int simple_util_parse_widgets(struct snd_soc_card *card,
char *prefix);
-int asoc_simple_parse_pin_switches(struct snd_soc_card *card,
+int simple_util_parse_pin_switches(struct snd_soc_card *card,
char *prefix);
-int asoc_simple_init_jack(struct snd_soc_card *card,
- struct asoc_simple_jack *sjack,
+int simple_util_init_jack(struct snd_soc_card *card,
+ struct simple_util_jack *sjack,
int is_hp, char *prefix, char *pin);
-int asoc_simple_init_aux_jacks(struct asoc_simple_priv *priv,
+int simple_util_init_aux_jacks(struct simple_util_priv *priv,
char *prefix);
-int asoc_simple_init_priv(struct asoc_simple_priv *priv,
+int simple_util_init_priv(struct simple_util_priv *priv,
struct link_info *li);
-int asoc_simple_remove(struct platform_device *pdev);
+void simple_util_remove(struct platform_device *pdev);
-int asoc_graph_card_probe(struct snd_soc_card *card);
-int asoc_graph_is_ports0(struct device_node *port);
+int graph_util_card_probe(struct snd_soc_card *card);
+int graph_util_is_ports0(struct device_node *port);
+int graph_util_parse_dai(struct device *dev, struct device_node *ep,
+ struct snd_soc_dai_link_component *dlc, int *is_single_link);
+
+int graph_util_parse_link_direction(struct device_node *np,
+ bool *is_playback_only, bool *is_capture_only);
#ifdef DEBUG
-static inline void asoc_simple_debug_dai(struct asoc_simple_priv *priv,
+static inline void simple_util_debug_dai(struct simple_util_priv *priv,
char *name,
- struct asoc_simple_dai *dai)
+ struct simple_util_dai *dai)
{
struct device *dev = simple_priv_to_dev(priv);
@@ -230,7 +231,7 @@ static inline void asoc_simple_debug_dai(struct asoc_simple_priv *priv,
name, dai->clk_direction ? "OUT" : "IN");
}
-static inline void asoc_simple_debug_info(struct asoc_simple_priv *priv)
+static inline void simple_util_debug_info(struct simple_util_priv *priv)
{
struct snd_soc_card *card = simple_priv_to_card(priv);
struct device *dev = simple_priv_to_dev(priv);
@@ -243,7 +244,7 @@ static inline void asoc_simple_debug_info(struct asoc_simple_priv *priv)
for (i = 0; i < card->num_links; i++) {
struct simple_dai_props *props = simple_priv_to_props(priv, i);
struct snd_soc_dai_link *link = simple_priv_to_link(priv, i);
- struct asoc_simple_dai *dai;
+ struct simple_util_dai *dai;
struct snd_soc_codec_conf *cnf;
int j;
@@ -251,10 +252,10 @@ static inline void asoc_simple_debug_info(struct asoc_simple_priv *priv)
dev_dbg(dev, "cpu num = %d\n", link->num_cpus);
for_each_prop_dai_cpu(props, j, dai)
- asoc_simple_debug_dai(priv, "cpu", dai);
+ simple_util_debug_dai(priv, "cpu", dai);
dev_dbg(dev, "codec num = %d\n", link->num_codecs);
for_each_prop_dai_codec(props, j, dai)
- asoc_simple_debug_dai(priv, "codec", dai);
+ simple_util_debug_dai(priv, "codec", dai);
if (link->name)
dev_dbg(dev, "dai name = %s\n", link->name);
@@ -272,7 +273,7 @@ static inline void asoc_simple_debug_info(struct asoc_simple_priv *priv)
}
}
#else
-#define asoc_simple_debug_info(priv)
+#define simple_util_debug_info(priv)
#endif /* DEBUG */
#endif /* __SIMPLE_CARD_UTILS_H */
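The asoc_simple_* helpers above are renamed to simple_util_*, with unchanged behavior except for simple_util_remove(), which now returns void to suit the platform-driver .remove_new convention. A minimal, hypothetical caller updated for the new names (priv setup elided):

static int example_probe(struct platform_device *pdev)
{
	struct simple_util_priv *priv = platform_get_drvdata(pdev);
	struct link_info li = { 0 };

	/* was asoc_simple_init_priv() */
	return simple_util_init_priv(priv, &li);
}

static void example_remove(struct platform_device *pdev)
{
	/* was "return asoc_simple_remove(pdev);" -- now returns void */
	simple_util_remove(pdev);
}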
diff --git a/include/sound/soc-acpi-intel-match.h b/include/sound/soc-acpi-intel-match.h
index 82a7db23db69..845e7608ac37 100644
--- a/include/sound/soc-acpi-intel-match.h
+++ b/include/sound/soc-acpi-intel-match.h
@@ -31,6 +31,8 @@ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_jsl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_rpl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_lnl_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_arl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cnl_sdw_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cfl_sdw_machines[];
@@ -40,6 +42,8 @@ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_tgl_sdw_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_sdw_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_rpl_sdw_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_sdw_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_lnl_sdw_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_arl_sdw_machines[];
/*
* generic table used for HDA codec-based platforms, possibly with
diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h
index 528279056b3a..23d6d6bfb073 100644
--- a/include/sound/soc-acpi.h
+++ b/include/sound/soc-acpi.h
@@ -9,6 +9,7 @@
#include <linux/stddef.h>
#include <linux/acpi.h>
#include <linux/mod_devicetable.h>
+#include <linux/soundwire/sdw.h>
struct snd_soc_acpi_package_context {
char *name; /* package name */
@@ -67,6 +68,10 @@ static inline struct snd_soc_acpi_mach *snd_soc_acpi_codec_list(void *arg)
* @i2s_link_mask: I2S/TDM links enabled on the board
* @num_dai_drivers: number of elements in @dai_drivers
* @dai_drivers: pointer to dai_drivers, used e.g. in nocodec mode
+ * @subsystem_vendor: optional PCI SSID vendor value
+ * @subsystem_device: optional PCI SSID device value
+ * @subsystem_id_set: true if a value has been written to
+ * subsystem_vendor and subsystem_device.
*/
struct snd_soc_acpi_mach_params {
u32 acpi_ipc_irq_index;
@@ -79,6 +84,9 @@ struct snd_soc_acpi_mach_params {
u32 i2s_link_mask;
u32 num_dai_drivers;
struct snd_soc_dai_driver *dai_drivers;
+ unsigned short subsystem_vendor;
+ unsigned short subsystem_device;
+ bool subsystem_id_set;
};
/**
@@ -150,6 +158,7 @@ struct snd_soc_acpi_link_adr {
* all firmware/topology related fields.
*
* @id: ACPI ID (usually the codec's) used to find a matching machine driver.
+ * @uid: ACPI Unique ID, can be used to disambiguate matches.
* @comp_ids: list of compatible audio codecs using the same machine driver,
* firmware and topology
* @link_mask: describes required board layout, e.g. for SoundWire.
@@ -208,4 +217,9 @@ static inline bool snd_soc_acpi_sof_parent(struct device *dev)
!strncmp(dev->parent->driver->name, "sof-audio-acpi", strlen("sof-audio-acpi"));
}
+bool snd_soc_acpi_sdw_link_slaves_found(struct device *dev,
+ const struct snd_soc_acpi_link_adr *link,
+ struct sdw_extended_slave_id *ids,
+ int num_slaves);
+
#endif
diff --git a/include/sound/soc-card.h b/include/sound/soc-card.h
index fc94dfb0021f..1f4c39922d82 100644
--- a/include/sound/soc-card.h
+++ b/include/sound/soc-card.h
@@ -30,6 +30,8 @@ static inline void snd_soc_card_mutex_unlock(struct snd_soc_card *card)
struct snd_kcontrol *snd_soc_card_get_kcontrol(struct snd_soc_card *soc_card,
const char *name);
+struct snd_kcontrol *snd_soc_card_get_kcontrol_locked(struct snd_soc_card *soc_card,
+ const char *name);
int snd_soc_card_jack_new(struct snd_soc_card *card, const char *id, int type,
struct snd_soc_jack *jack);
int snd_soc_card_jack_new_pins(struct snd_soc_card *card, const char *id,
@@ -59,6 +61,43 @@ int snd_soc_card_add_dai_link(struct snd_soc_card *card,
void snd_soc_card_remove_dai_link(struct snd_soc_card *card,
struct snd_soc_dai_link *dai_link);
+#ifdef CONFIG_PCI
+static inline void snd_soc_card_set_pci_ssid(struct snd_soc_card *card,
+ unsigned short vendor,
+ unsigned short device)
+{
+ card->pci_subsystem_vendor = vendor;
+ card->pci_subsystem_device = device;
+ card->pci_subsystem_set = true;
+}
+
+static inline int snd_soc_card_get_pci_ssid(struct snd_soc_card *card,
+ unsigned short *vendor,
+ unsigned short *device)
+{
+ if (!card->pci_subsystem_set)
+ return -ENOENT;
+
+ *vendor = card->pci_subsystem_vendor;
+ *device = card->pci_subsystem_device;
+
+ return 0;
+}
+#else /* !CONFIG_PCI */
+static inline void snd_soc_card_set_pci_ssid(struct snd_soc_card *card,
+ unsigned short vendor,
+ unsigned short device)
+{
+}
+
+static inline int snd_soc_card_get_pci_ssid(struct snd_soc_card *card,
+ unsigned short *vendor,
+ unsigned short *device)
+{
+ return -ENOENT;
+}
+#endif /* CONFIG_PCI */
+
/* device driver data */
static inline void snd_soc_card_set_drvdata(struct snd_soc_card *card,
void *data)
@@ -78,8 +117,8 @@ struct snd_soc_dai *snd_soc_card_get_codec_dai(struct snd_soc_card *card,
struct snd_soc_pcm_runtime *rtd;
for_each_card_rtds(card, rtd) {
- if (!strcmp(asoc_rtd_to_codec(rtd, 0)->name, dai_name))
- return asoc_rtd_to_codec(rtd, 0);
+ if (!strcmp(snd_soc_rtd_to_codec(rtd, 0)->name, dai_name))
+ return snd_soc_rtd_to_codec(rtd, 0);
}
return NULL;
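A short sketch of the new PCI SSID helpers: the card driver records the subsystem IDs once, and machine drivers can later query them (snd_soc_card_get_pci_ssid() returns -ENOENT when nothing was recorded, or always when CONFIG_PCI is off):

static void example_store_ssid(struct snd_soc_card *card, struct pci_dev *pci)
{
	snd_soc_card_set_pci_ssid(card, pci->subsystem_vendor,
				  pci->subsystem_device);
}

static bool example_ssid_matches(struct snd_soc_card *card, unsigned short v)
{
	unsigned short vendor, device;

	if (snd_soc_card_get_pci_ssid(card, &vendor, &device))
		return false;	/* no SSID recorded */

	return vendor == v;
}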
diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h
index 0814ed143864..ceca69b46a82 100644
--- a/include/sound/soc-component.h
+++ b/include/sound/soc-component.h
@@ -137,10 +137,10 @@ struct snd_soc_component_driver {
struct timespec64 *audio_ts,
struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
struct snd_pcm_audio_tstamp_report *audio_tstamp_report);
- int (*copy_user)(struct snd_soc_component *component,
- struct snd_pcm_substream *substream, int channel,
- unsigned long pos, void __user *buf,
- unsigned long bytes);
+ int (*copy)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int channel,
+ unsigned long pos, struct iov_iter *iter,
+ unsigned long bytes);
struct page *(*page)(struct snd_soc_component *component,
struct snd_pcm_substream *substream,
unsigned long offset);
@@ -159,6 +159,15 @@ struct snd_soc_component_driver {
int remove_order;
/*
+ * soc_pcm_trigger() start/stop sequence.
+ * see also
+ * snd_soc_dai_link
+ * soc_pcm_trigger()
+ */
+ enum snd_soc_trigger_order trigger_start;
+ enum snd_soc_trigger_order trigger_stop;
+
+ /*
* signal if the module handling the component should not be removed
* if a pcm is open. Setting this would prevent the module
 * refcount being incremented in probe() but allow it to be incremented
@@ -190,8 +199,6 @@ struct snd_soc_component_driver {
bool use_dai_pcm_id; /* use DAI link PCM ID as PCM device number */
int be_pcm_base; /* base device ID for all BE PCMs */
- unsigned int start_dma_last;
-
#ifdef CONFIG_DEBUG_FS
const char *debugfs_prefix;
#endif
@@ -454,6 +461,10 @@ int snd_soc_component_force_enable_pin_unlocked(
struct snd_soc_component *component,
const char *pin);
+/* component controls */
+int snd_soc_component_notify_control(struct snd_soc_component *component,
+ const char * const ctl);
+
/* component driver ops */
int snd_soc_component_open(struct snd_soc_component *component,
struct snd_pcm_substream *substream);
@@ -498,9 +509,9 @@ int snd_soc_pcm_component_pointer(struct snd_pcm_substream *substream);
int snd_soc_pcm_component_ioctl(struct snd_pcm_substream *substream,
unsigned int cmd, void *arg);
int snd_soc_pcm_component_sync_stop(struct snd_pcm_substream *substream);
-int snd_soc_pcm_component_copy_user(struct snd_pcm_substream *substream,
- int channel, unsigned long pos,
- void __user *buf, unsigned long bytes);
+int snd_soc_pcm_component_copy(struct snd_pcm_substream *substream,
+ int channel, unsigned long pos,
+ struct iov_iter *iter, unsigned long bytes);
struct page *snd_soc_pcm_component_page(struct snd_pcm_substream *substream,
unsigned long offset);
int snd_soc_pcm_component_mmap(struct snd_pcm_substream *substream,
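The copy_user() component callback becomes copy(), taking a struct iov_iter rather than a raw user pointer so one implementation serves user and kernel buffers alike. A hedged sketch, assuming a hypothetical example_dma_area() helper that returns the substream's buffer address:

static int example_copy(struct snd_soc_component *component,
			struct snd_pcm_substream *substream, int channel,
			unsigned long pos, struct iov_iter *iter,
			unsigned long bytes)
{
	void *dma_area = example_dma_area(substream, channel) + pos;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		/* copy_from_iter() replaces copy_from_user() */
		if (copy_from_iter(dma_area, bytes, iter) != bytes)
			return -EFAULT;
	} else {
		if (copy_to_iter(dma_area, bytes, iter) != bytes)
			return -EFAULT;
	}

	return 0;
}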
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index e3906ecda740..adcd8719d343 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -271,7 +271,18 @@ int snd_soc_dai_compr_get_metadata(struct snd_soc_dai *dai,
struct snd_compr_stream *cstream,
struct snd_compr_metadata *metadata);
+const char *snd_soc_dai_name_get(struct snd_soc_dai *dai);
+
struct snd_soc_dai_ops {
+ /* DAI driver callbacks */
+ int (*probe)(struct snd_soc_dai *dai);
+ int (*remove)(struct snd_soc_dai *dai);
+ /* compress dai */
+ int (*compress_new)(struct snd_soc_pcm_runtime *rtd, int num);
+	/* Optional callback used at PCM creation */
+ int (*pcm_new)(struct snd_soc_pcm_runtime *rtd,
+ struct snd_soc_dai *dai);
+
/*
* DAI clocking configuration, all optional.
* Called by soc_card drivers, normally in their hw_params.
@@ -353,8 +364,13 @@ struct snd_soc_dai_ops {
u64 *auto_selectable_formats;
int num_auto_selectable_formats;
+ /* probe ordering - for components with runtime dependencies */
+ int probe_order;
+ int remove_order;
+
/* bit field */
unsigned int no_capture_mute:1;
+ unsigned int mute_unmute_on_trigger:1;
};
struct snd_soc_cdai_ops {
@@ -397,15 +413,7 @@ struct snd_soc_dai_driver {
unsigned int id;
unsigned int base;
struct snd_soc_dobj dobj;
-
- /* DAI driver callbacks */
- int (*probe)(struct snd_soc_dai *dai);
- int (*remove)(struct snd_soc_dai *dai);
- /* compress dai */
- int (*compress_new)(struct snd_soc_pcm_runtime *rtd, int num);
- /* Optional Callback used at pcm creation*/
- int (*pcm_new)(struct snd_soc_pcm_runtime *rtd,
- struct snd_soc_dai *dai);
+ struct of_phandle_args *dai_args;
/* ops */
const struct snd_soc_dai_ops *ops;
@@ -417,10 +425,6 @@ struct snd_soc_dai_driver {
unsigned int symmetric_rate:1;
unsigned int symmetric_channels:1;
unsigned int symmetric_sample_bits:1;
-
- /* probe ordering - for components with runtime dependencies */
- int probe_order;
- int remove_order;
};
/* for Playback/Capture */
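The DAI probe/remove, compress_new and pcm_new callbacks (plus probe_order/remove_order) move from snd_soc_dai_driver into snd_soc_dai_ops, so drivers now express them through their ops table. A minimal sketch with a hypothetical example_dai_probe():

static int example_dai_probe(struct snd_soc_dai *dai)
{
	return 0;
}

static const struct snd_soc_dai_ops example_dai_ops = {
	.probe = example_dai_probe,	/* was .probe on snd_soc_dai_driver */
};

static struct snd_soc_dai_driver example_dai_drv = {
	.name = "example-dai",
	.ops  = &example_dai_ops,
};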
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 87f8e1793af1..667ecd4daa68 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -42,36 +42,45 @@ struct soc_enum;
/* codec domain */
#define SND_SOC_DAPM_VMID(wname) \
-{ .id = snd_soc_dapm_vmid, .name = wname, .kcontrol_news = NULL, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_vmid, .name = wname, .kcontrol_news = NULL, \
.num_kcontrols = 0}
/* platform domain */
#define SND_SOC_DAPM_SIGGEN(wname) \
-{ .id = snd_soc_dapm_siggen, .name = wname, .kcontrol_news = NULL, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_siggen, .name = wname, .kcontrol_news = NULL, \
.num_kcontrols = 0, .reg = SND_SOC_NOPM }
#define SND_SOC_DAPM_SINK(wname) \
-{ .id = snd_soc_dapm_sink, .name = wname, .kcontrol_news = NULL, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_sink, .name = wname, .kcontrol_news = NULL, \
.num_kcontrols = 0, .reg = SND_SOC_NOPM }
#define SND_SOC_DAPM_INPUT(wname) \
-{ .id = snd_soc_dapm_input, .name = wname, .kcontrol_news = NULL, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_input, .name = wname, .kcontrol_news = NULL, \
.num_kcontrols = 0, .reg = SND_SOC_NOPM }
#define SND_SOC_DAPM_OUTPUT(wname) \
-{ .id = snd_soc_dapm_output, .name = wname, .kcontrol_news = NULL, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_output, .name = wname, .kcontrol_news = NULL, \
.num_kcontrols = 0, .reg = SND_SOC_NOPM }
#define SND_SOC_DAPM_MIC(wname, wevent) \
-{ .id = snd_soc_dapm_mic, .name = wname, .kcontrol_news = NULL, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_mic, .name = wname, .kcontrol_news = NULL, \
.num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \
.event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD}
#define SND_SOC_DAPM_HP(wname, wevent) \
-{ .id = snd_soc_dapm_hp, .name = wname, .kcontrol_news = NULL, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_hp, .name = wname, .kcontrol_news = NULL, \
.num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \
.event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD}
#define SND_SOC_DAPM_SPK(wname, wevent) \
-{ .id = snd_soc_dapm_spk, .name = wname, .kcontrol_news = NULL, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_spk, .name = wname, .kcontrol_news = NULL, \
.num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \
.event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD}
#define SND_SOC_DAPM_LINE(wname, wevent) \
-{ .id = snd_soc_dapm_line, .name = wname, .kcontrol_news = NULL, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_line, .name = wname, .kcontrol_news = NULL, \
.num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \
.event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD}
@@ -82,93 +91,110 @@ struct soc_enum;
/* path domain */
#define SND_SOC_DAPM_PGA(wname, wreg, wshift, winvert,\
wcontrols, wncontrols) \
-{ .id = snd_soc_dapm_pga, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_pga, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = wncontrols}
#define SND_SOC_DAPM_OUT_DRV(wname, wreg, wshift, winvert,\
wcontrols, wncontrols) \
-{ .id = snd_soc_dapm_out_drv, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_out_drv, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = wncontrols}
#define SND_SOC_DAPM_MIXER(wname, wreg, wshift, winvert, \
wcontrols, wncontrols)\
-{ .id = snd_soc_dapm_mixer, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_mixer, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = wncontrols}
#define SND_SOC_DAPM_MIXER_NAMED_CTL(wname, wreg, wshift, winvert, \
wcontrols, wncontrols)\
-{ .id = snd_soc_dapm_mixer_named_ctl, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_mixer_named_ctl, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = wncontrols}
/* DEPRECATED: use SND_SOC_DAPM_SUPPLY */
#define SND_SOC_DAPM_MICBIAS(wname, wreg, wshift, winvert) \
-{ .id = snd_soc_dapm_micbias, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_micbias, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = NULL, .num_kcontrols = 0}
#define SND_SOC_DAPM_SWITCH(wname, wreg, wshift, winvert, wcontrols) \
-{ .id = snd_soc_dapm_switch, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_switch, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = 1}
#define SND_SOC_DAPM_MUX(wname, wreg, wshift, winvert, wcontrols) \
-{ .id = snd_soc_dapm_mux, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_mux, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = 1}
#define SND_SOC_DAPM_DEMUX(wname, wreg, wshift, winvert, wcontrols) \
-{ .id = snd_soc_dapm_demux, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_demux, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = 1}
/* Simplified versions of above macros, assuming wncontrols = ARRAY_SIZE(wcontrols) */
#define SOC_PGA_ARRAY(wname, wreg, wshift, winvert,\
wcontrols) \
-{ .id = snd_soc_dapm_pga, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_pga, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = ARRAY_SIZE(wcontrols)}
#define SOC_MIXER_ARRAY(wname, wreg, wshift, winvert, \
wcontrols)\
-{ .id = snd_soc_dapm_mixer, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_mixer, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = ARRAY_SIZE(wcontrols)}
#define SOC_MIXER_NAMED_CTL_ARRAY(wname, wreg, wshift, winvert, \
wcontrols)\
-{ .id = snd_soc_dapm_mixer_named_ctl, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_mixer_named_ctl, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = ARRAY_SIZE(wcontrols)}
/* path domain with event - event handler must return 0 for success */
#define SND_SOC_DAPM_PGA_E(wname, wreg, wshift, winvert, wcontrols, \
wncontrols, wevent, wflags) \
-{ .id = snd_soc_dapm_pga, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_pga, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = wncontrols, \
.event = wevent, .event_flags = wflags}
#define SND_SOC_DAPM_OUT_DRV_E(wname, wreg, wshift, winvert, wcontrols, \
wncontrols, wevent, wflags) \
-{ .id = snd_soc_dapm_out_drv, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_out_drv, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = wncontrols, \
.event = wevent, .event_flags = wflags}
#define SND_SOC_DAPM_MIXER_E(wname, wreg, wshift, winvert, wcontrols, \
wncontrols, wevent, wflags) \
-{ .id = snd_soc_dapm_mixer, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_mixer, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = wncontrols, \
.event = wevent, .event_flags = wflags}
#define SND_SOC_DAPM_MIXER_NAMED_CTL_E(wname, wreg, wshift, winvert, \
wcontrols, wncontrols, wevent, wflags) \
-{ .id = snd_soc_dapm_mixer, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_mixer, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, \
.num_kcontrols = wncontrols, .event = wevent, .event_flags = wflags}
#define SND_SOC_DAPM_SWITCH_E(wname, wreg, wshift, winvert, wcontrols, \
wevent, wflags) \
-{ .id = snd_soc_dapm_switch, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_switch, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = 1, \
.event = wevent, .event_flags = wflags}
#define SND_SOC_DAPM_MUX_E(wname, wreg, wshift, winvert, wcontrols, \
wevent, wflags) \
-{ .id = snd_soc_dapm_mux, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_mux, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = 1, \
.event = wevent, .event_flags = wflags}
@@ -176,101 +202,121 @@ struct soc_enum;
/* additional sequencing control within an event type */
#define SND_SOC_DAPM_PGA_S(wname, wsubseq, wreg, wshift, winvert, \
wevent, wflags) \
-{ .id = snd_soc_dapm_pga, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_pga, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.event = wevent, .event_flags = wflags, \
.subseq = wsubseq}
#define SND_SOC_DAPM_SUPPLY_S(wname, wsubseq, wreg, wshift, winvert, wevent, \
wflags) \
-{ .id = snd_soc_dapm_supply, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_supply, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.event = wevent, .event_flags = wflags, .subseq = wsubseq}
/* Simplified versions of above macros, assuming wncontrols = ARRAY_SIZE(wcontrols) */
#define SOC_PGA_E_ARRAY(wname, wreg, wshift, winvert, wcontrols, \
wevent, wflags) \
-{ .id = snd_soc_dapm_pga, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_pga, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = ARRAY_SIZE(wcontrols), \
.event = wevent, .event_flags = wflags}
#define SOC_MIXER_E_ARRAY(wname, wreg, wshift, winvert, wcontrols, \
wevent, wflags) \
-{ .id = snd_soc_dapm_mixer, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_mixer, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = ARRAY_SIZE(wcontrols), \
.event = wevent, .event_flags = wflags}
#define SOC_MIXER_NAMED_CTL_E_ARRAY(wname, wreg, wshift, winvert, \
wcontrols, wevent, wflags) \
-{ .id = snd_soc_dapm_mixer, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_mixer, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = ARRAY_SIZE(wcontrols), \
.event = wevent, .event_flags = wflags}
/* events that are pre and post DAPM */
#define SND_SOC_DAPM_PRE(wname, wevent) \
-{ .id = snd_soc_dapm_pre, .name = wname, .kcontrol_news = NULL, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_pre, .name = wname, .kcontrol_news = NULL, \
.num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \
.event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD}
#define SND_SOC_DAPM_POST(wname, wevent) \
-{ .id = snd_soc_dapm_post, .name = wname, .kcontrol_news = NULL, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_post, .name = wname, .kcontrol_news = NULL, \
.num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \
.event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD}
/* stream domain */
#define SND_SOC_DAPM_AIF_IN(wname, stname, wchan, wreg, wshift, winvert) \
-{ .id = snd_soc_dapm_aif_in, .name = wname, .sname = stname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_aif_in, .name = wname, .sname = stname, \
.channel = wchan, SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), }
#define SND_SOC_DAPM_AIF_IN_E(wname, stname, wchan, wreg, wshift, winvert, \
wevent, wflags) \
-{ .id = snd_soc_dapm_aif_in, .name = wname, .sname = stname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_aif_in, .name = wname, .sname = stname, \
.channel = wchan, SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.event = wevent, .event_flags = wflags }
#define SND_SOC_DAPM_AIF_OUT(wname, stname, wchan, wreg, wshift, winvert) \
-{ .id = snd_soc_dapm_aif_out, .name = wname, .sname = stname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_aif_out, .name = wname, .sname = stname, \
.channel = wchan, SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), }
#define SND_SOC_DAPM_AIF_OUT_E(wname, stname, wchan, wreg, wshift, winvert, \
wevent, wflags) \
-{ .id = snd_soc_dapm_aif_out, .name = wname, .sname = stname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_aif_out, .name = wname, .sname = stname, \
.channel = wchan, SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.event = wevent, .event_flags = wflags }
#define SND_SOC_DAPM_DAC(wname, stname, wreg, wshift, winvert) \
-{ .id = snd_soc_dapm_dac, .name = wname, .sname = stname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_dac, .name = wname, .sname = stname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert) }
#define SND_SOC_DAPM_DAC_E(wname, stname, wreg, wshift, winvert, \
wevent, wflags) \
-{ .id = snd_soc_dapm_dac, .name = wname, .sname = stname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_dac, .name = wname, .sname = stname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.event = wevent, .event_flags = wflags}
#define SND_SOC_DAPM_ADC(wname, stname, wreg, wshift, winvert) \
-{ .id = snd_soc_dapm_adc, .name = wname, .sname = stname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_adc, .name = wname, .sname = stname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), }
#define SND_SOC_DAPM_ADC_E(wname, stname, wreg, wshift, winvert, \
wevent, wflags) \
-{ .id = snd_soc_dapm_adc, .name = wname, .sname = stname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_adc, .name = wname, .sname = stname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.event = wevent, .event_flags = wflags}
#define SND_SOC_DAPM_CLOCK_SUPPLY(wname) \
-{ .id = snd_soc_dapm_clock_supply, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_clock_supply, .name = wname, \
.reg = SND_SOC_NOPM, .event = dapm_clock_event, \
.event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD }
/* generic widgets */
#define SND_SOC_DAPM_REG(wid, wname, wreg, wshift, wmask, won_val, woff_val) \
-{ .id = wid, .name = wname, .kcontrol_news = NULL, .num_kcontrols = 0, \
+(struct snd_soc_dapm_widget) { \
+ .id = wid, .name = wname, .kcontrol_news = NULL, .num_kcontrols = 0, \
.reg = wreg, .shift = wshift, .mask = wmask, \
.on_val = won_val, .off_val = woff_val, }
#define SND_SOC_DAPM_SUPPLY(wname, wreg, wshift, winvert, wevent, wflags) \
-{ .id = snd_soc_dapm_supply, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_supply, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.event = wevent, .event_flags = wflags}
#define SND_SOC_DAPM_REGULATOR_SUPPLY(wname, wdelay, wflags) \
-{ .id = snd_soc_dapm_regulator_supply, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_regulator_supply, .name = wname, \
.reg = SND_SOC_NOPM, .shift = wdelay, .event = dapm_regulator_event, \
.event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD, \
.on_val = wflags}
#define SND_SOC_DAPM_PINCTRL(wname, active, sleep) \
-{ .id = snd_soc_dapm_pinctrl, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_pinctrl, .name = wname, \
.priv = (&(struct snd_soc_dapm_pinctrl_priv) \
{ .active_state = active, .sleep_state = sleep,}), \
.reg = SND_SOC_NOPM, .event = dapm_pinctrl_event, \
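Prefixing each initializer with (struct snd_soc_dapm_widget) turns these macros into compound literals: they still work in array initializers, but can now also be used as plain expressions, for example to build a widget at runtime. A small sketch of both forms:

/* unchanged: array initializer */
static const struct snd_soc_dapm_widget example_widgets[] = {
	SND_SOC_DAPM_INPUT("IN1"),
	SND_SOC_DAPM_OUTPUT("OUT1"),
};

/* new: usable as an ordinary expression */
static void example_fill_widget(struct snd_soc_dapm_widget *w)
{
	*w = SND_SOC_DAPM_INPUT("IN2");
}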
@@ -423,6 +469,7 @@ void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card);
int snd_soc_dapm_update_dai(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params, struct snd_soc_dai *dai);
+int snd_soc_dapm_widget_name_cmp(struct snd_soc_dapm_widget *widget, const char *s);
/* dapm path setup */
int snd_soc_dapm_new_widgets(struct snd_soc_card *card);
@@ -604,6 +651,7 @@ struct snd_soc_dapm_widget {
unsigned char power_checked:1; /* power checked this run */
unsigned char is_supply:1; /* Widget is a supply type widget */
unsigned char is_ep:2; /* Widget is a endpoint type widget */
+ unsigned char no_wname_in_kcontrol_name:1; /* No widget name prefix in kcontrol name */
int subseq; /* sort within widget type */
int (*power_check)(struct snd_soc_dapm_widget *w);
@@ -670,7 +718,7 @@ struct snd_soc_dapm_context {
/* A list of widgets associated with an object, typically a snd_kcontrol */
struct snd_soc_dapm_widget_list {
int num_widgets;
- struct snd_soc_dapm_widget *widgets[];
+ struct snd_soc_dapm_widget *widgets[] __counted_by(num_widgets);
};
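__counted_by(num_widgets) lets FORTIFY_SOURCE and -fsanitize=bounds check indexing into the flexible array against num_widgets, so the counter must be set before widgets[] is touched. A hedged allocation sketch:

static struct snd_soc_dapm_widget_list *example_alloc_list(int n)
{
	struct snd_soc_dapm_widget_list *list;

	list = kzalloc(struct_size(list, widgets, n), GFP_KERNEL);
	if (!list)
		return NULL;

	list->num_widgets = n;	/* set before indexing widgets[] */
	return list;
}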
#define for_each_dapm_widgets(list, i, widget) \
diff --git a/include/sound/soc-dmaengine.h b/include/sound/soc-dmaengine.h
new file mode 100644
index 000000000000..b435133de04d
--- /dev/null
+++ b/include/sound/soc-dmaengine.h
@@ -0,0 +1,26 @@
+/*
+ * Generic ASoC DMA engine backend
+ *
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _LINUX_SND_SOC_DMAENGINE_H
+#define _LINUX_SND_SOC_DMAENGINE_H
+
+#include <linux/dmaengine.h>
+
+struct soc_dma_config {
+ dma_filter_fn filter;
+ void *data;
+
+ dma_addr_t reg;
+ enum dma_slave_buswidth width;
+ u32 maxburst;
+ size_t align;
+ size_t fifo_size;
+};
+
+#endif
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 05004c048dd5..6defc5547ff9 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -10,6 +10,7 @@
#ifndef __LINUX_SND_SOC_H
#define __LINUX_SND_SOC_H
+#include <linux/args.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/types.h>
@@ -607,10 +608,19 @@ int snd_soc_get_strobe(struct snd_kcontrol *kcontrol,
int snd_soc_put_strobe(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
+enum snd_soc_trigger_order {
+ /* start stop */
+ SND_SOC_TRIGGER_ORDER_DEFAULT = 0, /* Link->Component->DAI DAI->Component->Link */
+ SND_SOC_TRIGGER_ORDER_LDC, /* Link->DAI->Component Component->DAI->Link */
+
+ SND_SOC_TRIGGER_ORDER_MAX,
+};
+
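With SND_SOC_TRIGGER_ORDER_LDC the stop sequence runs Component->DAI->Link, i.e. the component (typically the DMA side) is stopped before the CPU DAI, which is what the removed stop_dma_first flag used to request. A sketch of a DAI link opting in via the trigger_stop field added below:

static struct snd_soc_dai_link example_be_link = {
	.name		= "example-be",
	/* stop DMA/component before the DAI, as stop_dma_first did */
	.trigger_stop	= SND_SOC_TRIGGER_ORDER_LDC,
};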
/* SoC PCM stream information */
struct snd_soc_pcm_stream {
const char *stream_name;
u64 formats; /* SNDRV_PCM_FMTBIT_* */
+ u32 subformats; /* for S32_LE format, SNDRV_PCM_SUBFMTBIT_* */
unsigned int rates; /* SNDRV_PCM_RATE_* */
unsigned int rate_min; /* min rate */
unsigned int rate_max; /* max rate */
@@ -633,7 +643,6 @@ struct snd_soc_compr_ops {
int (*startup)(struct snd_compr_stream *);
void (*shutdown)(struct snd_compr_stream *);
int (*set_params)(struct snd_compr_stream *);
- int (*trigger)(struct snd_compr_stream *);
};
struct snd_soc_component*
@@ -644,6 +653,49 @@ struct snd_soc_dai_link_component {
const char *name;
struct device_node *of_node;
const char *dai_name;
+ struct of_phandle_args *dai_args;
+};
+
+/*
+ * [dai_link->ch_maps Image sample]
+ *
+ *-------------------------
+ * CPU0 <---> Codec0
+ *
+ * ch-map[0].cpu = 0 ch-map[0].codec = 0
+ *
+ *-------------------------
+ * CPU0 <---> Codec0
+ * CPU1 <---> Codec1
+ * CPU2 <---> Codec2
+ *
+ * ch-map[0].cpu = 0 ch-map[0].codec = 0
+ * ch-map[1].cpu = 1 ch-map[1].codec = 1
+ * ch-map[2].cpu = 2 ch-map[2].codec = 2
+ *
+ *-------------------------
+ * CPU0 <---> Codec0
+ * CPU1 <-+-> Codec1
+ * CPU2 <-/
+ *
+ * ch-map[0].cpu = 0 ch-map[0].codec = 0
+ * ch-map[1].cpu = 1 ch-map[1].codec = 1
+ * ch-map[2].cpu = 2 ch-map[2].codec = 1
+ *
+ *-------------------------
+ * CPU0 <---> Codec0
+ * CPU1 <-+-> Codec1
+ * \-> Codec2
+ *
+ * ch-map[0].cpu = 0 ch-map[0].codec = 0
+ * ch-map[1].cpu = 1 ch-map[1].codec = 1
+ * ch-map[2].cpu = 1 ch-map[2].codec = 2
+ *
+ */
+struct snd_soc_dai_link_ch_map {
+ unsigned int cpu;
+ unsigned int codec;
+ unsigned int ch_mask;
};
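A sketch of the last diagram above (CPU1 feeding both Codec1 and Codec2) expressed as a ch_maps table; the link would have num_cpus = 2 and num_codecs = 3:

static struct snd_soc_dai_link_ch_map example_ch_maps[] = {
	{ .cpu = 0, .codec = 0 },
	{ .cpu = 1, .codec = 1 },
	{ .cpu = 1, .codec = 2 },	/* CPU1 is shared */
};
/* then: link->ch_maps = example_ch_maps; */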
struct snd_soc_dai_link {
@@ -674,6 +726,9 @@ struct snd_soc_dai_link {
struct snd_soc_dai_link_component *codecs;
unsigned int num_codecs;
+ /* num_ch_maps = max(num_cpu, num_codecs) */
+ struct snd_soc_dai_link_ch_map *ch_maps;
+
/*
* You MAY specify the link's platform/PCM/DMA driver, either by
* device name, or by DT/OF node, but not both. Some forms of link
@@ -708,6 +763,15 @@ struct snd_soc_dai_link {
const struct snd_soc_ops *ops;
const struct snd_soc_compr_ops *compr_ops;
+ /*
+ * soc_pcm_trigger() start/stop sequence.
+ * see also
+ * snd_soc_component_driver
+ * soc_pcm_trigger()
+ */
+ enum snd_soc_trigger_order trigger_start;
+ enum snd_soc_trigger_order trigger_stop;
+
/* Mark this pcm with non atomic ops */
unsigned int nonatomic:1;
@@ -746,48 +810,52 @@ struct snd_soc_dai_link {
/* Do not create a PCM for this DAI link (Backend link) */
unsigned int ignore:1;
- /* This flag will reorder stop sequence. By enabling this flag
- * DMA controller stop sequence will be invoked first followed by
- * CPU DAI driver stop sequence
- */
- unsigned int stop_dma_first:1;
-
#ifdef CONFIG_SND_SOC_TOPOLOGY
struct snd_soc_dobj dobj; /* For topology */
#endif
};
+static inline int snd_soc_link_num_ch_map(struct snd_soc_dai_link *link) {
+ return max(link->num_cpus, link->num_codecs);
+}
+
static inline struct snd_soc_dai_link_component*
-asoc_link_to_cpu(struct snd_soc_dai_link *link, int n) {
+snd_soc_link_to_cpu(struct snd_soc_dai_link *link, int n) {
return &(link)->cpus[n];
}
static inline struct snd_soc_dai_link_component*
-asoc_link_to_codec(struct snd_soc_dai_link *link, int n) {
+snd_soc_link_to_codec(struct snd_soc_dai_link *link, int n) {
return &(link)->codecs[n];
}
static inline struct snd_soc_dai_link_component*
-asoc_link_to_platform(struct snd_soc_dai_link *link, int n) {
+snd_soc_link_to_platform(struct snd_soc_dai_link *link, int n) {
return &(link)->platforms[n];
}
#define for_each_link_codecs(link, i, codec) \
for ((i) = 0; \
((i) < link->num_codecs) && \
- ((codec) = asoc_link_to_codec(link, i)); \
+ ((codec) = snd_soc_link_to_codec(link, i)); \
(i)++)
#define for_each_link_platforms(link, i, platform) \
for ((i) = 0; \
((i) < link->num_platforms) && \
- ((platform) = asoc_link_to_platform(link, i)); \
+ ((platform) = snd_soc_link_to_platform(link, i)); \
(i)++)
#define for_each_link_cpus(link, i, cpu) \
for ((i) = 0; \
((i) < link->num_cpus) && \
- ((cpu) = asoc_link_to_cpu(link, i)); \
+ ((cpu) = snd_soc_link_to_cpu(link, i)); \
+ (i)++)
+
+#define for_each_link_ch_maps(link, i, ch_map) \
+ for ((i) = 0; \
+ ((i) < snd_soc_link_num_ch_map(link) && \
+ ((ch_map) = link->ch_maps + i)); \
(i)++)
/*
@@ -853,12 +921,8 @@ asoc_link_to_platform(struct snd_soc_dai_link *link, int n) {
.platforms = platform, \
.num_platforms = ARRAY_SIZE(platform)
-#define SND_SOC_DAILINK_REGx(_1, _2, _3, func, ...) func
#define SND_SOC_DAILINK_REG(...) \
- SND_SOC_DAILINK_REGx(__VA_ARGS__, \
- SND_SOC_DAILINK_REG3, \
- SND_SOC_DAILINK_REG2, \
- SND_SOC_DAILINK_REG1)(__VA_ARGS__)
+ CONCATENATE(SND_SOC_DAILINK_REG, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__)
#define SND_SOC_DAILINK_DEF(name, def...) \
static struct snd_soc_dai_link_component name[] = { def }
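SND_SOC_DAILINK_REG() now pastes the result of COUNT_ARGS() (from <linux/args.h>, included above) onto the macro name, so a three-argument call expands to SND_SOC_DAILINK_REG3() without the hand-rolled dispatcher. Call sites are unchanged, e.g.:

SND_SOC_DAILINK_DEF(example_cpu,
	DAILINK_COMP_ARRAY(COMP_CPU("example-cpu-dai")));
SND_SOC_DAILINK_DEF(example_codec,
	DAILINK_COMP_ARRAY(COMP_CODEC("example-codec", "example-codec-dai")));
SND_SOC_DAILINK_DEF(example_plat,
	DAILINK_COMP_ARRAY(COMP_PLATFORM("example-platform")));

static struct snd_soc_dai_link example_dai_link = {
	.name = "example",
	SND_SOC_DAILINK_REG(example_cpu, example_codec, example_plat),
};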
@@ -875,9 +939,10 @@ asoc_link_to_platform(struct snd_soc_dai_link *link, int n) {
#define COMP_PLATFORM(_name) { .name = _name }
#define COMP_AUX(_name) { .name = _name }
#define COMP_CODEC_CONF(_name) { .name = _name }
-#define COMP_DUMMY() { .name = "snd-soc-dummy", .dai_name = "snd-soc-dummy-dai", }
+#define COMP_DUMMY() /* see snd_soc_fill_dummy_dai() */
extern struct snd_soc_dai_link_component null_dailink_component[0];
+extern struct snd_soc_dai_link_component snd_soc_dummy_dlc;
struct snd_soc_codec_conf {
@@ -914,6 +979,17 @@ struct snd_soc_card {
#ifdef CONFIG_DMI
char dmi_longname[80];
#endif /* CONFIG_DMI */
+
+#ifdef CONFIG_PCI
+ /*
+ * PCI does not define 0 as invalid, so pci_subsystem_set indicates
+ * whether a value has been written to these fields.
+ */
+ unsigned short pci_subsystem_vendor;
+ unsigned short pci_subsystem_device;
+ bool pci_subsystem_set;
+#endif /* CONFIG_PCI */
+
char topology_shortname[32];
struct device *dev;
@@ -1084,8 +1160,8 @@ struct snd_soc_pcm_runtime {
* dais = cpu_dai + codec_dai
* see
* soc_new_pcm_runtime()
- * asoc_rtd_to_cpu()
- * asoc_rtd_to_codec()
+ * snd_soc_rtd_to_cpu()
+ * snd_soc_rtd_to_codec()
*/
struct snd_soc_dai **dais;
@@ -1108,13 +1184,16 @@ struct snd_soc_pcm_runtime {
unsigned int pop_wait:1;
unsigned int fe_compr:1; /* for Dynamic PCM */
+ bool initialized;
+
int num_components;
struct snd_soc_component *components[]; /* CPU/Codec/Platform */
};
+
/* see soc_new_pcm_runtime() */
-#define asoc_rtd_to_cpu(rtd, n) (rtd)->dais[n]
-#define asoc_rtd_to_codec(rtd, n) (rtd)->dais[n + (rtd)->dai_link->num_cpus]
-#define asoc_substream_to_rtd(substream) \
+#define snd_soc_rtd_to_cpu(rtd, n) (rtd)->dais[n]
+#define snd_soc_rtd_to_codec(rtd, n) (rtd)->dais[n + (rtd)->dai_link->num_cpus]
+#define snd_soc_substream_to_rtd(substream) \
(struct snd_soc_pcm_runtime *)snd_pcm_substream_chip(substream)
#define for_each_rtd_components(rtd, i, component) \
@@ -1123,17 +1202,18 @@ struct snd_soc_pcm_runtime {
(i)++)
#define for_each_rtd_cpu_dais(rtd, i, dai) \
for ((i) = 0; \
- ((i) < rtd->dai_link->num_cpus) && ((dai) = asoc_rtd_to_cpu(rtd, i)); \
+ ((i) < rtd->dai_link->num_cpus) && ((dai) = snd_soc_rtd_to_cpu(rtd, i)); \
(i)++)
#define for_each_rtd_codec_dais(rtd, i, dai) \
for ((i) = 0; \
- ((i) < rtd->dai_link->num_codecs) && ((dai) = asoc_rtd_to_codec(rtd, i)); \
+ ((i) < rtd->dai_link->num_codecs) && ((dai) = snd_soc_rtd_to_codec(rtd, i)); \
(i)++)
#define for_each_rtd_dais(rtd, i, dai) \
for ((i) = 0; \
((i) < (rtd)->dai_link->num_cpus + (rtd)->dai_link->num_codecs) && \
((dai) = (rtd)->dais[i]); \
(i)++)
+#define for_each_rtd_ch_maps(rtd, i, ch_maps) for_each_link_ch_maps(rtd->dai_link, i, ch_maps)
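The asoc_rtd_to_*() and asoc_substream_to_rtd() accessors are renamed with the snd_soc_ prefix; behavior is unchanged. A sketch of a machine-driver callback using the new names:

static int example_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
	struct snd_soc_dai *codec_dai = snd_soc_rtd_to_codec(rtd, 0);

	dev_dbg(rtd->dev, "%s -> %s\n", cpu_dai->name, codec_dai->name);

	return 0;
}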
void snd_soc_close_delayed_work(struct snd_soc_pcm_runtime *rtd);
@@ -1291,11 +1371,18 @@ unsigned int snd_soc_daifmt_parse_clock_provider_raw(struct device_node *np,
snd_soc_daifmt_clock_provider_from_bitmap( \
snd_soc_daifmt_parse_clock_provider_as_bitmap(np, prefix))
+int snd_soc_get_stream_cpu(struct snd_soc_dai_link *dai_link, int stream);
+int snd_soc_get_dlc(const struct of_phandle_args *args,
+ struct snd_soc_dai_link_component *dlc);
+int snd_soc_of_get_dlc(struct device_node *of_node,
+ struct of_phandle_args *args,
+ struct snd_soc_dai_link_component *dlc,
+ int index);
int snd_soc_get_dai_id(struct device_node *ep);
int snd_soc_get_dai_name(const struct of_phandle_args *args,
const char **dai_name);
int snd_soc_of_get_dai_name(struct device_node *of_node,
- const char **dai_name);
+ const char **dai_name, int index);
int snd_soc_of_get_dai_link_codecs(struct device *dev,
struct device_node *of_node,
struct snd_soc_dai_link *dai_link);
@@ -1311,6 +1398,11 @@ int snd_soc_add_pcm_runtimes(struct snd_soc_card *card,
void snd_soc_remove_pcm_runtime(struct snd_soc_card *card,
struct snd_soc_pcm_runtime *rtd);
+void snd_soc_dlc_use_cpu_as_platform(struct snd_soc_dai_link_component *platforms,
+ struct snd_soc_dai_link_component *cpus);
+struct of_phandle_args *snd_soc_copy_dai_args(struct device *dev,
+ struct of_phandle_args *args);
+struct snd_soc_dai *snd_soc_get_dai_via_args(struct of_phandle_args *dai_args);
struct snd_soc_dai *snd_soc_register_dai(struct snd_soc_component *component,
struct snd_soc_dai_driver *dai_drv,
bool legacy_dai_naming);
diff --git a/include/sound/sof.h b/include/sound/sof.h
index d3c41f87ac31..05213bb515a3 100644
--- a/include/sound/sof.h
+++ b/include/sound/sof.h
@@ -52,11 +52,23 @@ enum sof_dsp_power_states {
/* Definitions for multiple IPCs */
enum sof_ipc_type {
- SOF_IPC,
- SOF_INTEL_IPC4,
+ SOF_IPC_TYPE_3,
+ SOF_IPC_TYPE_4,
SOF_IPC_TYPE_COUNT
};
+struct sof_loadable_file_profile {
+ enum sof_ipc_type ipc_type;
+
+ const char *fw_path;
+ const char *fw_path_postfix;
+ const char *fw_name;
+ const char *fw_lib_path;
+ const char *fw_lib_path_postfix;
+ const char *tplg_path;
+ const char *tplg_name;
+};
+
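A hedged sketch of a platform filling the new profile to pin the IPC type and firmware/topology locations, intended to seed snd_sof_pdata.ipc_file_profile_base below; the path strings are placeholders:

static const struct sof_loadable_file_profile example_profile = {
	.ipc_type  = SOF_IPC_TYPE_4,
	.fw_path   = "vendor/sof-ipc4/example",
	.fw_name   = "sof-example.ri",
	.tplg_path = "vendor/sof-ipc4-tplg",
	.tplg_name = "sof-example.tplg",
};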
/*
* SOF Platform data.
*/
@@ -64,6 +76,14 @@ struct snd_sof_pdata {
const char *name;
const char *platform;
+ /*
+ * PCI SSID. As PCI does not define 0 as invalid, the subsystem_id_set
+ * flag indicates that a value has been written to these members.
+ */
+ unsigned short subsystem_vendor;
+ unsigned short subsystem_device;
+ bool subsystem_id_set;
+
struct device *dev;
/*
@@ -78,6 +98,9 @@ struct snd_sof_pdata {
/* descriptor */
const struct sof_dev_desc *desc;
+ /* platform's preferred IPC type and path overrides */
+ struct sof_loadable_file_profile ipc_file_profile_base;
+
/* firmware and topology filenames */
const char *fw_filename_prefix;
const char *fw_filename;
diff --git a/include/sound/sof/dai-imx.h b/include/sound/sof/dai-imx.h
index ca8325353d41..6bc987bd4761 100644
--- a/include/sound/sof/dai-imx.h
+++ b/include/sound/sof/dai-imx.h
@@ -51,4 +51,11 @@ struct sof_ipc_dai_sai_params {
uint16_t tdm_slot_width;
uint16_t reserved2; /* alignment */
} __packed;
+
+/* MICFIL Configuration Request - SOF_IPC_DAI_MICFIL_CONFIG */
+struct sof_ipc_dai_micfil_params {
+ uint32_t pdm_rate;
+ uint32_t pdm_ch;
+} __packed;
+
#endif
diff --git a/include/sound/sof/dai.h b/include/sound/sof/dai.h
index 3041f5805b7b..4773a5f616a4 100644
--- a/include/sound/sof/dai.h
+++ b/include/sound/sof/dai.h
@@ -88,6 +88,7 @@ enum sof_ipc_dai_type {
SOF_DAI_AMD_HS, /**< Amd HS */
SOF_DAI_AMD_SP_VIRTUAL, /**< AMD ACP SP VIRTUAL */
SOF_DAI_AMD_HS_VIRTUAL, /**< AMD ACP HS VIRTUAL */
+	SOF_DAI_IMX_MICFIL,	/**< i.MX MICFIL PDM */
};
/* general purpose DAI configuration */
@@ -117,6 +118,7 @@ struct sof_ipc_dai_config {
struct sof_ipc_dai_acpdmic_params acpdmic;
struct sof_ipc_dai_acp_params acphs;
struct sof_ipc_dai_mtk_afe_params afe;
+ struct sof_ipc_dai_micfil_params micfil;
};
} __packed;
diff --git a/include/sound/sof/ipc4/header.h b/include/sound/sof/ipc4/header.h
index 78568abe2673..1eb538e18236 100644
--- a/include/sound/sof/ipc4/header.h
+++ b/include/sound/sof/ipc4/header.h
@@ -106,12 +106,19 @@ enum sof_ipc4_global_msg {
SOF_IPC4_GLB_SAVE_PIPELINE,
SOF_IPC4_GLB_RESTORE_PIPELINE,
- /* Loads library (using Code Load or HD/A Host Output DMA) */
+ /*
+ * library loading
+ *
+ * Loads library (using Code Load or HD/A Host Output DMA)
+ */
SOF_IPC4_GLB_LOAD_LIBRARY,
+ /*
+ * Prepare the host DMA channel for library loading, must be followed by
+ * a SOF_IPC4_GLB_LOAD_LIBRARY message as the library loading step
+ */
+ SOF_IPC4_GLB_LOAD_LIBRARY_PREPARE,
- /* 25: RESERVED - do not use */
-
- SOF_IPC4_GLB_INTERNAL_MESSAGE = 26,
+ SOF_IPC4_GLB_INTERNAL_MESSAGE,
/* Notification (FW to SW driver) */
SOF_IPC4_GLB_NOTIFICATION,
@@ -416,6 +423,12 @@ enum sof_ipc4_fw_config_params {
SOF_IPC4_FW_CFG_RESERVED,
SOF_IPC4_FW_CFG_POWER_GATING_POLICY,
SOF_IPC4_FW_CFG_ASSERT_MODE,
+ SOF_IPC4_FW_RESERVED1,
+ SOF_IPC4_FW_RESERVED2,
+ SOF_IPC4_FW_RESERVED3,
+ SOF_IPC4_FW_RESERVED4,
+ SOF_IPC4_FW_RESERVED5,
+ SOF_IPC4_FW_CONTEXT_SAVE
};
struct sof_ipc4_fw_version {
@@ -508,6 +521,52 @@ struct sof_ipc4_notify_resource_data {
uint32_t data[6];
} __packed __aligned(4);
+#define SOF_IPC4_DEBUG_DESCRIPTOR_SIZE 12 /* 3 x u32 */
+
+/*
+ * The debug memory window is divided into 16 slots, and the
+ * first slot is used as a recorder for the other 15 slots.
+ */
+#define SOF_IPC4_MAX_DEBUG_SLOTS 15
+#define SOF_IPC4_DEBUG_SLOT_SIZE 0x1000
+
+/* debug log slot types */
+#define SOF_IPC4_DEBUG_SLOT_UNUSED 0x00000000
+#define SOF_IPC4_DEBUG_SLOT_CRITICAL_LOG 0x54524300 /* byte 0: core ID */
+#define SOF_IPC4_DEBUG_SLOT_DEBUG_LOG 0x474f4c00 /* byte 0: core ID */
+#define SOF_IPC4_DEBUG_SLOT_GDB_STUB 0x42444700
+#define SOF_IPC4_DEBUG_SLOT_TELEMETRY 0x4c455400
+#define SOF_IPC4_DEBUG_SLOT_BROKEN 0x44414544
+
+/**
+ * struct sof_ipc4_notify_module_data - payload for module notification
+ * @instance_id: instance ID of the originator module of the notification
+ * @module_id: module ID of the originator of the notification
+ * @event_id: module specific event id
+ * @event_data_size: Size of the @event_data (if any) in bytes
+ * @event_data: Optional notification data, module and notification dependent
+ */
+struct sof_ipc4_notify_module_data {
+ uint16_t instance_id;
+ uint16_t module_id;
+ uint32_t event_id;
+ uint32_t event_data_size;
+ uint8_t event_data[];
+} __packed __aligned(4);
+
+/*
+ * ALSA kcontrol change notification
+ *
+ * The event_id of struct sof_ipc4_notify_module_data is divided into two u16:
+ * upper u16: magic number for ALSA kcontrol types: 0xA15A
+ * lower u16: param_id of the control, which is the type of the control
+ * The event_data contains the struct sof_ipc4_control_msg_payload of the control
+ * which sent the notification.
+ */
+#define SOF_IPC4_NOTIFY_MODULE_EVENTID_ALSA_MAGIC_MASK GENMASK(31, 16)
+#define SOF_IPC4_NOTIFY_MODULE_EVENTID_ALSA_MAGIC_VAL 0xA15A0000
+#define SOF_IPC4_NOTIFY_MODULE_EVENTID_ALSA_PARAMID_MASK GENMASK(15, 0)
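A sketch of host-side decoding of the event_id, assuming <linux/bitfield.h> for FIELD_GET():

static bool example_is_alsa_notification(
		const struct sof_ipc4_notify_module_data *data, u16 *param_id)
{
	if ((data->event_id & SOF_IPC4_NOTIFY_MODULE_EVENTID_ALSA_MAGIC_MASK) !=
	    SOF_IPC4_NOTIFY_MODULE_EVENTID_ALSA_MAGIC_VAL)
		return false;

	*param_id = FIELD_GET(SOF_IPC4_NOTIFY_MODULE_EVENTID_ALSA_PARAMID_MASK,
			      data->event_id);
	return true;
}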
+
/** @}*/
#endif
diff --git a/include/sound/sof/topology.h b/include/sound/sof/topology.h
index 88560281d420..b3ca886fa28f 100644
--- a/include/sound/sof/topology.h
+++ b/include/sound/sof/topology.h
@@ -26,9 +26,9 @@ enum sof_comp_type {
SOF_COMP_MIXER,
SOF_COMP_MUX,
SOF_COMP_SRC,
- SOF_COMP_SPLITTER,
+ SOF_COMP_DEPRECATED0, /* Formerly SOF_COMP_SPLITTER */
SOF_COMP_TONE,
- SOF_COMP_SWITCH,
+ SOF_COMP_DEPRECATED1, /* Formerly SOF_COMP_SWITCH */
SOF_COMP_BUFFER,
SOF_COMP_EQ_IIR,
SOF_COMP_EQ_FIR,
@@ -39,6 +39,7 @@ enum sof_comp_type {
SOF_COMP_ASRC, /**< Asynchronous sample rate converter */
SOF_COMP_DCBLOCK,
SOF_COMP_SMART_AMP, /**< smart amplifier component */
+ SOF_COMP_MODULE_ADAPTER, /**< module adapter */
/* keep FILEREAD/FILEWRITE as the last ones */
SOF_COMP_FILEREAD = 10000, /**< host test based file IO */
SOF_COMP_FILEWRITE = 10001, /**< host test based file IO */
@@ -59,7 +60,7 @@ struct sof_ipc_comp {
/* extended data length, 0 if no extended data */
uint32_t ext_data_length;
-} __packed;
+} __packed __aligned(4);
/*
* Component Buffers
@@ -68,14 +69,15 @@ struct sof_ipc_comp {
/*
* SOF memory capabilities, add new ones at the end
*/
-#define SOF_MEM_CAPS_RAM (1 << 0)
-#define SOF_MEM_CAPS_ROM (1 << 1)
-#define SOF_MEM_CAPS_EXT (1 << 2) /**< external */
-#define SOF_MEM_CAPS_LP (1 << 3) /**< low power */
-#define SOF_MEM_CAPS_HP (1 << 4) /**< high performance */
-#define SOF_MEM_CAPS_DMA (1 << 5) /**< DMA'able */
-#define SOF_MEM_CAPS_CACHE (1 << 6) /**< cacheable */
-#define SOF_MEM_CAPS_EXEC (1 << 7) /**< executable */
+#define SOF_MEM_CAPS_RAM BIT(0)
+#define SOF_MEM_CAPS_ROM BIT(1)
+#define SOF_MEM_CAPS_EXT BIT(2) /**< external */
+#define SOF_MEM_CAPS_LP BIT(3) /**< low power */
+#define SOF_MEM_CAPS_HP BIT(4) /**< high performance */
+#define SOF_MEM_CAPS_DMA BIT(5) /**< DMA'able */
+#define SOF_MEM_CAPS_CACHE BIT(6) /**< cacheable */
+#define SOF_MEM_CAPS_EXEC BIT(7) /**< executable */
+#define SOF_MEM_CAPS_L3 BIT(8) /**< L3 memory */
/*
* overrun will cause ring buffer overwrite, instead of XRUN.
@@ -87,6 +89,9 @@ struct sof_ipc_comp {
*/
#define SOF_BUF_UNDERRUN_PERMITTED BIT(1)
+/* the UUID size in bytes, shared between FW and host */
+#define SOF_UUID_SIZE 16
+
/* create new component buffer - SOF_IPC_TPLG_BUFFER_NEW */
struct sof_ipc_buffer {
struct sof_ipc_comp comp;
@@ -94,7 +99,7 @@ struct sof_ipc_buffer {
uint32_t caps; /**< SOF_MEM_CAPS_ */
uint32_t flags; /**< SOF_BUF_ flags defined above */
uint32_t reserved; /**< reserved for future use */
-} __packed;
+} __packed __aligned(4);
/* generic component config data - must always be after struct sof_ipc_comp */
struct sof_ipc_comp_config {
@@ -107,7 +112,7 @@ struct sof_ipc_comp_config {
/* reserved for future use */
uint32_t reserved[2];
-} __packed;
+} __packed __aligned(4);
/* generic host component */
struct sof_ipc_comp_host {
@@ -116,7 +121,7 @@ struct sof_ipc_comp_host {
uint32_t direction; /**< SOF_IPC_STREAM_ */
uint32_t no_irq; /**< don't send periodic IRQ to host/DSP */
uint32_t dmac_config; /**< DMA engine specific */
-} __packed;
+} __packed __aligned(4);
/* generic DAI component */
struct sof_ipc_comp_dai {
@@ -126,13 +131,13 @@ struct sof_ipc_comp_dai {
uint32_t dai_index; /**< index of this type dai */
uint32_t type; /**< DAI type - SOF_DAI_ */
uint32_t reserved; /**< reserved */
-} __packed;
+} __packed __aligned(4);
/* generic mixer component */
struct sof_ipc_comp_mixer {
struct sof_ipc_comp comp;
struct sof_ipc_comp_config config;
-} __packed;
+} __packed __aligned(4);
/* volume ramping types */
enum sof_volume_ramp {
@@ -140,6 +145,8 @@ enum sof_volume_ramp {
SOF_VOLUME_LOG,
SOF_VOLUME_LINEAR_ZC,
SOF_VOLUME_LOG_ZC,
+ SOF_VOLUME_WINDOWS_FADE,
+ SOF_VOLUME_WINDOWS_NO_FADE,
};
/* generic volume component */
@@ -151,7 +158,7 @@ struct sof_ipc_comp_volume {
uint32_t max_value;
uint32_t ramp; /**< SOF_VOLUME_ */
uint32_t initial_ramp; /**< ramp space in ms */
-} __packed;
+} __packed __aligned(4);
/* generic SRC component */
struct sof_ipc_comp_src {
@@ -161,7 +168,7 @@ struct sof_ipc_comp_src {
uint32_t source_rate; /**< source rate or 0 for variable */
uint32_t sink_rate; /**< sink rate or 0 for variable */
uint32_t rate_mask; /**< SOF_RATE_ supported rates */
-} __packed;
+} __packed __aligned(4);
/* generic ASRC component */
struct sof_ipc_comp_asrc {
@@ -187,13 +194,13 @@ struct sof_ipc_comp_asrc {
/* reserved for future use */
uint32_t reserved[4];
-} __attribute__((packed));
+} __packed __aligned(4);
/* generic MUX component */
struct sof_ipc_comp_mux {
struct sof_ipc_comp comp;
struct sof_ipc_comp_config config;
-} __packed;
+} __packed __aligned(4);
/* generic tone generator component */
struct sof_ipc_comp_tone {
@@ -208,7 +215,7 @@ struct sof_ipc_comp_tone {
int32_t period;
int32_t repeats;
int32_t ramp_step;
-} __packed;
+} __packed __aligned(4);
/** \brief Types of processing components */
enum sof_ipc_process_type {
@@ -234,8 +241,8 @@ struct sof_ipc_comp_process {
/* reserved for future use */
uint32_t reserved[7];
- uint8_t data[];
-} __packed;
+ unsigned char data[];
+} __packed __aligned(4);
/* frees components, buffers and pipelines
* SOF_IPC_TPLG_COMP_FREE, SOF_IPC_TPLG_PIPE_FREE, SOF_IPC_TPLG_BUFFER_FREE
@@ -243,13 +250,13 @@ struct sof_ipc_comp_process {
struct sof_ipc_free {
struct sof_ipc_cmd_hdr hdr;
uint32_t id;
-} __packed;
+} __packed __aligned(4);
struct sof_ipc_comp_reply {
struct sof_ipc_reply rhdr;
uint32_t id;
uint32_t offset;
-} __packed;
+} __packed __aligned(4);
/*
* Pipeline
@@ -274,25 +281,25 @@ struct sof_ipc_pipe_new {
uint32_t frames_per_sched;/**< output frames of pipeline, 0 is variable */
uint32_t xrun_limit_usecs; /**< report xruns greater than limit */
uint32_t time_domain; /**< scheduling time domain */
-} __packed;
+} __packed __aligned(4);
/* pipeline construction complete - SOF_IPC_TPLG_PIPE_COMPLETE */
struct sof_ipc_pipe_ready {
struct sof_ipc_cmd_hdr hdr;
uint32_t comp_id;
-} __packed;
+} __packed __aligned(4);
struct sof_ipc_pipe_free {
struct sof_ipc_cmd_hdr hdr;
uint32_t comp_id;
-} __packed;
+} __packed __aligned(4);
/* connect two components in pipeline - SOF_IPC_TPLG_COMP_CONNECT */
struct sof_ipc_pipe_comp_connect {
struct sof_ipc_cmd_hdr hdr;
uint32_t source_id;
uint32_t sink_id;
-} __packed;
+} __packed __aligned(4);
/* external events */
enum sof_event_types {
diff --git a/include/sound/tas2781-dsp.h b/include/sound/tas2781-dsp.h
new file mode 100644
index 000000000000..ea9af2726a53
--- /dev/null
+++ b/include/sound/tas2781-dsp.h
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+//
+// ALSA SoC Texas Instruments TAS2781 Audio Smart Amplifier
+//
+// Copyright (C) 2022 - 2023 Texas Instruments Incorporated
+// https://www.ti.com
+//
+// The TAS2781 driver implements a flexible and configurable
+// algo coefficient setting for one, two, or even multiple
+// TAS2781 chips.
+//
+// Author: Shenghao Ding <shenghao-ding@ti.com>
+// Author: Kevin Lu <kevin-lu@ti.com>
+//
+
+#ifndef __TASDEVICE_DSP_H__
+#define __TASDEVICE_DSP_H__
+
+#define MAIN_ALL_DEVICES 0x0d
+#define MAIN_DEVICE_A 0x01
+#define MAIN_DEVICE_B 0x08
+#define MAIN_DEVICE_C 0x10
+#define MAIN_DEVICE_D 0x14
+#define COEFF_DEVICE_A 0x03
+#define COEFF_DEVICE_B 0x0a
+#define COEFF_DEVICE_C 0x11
+#define COEFF_DEVICE_D 0x15
+#define PRE_DEVICE_A 0x04
+#define PRE_DEVICE_B 0x0b
+#define PRE_DEVICE_C 0x12
+#define PRE_DEVICE_D 0x16
+
+#define PPC3_VERSION 0x4100
+#define PPC3_VERSION_TAS2781 0x14600
+#define TASDEVICE_DEVICE_SUM 8
+#define TASDEVICE_CONFIG_SUM 64
+
+#define TASDEVICE_MAX_CHANNELS 8
+
+enum tasdevice_dsp_dev_idx {
+ TASDEVICE_DSP_TAS_2555 = 0,
+ TASDEVICE_DSP_TAS_2555_STEREO,
+ TASDEVICE_DSP_TAS_2557_MONO,
+ TASDEVICE_DSP_TAS_2557_DUAL_MONO,
+ TASDEVICE_DSP_TAS_2559,
+ TASDEVICE_DSP_TAS_2563,
+ TASDEVICE_DSP_TAS_2563_DUAL_MONO = 7,
+ TASDEVICE_DSP_TAS_2563_QUAD,
+ TASDEVICE_DSP_TAS_2563_21,
+ TASDEVICE_DSP_TAS_2781,
+ TASDEVICE_DSP_TAS_2781_DUAL_MONO,
+ TASDEVICE_DSP_TAS_2781_21,
+ TASDEVICE_DSP_TAS_2781_QUAD,
+ TASDEVICE_DSP_TAS_MAX_DEVICE
+};
+
+struct tasdevice_fw_fixed_hdr {
+ unsigned int fwsize;
+ unsigned int ppcver;
+ unsigned int drv_ver;
+};
+
+struct tasdevice_dspfw_hdr {
+ struct tasdevice_fw_fixed_hdr fixed_hdr;
+ unsigned short device_family;
+ unsigned short device;
+ unsigned char ndev;
+};
+
+struct tasdev_blk {
+ int nr_retry;
+ unsigned int type;
+ unsigned char is_pchksum_present;
+ unsigned char pchksum;
+ unsigned char is_ychksum_present;
+ unsigned char ychksum;
+ unsigned int nr_cmds;
+ unsigned int blk_size;
+ unsigned int nr_subblocks;
+	/* Fixes an m68k compile issue. Storing dev_idx as a member of the
+	 * block also avoids the unnecessary time and system resource
+	 * consumption of dev_idx mapping every time block data is written
+	 * to the DSP.
+	 */
+ unsigned char dev_idx;
+ unsigned char *data;
+};
+
+struct tasdevice_data {
+ char name[64];
+ unsigned int nr_blk;
+ struct tasdev_blk *dev_blks;
+};
+
+struct tasdevice_prog {
+ unsigned int prog_size;
+ struct tasdevice_data dev_data;
+};
+
+struct tasdevice_config {
+ unsigned int cfg_size;
+ char name[64];
+ struct tasdevice_data dev_data;
+};
+
+struct tasdevice_calibration {
+ struct tasdevice_data dev_data;
+};
+
+struct tasdevice_fw {
+ struct tasdevice_dspfw_hdr fw_hdr;
+ unsigned short nr_programs;
+ struct tasdevice_prog *programs;
+ unsigned short nr_configurations;
+ struct tasdevice_config *configs;
+ unsigned short nr_calibrations;
+ struct tasdevice_calibration *calibrations;
+ struct device *dev;
+};
+
+enum tasdevice_dsp_fw_state {
+ TASDEVICE_DSP_FW_NONE = 0,
+ TASDEVICE_DSP_FW_PENDING,
+ TASDEVICE_DSP_FW_FAIL,
+ TASDEVICE_DSP_FW_ALL_OK,
+};
+
+enum tasdevice_bin_blk_type {
+ TASDEVICE_BIN_BLK_COEFF = 1,
+ TASDEVICE_BIN_BLK_POST_POWER_UP,
+ TASDEVICE_BIN_BLK_PRE_SHUTDOWN,
+ TASDEVICE_BIN_BLK_PRE_POWER_UP,
+ TASDEVICE_BIN_BLK_POST_SHUTDOWN
+};
+
+struct tasdevice_rca_hdr {
+ unsigned int img_sz;
+ unsigned int checksum;
+ unsigned int binary_version_num;
+ unsigned int drv_fw_version;
+ unsigned char plat_type;
+ unsigned char dev_family;
+ unsigned char reserve;
+ unsigned char ndev;
+ unsigned char devs[TASDEVICE_DEVICE_SUM];
+ unsigned int nconfig;
+ unsigned int config_size[TASDEVICE_CONFIG_SUM];
+};
+
+struct tasdev_blk_data {
+ unsigned char dev_idx;
+ unsigned char block_type;
+ unsigned short yram_checksum;
+ unsigned int block_size;
+ unsigned int n_subblks;
+ unsigned char *regdata;
+};
+
+struct tasdevice_config_info {
+ unsigned int nblocks;
+ unsigned int real_nblocks;
+ unsigned char active_dev;
+ struct tasdev_blk_data **blk_data;
+};
+
+struct tasdevice_rca {
+ struct tasdevice_rca_hdr fw_hdr;
+ int ncfgs;
+ struct tasdevice_config_info **cfg_info;
+ int profile_cfg_id;
+};
+
+void tasdevice_select_cfg_blk(void *context, int conf_no,
+ unsigned char block_type);
+void tasdevice_config_info_remove(void *context);
+void tasdevice_dsp_remove(void *context);
+int tasdevice_dsp_parser(void *context);
+int tasdevice_rca_parser(void *context, const struct firmware *fmw);
+void tasdevice_calbin_remove(void *context);
+int tasdevice_select_tuningprm_cfg(void *context, int prm,
+ int cfg_no, int rca_conf_no);
+int tasdevice_prmg_load(void *context, int prm_no);
+int tasdevice_prmg_calibdata_load(void *context, int prm_no);
+void tasdevice_tuning_switch(void *context, int state);
+int tas2781_load_calibration(void *context, char *file_name,
+ unsigned short i);
+
+#endif
diff --git a/include/sound/tas2781-tlv.h b/include/sound/tas2781-tlv.h
new file mode 100644
index 000000000000..4038dd421150
--- /dev/null
+++ b/include/sound/tas2781-tlv.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+//
+// ALSA SoC Texas Instruments TAS2781 Audio Smart Amplifier
+//
+// Copyright (C) 2022 - 2023 Texas Instruments Incorporated
+// https://www.ti.com
+//
+// The TAS2781 driver implements a flexible and configurable
+// algo coefficient setting for one, two, or even multiple
+// TAS2781 chips.
+//
+// Author: Shenghao Ding <shenghao-ding@ti.com>
+//
+
+#ifndef __TAS2781_TLV_H__
+#define __TAS2781_TLV_H__
+
+static const DECLARE_TLV_DB_SCALE(dvc_tlv, -10000, 100, 0);
+static const DECLARE_TLV_DB_SCALE(amp_vol_tlv, 1100, 50, 0);
+
+#endif
diff --git a/include/sound/tas2781.h b/include/sound/tas2781.h
new file mode 100644
index 000000000000..9aff384941de
--- /dev/null
+++ b/include/sound/tas2781.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+//
+// ALSA SoC Texas Instruments TAS2563/TAS2781 Audio Smart Amplifier
+//
+// Copyright (C) 2022 - 2023 Texas Instruments Incorporated
+// https://www.ti.com
+//
+// The TAS2563/TAS2781 driver implements flexible and configurable
+// algorithm coefficient setting for one, two, or even multiple
+// TAS2563/TAS2781 chips.
+//
+// Author: Shenghao Ding <shenghao-ding@ti.com>
+// Author: Kevin Lu <kevin-lu@ti.com>
+//
+
+#ifndef __TAS2781_H__
+#define __TAS2781_H__
+
+#include "tas2781-dsp.h"
+
+/* version number */
+#define TAS2781_DRV_VER 1
+#define SMARTAMP_MODULE_NAME "tas2781"
+#define TAS2781_GLOBAL_ADDR 0x40
+#define TAS2563_GLOBAL_ADDR 0x48
+#define TASDEVICE_RATES (SNDRV_PCM_RATE_44100 |\
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |\
+ SNDRV_PCM_RATE_88200)
+
+#define TASDEVICE_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE)
+
+/* PAGE Control Register (available in page 0 of each book) */
+#define TASDEVICE_PAGE_SELECT 0x00
+#define TASDEVICE_BOOKCTL_PAGE 0x00
+#define TASDEVICE_BOOKCTL_REG 127
+#define TASDEVICE_BOOK_ID(reg)	((reg) / (256 * 128))
+#define TASDEVICE_PAGE_ID(reg)	(((reg) % (256 * 128)) / 128)
+#define TASDEVICE_PAGE_REG(reg)	(((reg) % (256 * 128)) % 128)
+#define TASDEVICE_PGRG(reg)	((reg) % (256 * 128))
+#define TASDEVICE_REG(book, page, reg)	((((book) * 256 * 128) + \
+	((page) * 128)) + (reg))
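/*
 * The macros above flatten a (book, page, register) triple into one
 * linear index: 128 registers per page, 256 pages per book.  A quick
 * user-space round-trip check (not part of this header; the macros are
 * repeated locally so the snippet builds on its own):
 */
#include <assert.h>

#define BOOK_ID(reg)	((reg) / (256 * 128))
#define PAGE_ID(reg)	(((reg) % (256 * 128)) / 128)
#define PAGE_REG(reg)	(((reg) % (256 * 128)) % 128)
#define REG(book, page, reg) ((((book) * 256 * 128) + ((page) * 128)) + (reg))

int main(void)
{
	unsigned int lin = REG(0x8c, 0x2a, 0x7c);

	assert(BOOK_ID(lin) == 0x8c);	/* book recovered */
	assert(PAGE_ID(lin) == 0x2a);	/* page recovered */
	assert(PAGE_REG(lin) == 0x7c);	/* register recovered */
	return 0;
}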
+
+/* Software Reset */
+#define TAS2781_REG_SWRESET TASDEVICE_REG(0x0, 0X0, 0x01)
+#define TAS2781_REG_SWRESET_RESET BIT(0)
+
+/* I2C Checksum */
+#define TASDEVICE_I2CChecksum TASDEVICE_REG(0x0, 0x0, 0x7E)
+
+/* Volume control */
+#define TAS2781_DVC_LVL TASDEVICE_REG(0x0, 0x0, 0x1A)
+#define TAS2781_AMP_LEVEL TASDEVICE_REG(0x0, 0x0, 0x03)
+#define TAS2781_AMP_LEVEL_MASK GENMASK(5, 1)
+
+#define TASDEVICE_CMD_SING_W 0x1
+#define TASDEVICE_CMD_BURST 0x2
+#define TASDEVICE_CMD_DELAY 0x3
+#define TASDEVICE_CMD_FIELD_W 0x4
+
+enum audio_device {
+ TAS2563,
+ TAS2781,
+};
+
+enum device_catlog_id {
+ LENOVO = 0,
+ OTHERS
+};
+
+struct tasdevice {
+ struct tasdevice_fw *cali_data_fmw;
+ unsigned int dev_addr;
+ unsigned int err_code;
+ unsigned char cur_book;
+ short cur_prog;
+ short cur_conf;
+ bool is_loading;
+ bool is_loaderr;
+};
+
+struct tasdevice_irqinfo {
+ int irq_gpio;
+ int irq;
+};
+
+struct calidata {
+ unsigned char *data;
+ unsigned long total_sz;
+};
+
+struct tasdevice_priv {
+ struct tasdevice tasdevice[TASDEVICE_MAX_CHANNELS];
+ struct tasdevice_irqinfo irq_info;
+ struct tasdevice_rca rcabin;
+ struct calidata cali_data;
+ struct tasdevice_fw *fmw;
+ struct gpio_desc *reset;
+ struct mutex codec_lock;
+ struct regmap *regmap;
+ struct device *dev;
+ struct tm tm;
+
+ enum device_catlog_id catlog_id;
+ const char *acpi_subsystem_id;
+ unsigned char cal_binaryname[TASDEVICE_MAX_CHANNELS][64];
+ unsigned char crc8_lkp_tbl[CRC8_TABLE_SIZE];
+ unsigned char coef_binaryname[64];
+ unsigned char rca_binaryname[64];
+ unsigned char dev_name[32];
+ unsigned char ndev;
+ unsigned int magic_num;
+ unsigned int chip_id;
+ unsigned int sysclk;
+
+ int cur_prog;
+ int cur_conf;
+ int fw_state;
+ int index;
+ void *client;
+ void *codec;
+ bool force_fwload_status;
+ bool playback_started;
+ bool isacpi;
+ unsigned int global_addr;
+
+ int (*fw_parse_variable_header)(struct tasdevice_priv *tas_priv,
+ const struct firmware *fmw, int offset);
+ int (*fw_parse_program_data)(struct tasdevice_priv *tas_priv,
+ struct tasdevice_fw *tas_fmw,
+ const struct firmware *fmw, int offset);
+ int (*fw_parse_configuration_data)(struct tasdevice_priv *tas_priv,
+ struct tasdevice_fw *tas_fmw,
+ const struct firmware *fmw, int offset);
+ int (*tasdevice_load_block)(struct tasdevice_priv *tas_priv,
+ struct tasdev_blk *block);
+
+ int (*save_calibration)(struct tasdevice_priv *tas_priv);
+ void (*apply_calibration)(struct tasdevice_priv *tas_priv);
+};
+
+void tas2781_reset(struct tasdevice_priv *tas_dev);
+int tascodec_init(struct tasdevice_priv *tas_priv, void *codec,
+ struct module *module,
+ void (*cont)(const struct firmware *fw, void *context));
+struct tasdevice_priv *tasdevice_kzalloc(struct i2c_client *i2c);
+int tasdevice_init(struct tasdevice_priv *tas_priv);
+void tasdevice_remove(struct tasdevice_priv *tas_priv);
+int tasdevice_save_calibration(struct tasdevice_priv *tas_priv);
+void tasdevice_apply_calibration(struct tasdevice_priv *tas_priv);
+int tasdevice_dev_read(struct tasdevice_priv *tas_priv,
+ unsigned short chn, unsigned int reg, unsigned int *value);
+int tasdevice_dev_write(struct tasdevice_priv *tas_priv,
+ unsigned short chn, unsigned int reg, unsigned int value);
+int tasdevice_dev_bulk_write(
+ struct tasdevice_priv *tas_priv, unsigned short chn,
+ unsigned int reg, unsigned char *p_data, unsigned int n_length);
+int tasdevice_dev_bulk_read(struct tasdevice_priv *tas_priv,
+ unsigned short chn, unsigned int reg, unsigned char *p_data,
+ unsigned int n_length);
+int tasdevice_dev_update_bits(
+ struct tasdevice_priv *tasdevice, unsigned short chn,
+ unsigned int reg, unsigned int mask, unsigned int value);
+int tasdevice_amp_putvol(struct tasdevice_priv *tas_priv,
+ struct snd_ctl_elem_value *ucontrol, struct soc_mixer_control *mc);
+int tasdevice_amp_getvol(struct tasdevice_priv *tas_priv,
+ struct snd_ctl_elem_value *ucontrol, struct soc_mixer_control *mc);
+int tasdevice_digital_putvol(struct tasdevice_priv *tas_priv,
+ struct snd_ctl_elem_value *ucontrol, struct soc_mixer_control *mc);
+int tasdevice_digital_getvol(struct tasdevice_priv *tas_priv,
+ struct snd_ctl_elem_value *ucontrol, struct soc_mixer_control *mc);
+
+#endif /* __TAS2781_H__ */
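
The exported read/write helpers above all take a channel index into tasdevice_priv.tasdevice[] plus a linear register built with TASDEVICE_REG(). A minimal sketch combining them with the volume definitions, assuming an already-probed tasdevice_priv; the helper name is hypothetical and error handling is trimmed:

/* Sketch: program a 5-bit gain code (0..31) into the AMP level field
 * of one channel; mask and shift follow TAS2781_AMP_LEVEL_MASK above.
 */
static int example_set_amp_level(struct tasdevice_priv *tas_priv,
				 unsigned short chn, unsigned int gain)
{
	return tasdevice_dev_update_bits(tas_priv, chn, TAS2781_AMP_LEVEL,
					 TAS2781_AMP_LEVEL_MASK, gain << 1);
}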
diff --git a/include/sound/uda134x.h b/include/sound/uda134x.h
deleted file mode 100644
index db82516da162..000000000000
--- a/include/sound/uda134x.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * uda134x.h -- UDA134x ALSA SoC Codec driver
- *
- * Copyright 2007 Dension Audio Systems Ltd.
- * Author: Zoltan Devai
- */
-
-#ifndef _UDA134X_H
-#define _UDA134X_H
-
-#include <sound/l3.h>
-
-struct uda134x_platform_data {
- struct l3_pins l3;
- void (*power) (int);
- int model;
-#define UDA134X_UDA1340 1
-#define UDA134X_UDA1341 2
-#define UDA134X_UDA1344 3
-#define UDA134X_UDA1345 4
-};
-
-#endif /* _UDA134X_H */
diff --git a/include/sound/ump.h b/include/sound/ump.h
new file mode 100644
index 000000000000..91238dabe307
--- /dev/null
+++ b/include/sound/ump.h
@@ -0,0 +1,269 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Universal MIDI Packet (UMP) Support
+ */
+#ifndef __SOUND_UMP_H
+#define __SOUND_UMP_H
+
+#include <sound/rawmidi.h>
+
+struct snd_ump_endpoint;
+struct snd_ump_block;
+struct snd_ump_ops;
+struct ump_cvt_to_ump;
+struct snd_seq_ump_ops;
+
+struct snd_ump_endpoint {
+ struct snd_rawmidi core; /* raw UMP access */
+
+ struct snd_ump_endpoint_info info;
+
+ const struct snd_ump_ops *ops; /* UMP ops set by the driver */
+ struct snd_rawmidi_substream *substreams[2]; /* opened substreams */
+
+ void *private_data;
+ void (*private_free)(struct snd_ump_endpoint *ump);
+
+ /* UMP Stream message processing */
+ u32 stream_wait_for; /* expected stream message status */
+ bool stream_finished; /* set when message has been processed */
+ bool parsed; /* UMP / FB parse finished? */
+	bool no_process_stream;	/* suppress UMP stream message handling */
+ wait_queue_head_t stream_wait;
+ struct snd_rawmidi_file stream_rfile;
+
+ struct list_head block_list; /* list of snd_ump_block objects */
+
+ /* intermediate buffer for UMP input */
+ u32 input_buf[4];
+ int input_buf_head;
+ int input_pending;
+
+ struct mutex open_mutex;
+
+#if IS_ENABLED(CONFIG_SND_UMP_LEGACY_RAWMIDI)
+ spinlock_t legacy_locks[2];
+ struct snd_rawmidi *legacy_rmidi;
+ struct snd_rawmidi_substream *legacy_substreams[2][SNDRV_UMP_MAX_GROUPS];
+ unsigned char legacy_mapping[SNDRV_UMP_MAX_GROUPS];
+
+	/* for legacy output; unlike input, the actual substream must be opened */
+ int legacy_out_opens;
+ struct snd_rawmidi_file legacy_out_rfile;
+ struct ump_cvt_to_ump *out_cvts;
+#endif
+
+#if IS_ENABLED(CONFIG_SND_SEQUENCER)
+ struct snd_seq_device *seq_dev;
+ const struct snd_seq_ump_ops *seq_ops;
+ void *seq_client;
+#endif
+};
+
+/* ops filled by UMP drivers */
+struct snd_ump_ops {
+ int (*open)(struct snd_ump_endpoint *ump, int dir);
+ void (*close)(struct snd_ump_endpoint *ump, int dir);
+ void (*trigger)(struct snd_ump_endpoint *ump, int dir, int up);
+ void (*drain)(struct snd_ump_endpoint *ump, int dir);
+};
+
+/* ops filled by sequencer binding */
+struct snd_seq_ump_ops {
+ void (*input_receive)(struct snd_ump_endpoint *ump,
+ const u32 *data, int words);
+ int (*notify_fb_change)(struct snd_ump_endpoint *ump,
+ struct snd_ump_block *fb);
+ int (*switch_protocol)(struct snd_ump_endpoint *ump);
+};
+
+struct snd_ump_block {
+ struct snd_ump_block_info info;
+ struct snd_ump_endpoint *ump;
+
+ void *private_data;
+ void (*private_free)(struct snd_ump_block *blk);
+
+ struct list_head list;
+};
+
+#define rawmidi_to_ump(rmidi) container_of(rmidi, struct snd_ump_endpoint, core)
+
+int snd_ump_endpoint_new(struct snd_card *card, char *id, int device,
+ int output, int input,
+ struct snd_ump_endpoint **ump_ret);
+int snd_ump_parse_endpoint(struct snd_ump_endpoint *ump);
+int snd_ump_block_new(struct snd_ump_endpoint *ump, unsigned int blk,
+ unsigned int direction, unsigned int first_group,
+ unsigned int num_groups, struct snd_ump_block **blk_ret);
+int snd_ump_receive(struct snd_ump_endpoint *ump, const u32 *buffer, int count);
+int snd_ump_transmit(struct snd_ump_endpoint *ump, u32 *buffer, int count);
+
+#if IS_ENABLED(CONFIG_SND_UMP_LEGACY_RAWMIDI)
+int snd_ump_attach_legacy_rawmidi(struct snd_ump_endpoint *ump,
+ char *id, int device);
+#else
+static inline int snd_ump_attach_legacy_rawmidi(struct snd_ump_endpoint *ump,
+ char *id, int device)
+{
+ return 0;
+}
+#endif
+
+int snd_ump_receive_ump_val(struct snd_ump_endpoint *ump, u32 val);
+int snd_ump_switch_protocol(struct snd_ump_endpoint *ump, unsigned int protocol);
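/*
 * A hedged sketch of the driver-side bring-up order implied by the
 * entry points above: allocate the endpoint, hook up the ops, then
 * register a Function Block.  The chip argument, the caller-supplied
 * ops table and the single-block layout are illustrative, not a
 * required pattern; the direction constant comes from the UAPI header.
 */
static int example_create_ump(struct snd_card *card, void *chip,
			      const struct snd_ump_ops *ops)
{
	struct snd_ump_endpoint *ump;
	struct snd_ump_block *blk;
	int err;

	err = snd_ump_endpoint_new(card, "EXAMPLE UMP", 0, 1, 1, &ump);
	if (err < 0)
		return err;
	ump->private_data = chip;
	ump->ops = ops;		/* open/close/trigger/drain callbacks */

	/* one bidirectional block covering group 0 only */
	return snd_ump_block_new(ump, 0, SNDRV_UMP_DIR_BIDIRECTION,
				 0, 1, &blk);
}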
+
+/*
+ * Some definitions for UMP
+ */
+
+/* MIDI 2.0 Message Type */
+enum {
+ UMP_MSG_TYPE_UTILITY = 0x00,
+ UMP_MSG_TYPE_SYSTEM = 0x01,
+ UMP_MSG_TYPE_MIDI1_CHANNEL_VOICE = 0x02,
+ UMP_MSG_TYPE_DATA = 0x03,
+ UMP_MSG_TYPE_MIDI2_CHANNEL_VOICE = 0x04,
+ UMP_MSG_TYPE_EXTENDED_DATA = 0x05,
+ UMP_MSG_TYPE_FLEX_DATA = 0x0d,
+ UMP_MSG_TYPE_STREAM = 0x0f,
+};
+
+/* MIDI 2.0 SysEx / Data Status; same values for both 7-bit and 8-bit SysEx */
+enum {
+ UMP_SYSEX_STATUS_SINGLE = 0,
+ UMP_SYSEX_STATUS_START = 1,
+ UMP_SYSEX_STATUS_CONTINUE = 2,
+ UMP_SYSEX_STATUS_END = 3,
+};
+
+/* UMP Utility Type Status (type 0x0) */
+enum {
+ UMP_UTILITY_MSG_STATUS_NOOP = 0x00,
+ UMP_UTILITY_MSG_STATUS_JR_CLOCK = 0x01,
+ UMP_UTILITY_MSG_STATUS_JR_TSTAMP = 0x02,
+ UMP_UTILITY_MSG_STATUS_DCTPQ = 0x03,
+ UMP_UTILITY_MSG_STATUS_DC = 0x04,
+};
+
+/* UMP Stream Message Status (type 0xf) */
+enum {
+ UMP_STREAM_MSG_STATUS_EP_DISCOVERY = 0x00,
+ UMP_STREAM_MSG_STATUS_EP_INFO = 0x01,
+ UMP_STREAM_MSG_STATUS_DEVICE_INFO = 0x02,
+ UMP_STREAM_MSG_STATUS_EP_NAME = 0x03,
+ UMP_STREAM_MSG_STATUS_PRODUCT_ID = 0x04,
+ UMP_STREAM_MSG_STATUS_STREAM_CFG_REQUEST = 0x05,
+ UMP_STREAM_MSG_STATUS_STREAM_CFG = 0x06,
+ UMP_STREAM_MSG_STATUS_FB_DISCOVERY = 0x10,
+ UMP_STREAM_MSG_STATUS_FB_INFO = 0x11,
+ UMP_STREAM_MSG_STATUS_FB_NAME = 0x12,
+ UMP_STREAM_MSG_STATUS_START_CLIP = 0x20,
+ UMP_STREAM_MSG_STATUS_END_CLIP = 0x21,
+};
+
+/* UMP Endpoint Discovery filter bitmap */
+enum {
+ UMP_STREAM_MSG_REQUEST_EP_INFO = (1U << 0),
+ UMP_STREAM_MSG_REQUEST_DEVICE_INFO = (1U << 1),
+ UMP_STREAM_MSG_REQUEST_EP_NAME = (1U << 2),
+ UMP_STREAM_MSG_REQUEST_PRODUCT_ID = (1U << 3),
+ UMP_STREAM_MSG_REQUEST_STREAM_CFG = (1U << 4),
+};
+
+/* UMP Function Block Discovery filter bitmap */
+enum {
+ UMP_STREAM_MSG_REQUEST_FB_INFO = (1U << 0),
+ UMP_STREAM_MSG_REQUEST_FB_NAME = (1U << 1),
+};
+
+/* UMP Endpoint Info capability bits (used for protocol request/notify, too) */
+enum {
+ UMP_STREAM_MSG_EP_INFO_CAP_TXJR = (1U << 0), /* Sending JRTS */
+ UMP_STREAM_MSG_EP_INFO_CAP_RXJR = (1U << 1), /* Receiving JRTS */
+ UMP_STREAM_MSG_EP_INFO_CAP_MIDI1 = (1U << 8), /* MIDI 1.0 */
+ UMP_STREAM_MSG_EP_INFO_CAP_MIDI2 = (1U << 9), /* MIDI 2.0 */
+};
+
+/* UMP EP / FB name string format; same as SysEx string handling */
+enum {
+ UMP_STREAM_MSG_FORMAT_SINGLE = 0,
+ UMP_STREAM_MSG_FORMAT_START = 1,
+ UMP_STREAM_MSG_FORMAT_CONTINUE = 2,
+ UMP_STREAM_MSG_FORMAT_END = 3,
+};
+
+/*
+ * Helpers for retrieving / filling bits from UMP
+ */
+/* get the message type (4bit) from a UMP packet (header) */
+static inline unsigned char ump_message_type(u32 data)
+{
+ return data >> 28;
+}
+
+/* get the group number (0-based, 4bit) from a UMP packet (header) */
+static inline unsigned char ump_message_group(u32 data)
+{
+ return (data >> 24) & 0x0f;
+}
+
+/* get the MIDI status code (4bit) from a UMP packet (header) */
+static inline unsigned char ump_message_status_code(u32 data)
+{
+ return (data >> 20) & 0x0f;
+}
+
+/* get the MIDI channel number (0-based, 4bit) from a UMP packet (header) */
+static inline unsigned char ump_message_channel(u32 data)
+{
+ return (data >> 16) & 0x0f;
+}
+
+/* get the MIDI status + channel combo byte (8bit) from a UMP packet (header) */
+static inline unsigned char ump_message_status_channel(u32 data)
+{
+ return (data >> 16) & 0xff;
+}
+
+/* compose a UMP packet (header) from type, group, status and channel values */
+static inline u32 ump_compose(unsigned char type, unsigned char group,
+ unsigned char status, unsigned char channel)
+{
+ return ((u32)type << 28) | ((u32)group << 24) | ((u32)status << 20) |
+ ((u32)channel << 16);
+}
+
+/* get SysEx message status (both 7-bit and 8-bit) from a UMP packet (header) */
+static inline unsigned char ump_sysex_message_status(u32 data)
+{
+ return (data >> 20) & 0xf;
+}
+
+/* get SysEx message length (both 7-bit and 8-bit) from a UMP packet (header) */
+static inline unsigned char ump_sysex_message_length(u32 data)
+{
+ return (data >> 16) & 0xf;
+}
+
+/* For Stream Messages */
+static inline unsigned char ump_stream_message_format(u32 data)
+{
+ return (data >> 26) & 0x03;
+}
+
+static inline unsigned int ump_stream_message_status(u32 data)
+{
+ return (data >> 16) & 0x3ff;
+}
+
+static inline u32 ump_stream_compose(unsigned char status, unsigned short form)
+{
+ return (UMP_MSG_TYPE_STREAM << 28) | ((u32)form << 26) |
+ ((u32)status << 16);
+}
+
+#define ump_is_groupless_msg(type) \
+ ((type) == UMP_MSG_TYPE_UTILITY || (type) == UMP_MSG_TYPE_STREAM)
+
+#endif /* __SOUND_UMP_H */
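
Since the accessors above are pure shifts and masks on the first 32-bit word, they can be sanity-checked entirely in user space. A standalone round trip through the same arithmetic as ump_compose() and the getters, re-declared locally so it builds outside the kernel:

#include <assert.h>
#include <stdint.h>

static uint32_t compose(unsigned type, unsigned group,
			unsigned status, unsigned channel)
{
	return (uint32_t)type << 28 | (uint32_t)group << 24 |
	       (uint32_t)status << 20 | (uint32_t)channel << 16;
}

int main(void)
{
	/* MIDI1 note-on: group 0, channel 3, note 60, velocity 100 */
	uint32_t word = compose(0x02, 0, 0x09, 3) | 60 << 8 | 100;

	assert(word >> 28 == 0x02);		/* ump_message_type() */
	assert((word >> 24 & 0x0f) == 0);	/* ump_message_group() */
	assert((word >> 20 & 0x0f) == 0x09);	/* ump_message_status_code() */
	assert((word >> 16 & 0xff) == 0x93);	/* ump_message_status_channel() */
	return 0;
}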
diff --git a/include/sound/ump_convert.h b/include/sound/ump_convert.h
new file mode 100644
index 000000000000..28c364c63245
--- /dev/null
+++ b/include/sound/ump_convert.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef __SOUND_UMP_CONVERT_H
+#define __SOUND_UMP_CONVERT_H
+
+#include <sound/ump_msg.h>
+
+/* per-MIDI1-channel state for converting RPN/NRPN/Bank Select CCs to UMP */
+struct ump_cvt_to_ump_bank {
+ bool rpn_set;
+ bool nrpn_set;
+ bool bank_set;
+ unsigned char cc_rpn_msb, cc_rpn_lsb;
+ unsigned char cc_nrpn_msb, cc_nrpn_lsb;
+ unsigned char cc_data_msb, cc_data_lsb;
+ unsigned char cc_bank_msb, cc_bank_lsb;
+};
+
+/* context for converting from MIDI1 byte stream to UMP packet */
+struct ump_cvt_to_ump {
+ /* MIDI1 intermediate buffer */
+ unsigned char buf[4];
+ int len;
+ int cmd_bytes;
+
+ /* UMP output packet */
+ u32 ump[4];
+ int ump_bytes;
+
+ /* various status */
+ unsigned int in_sysex;
+ struct ump_cvt_to_ump_bank bank[16]; /* per channel */
+};
+
+int snd_ump_convert_from_ump(const u32 *data, unsigned char *dst,
+ unsigned char *group_ret);
+void snd_ump_convert_to_ump(struct ump_cvt_to_ump *cvt, unsigned char group,
+ unsigned int protocol, unsigned char c);
+
+/* reset the converter context; called at each UMP open */
+static inline void snd_ump_convert_reset(struct ump_cvt_to_ump *ctx)
+{
+	memset(ctx, 0, sizeof(*ctx));
+}
+
+#endif /* __SOUND_UMP_CONVERT_H */
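
The converter is fed one MIDI 1.0 byte at a time and reports a finished packet through the ump[] / ump_bytes members. A hedged sketch of the consuming loop on the caller side; the deliver() callback is hypothetical:

/* Sketch: push a legacy byte stream through the converter and hand
 * completed UMP words (ump_bytes / 4 of them) to a callback.
 */
static void example_feed(struct ump_cvt_to_ump *cvt, unsigned char group,
			 unsigned int protocol,
			 const unsigned char *buf, int len,
			 void (*deliver)(const u32 *ump, int words))
{
	int i;

	for (i = 0; i < len; i++) {
		snd_ump_convert_to_ump(cvt, group, protocol, buf[i]);
		if (cvt->ump_bytes)
			deliver(cvt->ump, cvt->ump_bytes / 4);
	}
}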
diff --git a/include/sound/ump_msg.h b/include/sound/ump_msg.h
new file mode 100644
index 000000000000..72f60ddfea75
--- /dev/null
+++ b/include/sound/ump_msg.h
@@ -0,0 +1,765 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Universal MIDI Packet (UMP): Message Definitions
+ */
+#ifndef __SOUND_UMP_MSG_H
+#define __SOUND_UMP_MSG_H
+
+/* MIDI 1.0 / 2.0 Status Code (4bit) */
+enum {
+ UMP_MSG_STATUS_PER_NOTE_RCC = 0x0,
+ UMP_MSG_STATUS_PER_NOTE_ACC = 0x1,
+ UMP_MSG_STATUS_RPN = 0x2,
+ UMP_MSG_STATUS_NRPN = 0x3,
+ UMP_MSG_STATUS_RELATIVE_RPN = 0x4,
+ UMP_MSG_STATUS_RELATIVE_NRPN = 0x5,
+ UMP_MSG_STATUS_PER_NOTE_PITCH_BEND = 0x6,
+ UMP_MSG_STATUS_NOTE_OFF = 0x8,
+ UMP_MSG_STATUS_NOTE_ON = 0x9,
+ UMP_MSG_STATUS_POLY_PRESSURE = 0xa,
+ UMP_MSG_STATUS_CC = 0xb,
+ UMP_MSG_STATUS_PROGRAM = 0xc,
+ UMP_MSG_STATUS_CHANNEL_PRESSURE = 0xd,
+ UMP_MSG_STATUS_PITCH_BEND = 0xe,
+ UMP_MSG_STATUS_PER_NOTE_MGMT = 0xf,
+};
+
+/* MIDI 1.0 Channel Control (7bit) */
+enum {
+ UMP_CC_BANK_SELECT = 0,
+ UMP_CC_MODULATION = 1,
+ UMP_CC_BREATH = 2,
+ UMP_CC_FOOT = 4,
+ UMP_CC_PORTAMENTO_TIME = 5,
+ UMP_CC_DATA = 6,
+ UMP_CC_VOLUME = 7,
+ UMP_CC_BALANCE = 8,
+ UMP_CC_PAN = 10,
+ UMP_CC_EXPRESSION = 11,
+ UMP_CC_EFFECT_CONTROL_1 = 12,
+ UMP_CC_EFFECT_CONTROL_2 = 13,
+ UMP_CC_GP_1 = 16,
+ UMP_CC_GP_2 = 17,
+ UMP_CC_GP_3 = 18,
+ UMP_CC_GP_4 = 19,
+ UMP_CC_BANK_SELECT_LSB = 32,
+ UMP_CC_MODULATION_LSB = 33,
+ UMP_CC_BREATH_LSB = 34,
+ UMP_CC_FOOT_LSB = 36,
+ UMP_CC_PORTAMENTO_TIME_LSB = 37,
+ UMP_CC_DATA_LSB = 38,
+ UMP_CC_VOLUME_LSB = 39,
+ UMP_CC_BALANCE_LSB = 40,
+ UMP_CC_PAN_LSB = 42,
+ UMP_CC_EXPRESSION_LSB = 43,
+ UMP_CC_EFFECT1_LSB = 44,
+ UMP_CC_EFFECT2_LSB = 45,
+ UMP_CC_GP_1_LSB = 48,
+ UMP_CC_GP_2_LSB = 49,
+ UMP_CC_GP_3_LSB = 50,
+ UMP_CC_GP_4_LSB = 51,
+ UMP_CC_SUSTAIN = 64,
+ UMP_CC_PORTAMENTO_SWITCH = 65,
+ UMP_CC_SOSTENUTO = 66,
+ UMP_CC_SOFT_PEDAL = 67,
+ UMP_CC_LEGATO = 68,
+ UMP_CC_HOLD_2 = 69,
+ UMP_CC_SOUND_CONTROLLER_1 = 70,
+ UMP_CC_SOUND_CONTROLLER_2 = 71,
+ UMP_CC_SOUND_CONTROLLER_3 = 72,
+ UMP_CC_SOUND_CONTROLLER_4 = 73,
+ UMP_CC_SOUND_CONTROLLER_5 = 74,
+ UMP_CC_SOUND_CONTROLLER_6 = 75,
+ UMP_CC_SOUND_CONTROLLER_7 = 76,
+ UMP_CC_SOUND_CONTROLLER_8 = 77,
+ UMP_CC_SOUND_CONTROLLER_9 = 78,
+ UMP_CC_SOUND_CONTROLLER_10 = 79,
+ UMP_CC_GP_5 = 80,
+ UMP_CC_GP_6 = 81,
+ UMP_CC_GP_7 = 82,
+ UMP_CC_GP_8 = 83,
+ UMP_CC_PORTAMENTO_CONTROL = 84,
+ UMP_CC_EFFECT_1 = 91,
+ UMP_CC_EFFECT_2 = 92,
+ UMP_CC_EFFECT_3 = 93,
+ UMP_CC_EFFECT_4 = 94,
+ UMP_CC_EFFECT_5 = 95,
+ UMP_CC_DATA_INC = 96,
+ UMP_CC_DATA_DEC = 97,
+ UMP_CC_NRPN_LSB = 98,
+ UMP_CC_NRPN_MSB = 99,
+ UMP_CC_RPN_LSB = 100,
+ UMP_CC_RPN_MSB = 101,
+ UMP_CC_ALL_SOUND_OFF = 120,
+ UMP_CC_RESET_ALL = 121,
+ UMP_CC_LOCAL_CONTROL = 122,
+ UMP_CC_ALL_NOTES_OFF = 123,
+ UMP_CC_OMNI_OFF = 124,
+ UMP_CC_OMNI_ON = 125,
+ UMP_CC_POLY_OFF = 126,
+ UMP_CC_POLY_ON = 127,
+};
+
+/* MIDI 1.0 / 2.0 System Messages (0xfx) */
+enum {
+ UMP_SYSTEM_STATUS_MIDI_TIME_CODE = 0xf1,
+ UMP_SYSTEM_STATUS_SONG_POSITION = 0xf2,
+ UMP_SYSTEM_STATUS_SONG_SELECT = 0xf3,
+ UMP_SYSTEM_STATUS_TUNE_REQUEST = 0xf6,
+ UMP_SYSTEM_STATUS_TIMING_CLOCK = 0xf8,
+ UMP_SYSTEM_STATUS_START = 0xfa,
+ UMP_SYSTEM_STATUS_CONTINUE = 0xfb,
+ UMP_SYSTEM_STATUS_STOP = 0xfc,
+ UMP_SYSTEM_STATUS_ACTIVE_SENSING = 0xfe,
+ UMP_SYSTEM_STATUS_RESET = 0xff,
+};
+
+/* MIDI 1.0 Realtime and SysEx status messages (0xfx) */
+enum {
+ UMP_MIDI1_MSG_REALTIME = 0xf0, /* mask */
+ UMP_MIDI1_MSG_SYSEX_START = 0xf0,
+ UMP_MIDI1_MSG_SYSEX_END = 0xf7,
+};
+
+/*
+ * UMP Message Definitions
+ */
+
+/* MIDI 1.0 Note Off / Note On (32bit) */
+struct snd_ump_midi1_msg_note {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 type:4;
+ u32 group:4;
+ u32 status:4;
+ u32 channel:4;
+ u32 note:8;
+ u32 velocity:8;
+#else
+ u32 velocity:8;
+ u32 note:8;
+ u32 channel:4;
+ u32 status:4;
+ u32 group:4;
+ u32 type:4;
+#endif
+} __packed;
+
+/* MIDI 1.0 Poly Pressure (32bit) */
+struct snd_ump_midi1_msg_paf {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 type:4;
+ u32 group:4;
+ u32 status:4;
+ u32 channel:4;
+ u32 note:8;
+ u32 data:8;
+#else
+ u32 data:8;
+ u32 note:8;
+ u32 channel:4;
+ u32 status:4;
+ u32 group:4;
+ u32 type:4;
+#endif
+} __packed;
+
+/* MIDI 1.0 Control Change (32bit) */
+struct snd_ump_midi1_msg_cc {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 type:4;
+ u32 group:4;
+ u32 status:4;
+ u32 channel:4;
+ u32 index:8;
+ u32 data:8;
+#else
+ u32 data:8;
+ u32 index:8;
+ u32 channel:4;
+ u32 status:4;
+ u32 group:4;
+ u32 type:4;
+#endif
+} __packed;
+
+/* MIDI 1.0 Program Change (32bit) */
+struct snd_ump_midi1_msg_program {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 type:4;
+ u32 group:4;
+ u32 status:4;
+ u32 channel:4;
+ u32 program:8;
+ u32 reserved:8;
+#else
+ u32 reserved:8;
+ u32 program:8;
+ u32 channel:4;
+ u32 status:4;
+ u32 group:4;
+ u32 type:4;
+#endif
+} __packed;
+
+/* MIDI 1.0 Channel Pressure (32bit) */
+struct snd_ump_midi1_msg_caf {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 type:4;
+ u32 group:4;
+ u32 status:4;
+ u32 channel:4;
+ u32 data:8;
+ u32 reserved:8;
+#else
+ u32 reserved:8;
+ u32 data:8;
+ u32 channel:4;
+ u32 status:4;
+ u32 group:4;
+ u32 type:4;
+#endif
+} __packed;
+
+/* MIDI 1.0 Pitch Bend (32bit) */
+struct snd_ump_midi1_msg_pitchbend {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 type:4;
+ u32 group:4;
+ u32 status:4;
+ u32 channel:4;
+ u32 data_lsb:8;
+ u32 data_msb:8;
+#else
+ u32 data_msb:8;
+ u32 data_lsb:8;
+ u32 channel:4;
+ u32 status:4;
+ u32 group:4;
+ u32 type:4;
+#endif
+} __packed;
+
+/* System Common and Real Time messages (32bit); no channel field */
+struct snd_ump_system_msg {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 type:4;
+ u32 group:4;
+ u32 status:8;
+ u32 parm1:8;
+ u32 parm2:8;
+#else
+ u32 parm2:8;
+ u32 parm1:8;
+ u32 status:8;
+ u32 group:4;
+ u32 type:4;
+#endif
+} __packed;
+
+/* MIDI 1.0 UMP CVM (32bit) */
+union snd_ump_midi1_msg {
+ struct snd_ump_midi1_msg_note note;
+ struct snd_ump_midi1_msg_paf paf;
+ struct snd_ump_midi1_msg_cc cc;
+ struct snd_ump_midi1_msg_program pg;
+ struct snd_ump_midi1_msg_caf caf;
+ struct snd_ump_midi1_msg_pitchbend pb;
+ struct snd_ump_system_msg system;
+ u32 raw;
+};
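/*
 * On a little-endian build the bitfield order above lines up with the
 * shift helpers in <sound/ump.h>.  A standalone user-space check of the
 * note layout (types trimmed to what the snippet needs; little-endian
 * and GCC/Clang bitfield allocation assumed):
 */
#include <assert.h>
#include <stdint.h>

struct midi1_note {		/* little-endian variant from above */
	uint32_t velocity:8;
	uint32_t note:8;
	uint32_t channel:4;
	uint32_t status:4;
	uint32_t group:4;
	uint32_t type:4;
};

union midi1_msg {
	struct midi1_note note;
	uint32_t raw;
};

int main(void)
{
	/* type 2, group 0, status 9 (note-on), channel 3, note 60, vel 100 */
	union midi1_msg m = { .raw = 0x20933c64 };

	assert(m.note.type == 0x2 && m.note.status == 0x9);
	assert(m.note.channel == 0x3 && m.note.note == 0x3c);
	assert(m.note.velocity == 0x64);
	return 0;
}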
+
+/* MIDI 2.0 Note Off / Note On (64bit) */
+struct snd_ump_midi2_msg_note {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* 0 */
+ u32 type:4;
+ u32 group:4;
+ u32 status:4;
+ u32 channel:4;
+ u32 note:8;
+ u32 attribute_type:8;
+ /* 1 */
+ u32 velocity:16;
+ u32 attribute_data:16;
+#else
+ /* 0 */
+ u32 attribute_type:8;
+ u32 note:8;
+ u32 channel:4;
+ u32 status:4;
+ u32 group:4;
+ u32 type:4;
+ /* 1 */
+ u32 attribute_data:16;
+ u32 velocity:16;
+#endif
+} __packed;
+
+/* MIDI 2.0 Poly Pressure (64bit) */
+struct snd_ump_midi2_msg_paf {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* 0 */
+ u32 type:4;
+ u32 group:4;
+ u32 status:4;
+ u32 channel:4;
+ u32 note:8;
+ u32 reserved:8;
+ /* 1 */
+ u32 data;
+#else
+ /* 0 */
+ u32 reserved:8;
+ u32 note:8;
+ u32 channel:4;
+ u32 status:4;
+ u32 group:4;
+ u32 type:4;
+ /* 1 */
+ u32 data;
+#endif
+} __packed;
+
+/* MIDI 2.0 Per-Note Controller (64bit) */
+struct snd_ump_midi2_msg_pernote_cc {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* 0 */
+ u32 type:4;
+ u32 group:4;
+ u32 status:4;
+ u32 channel:4;
+ u32 note:8;
+ u32 index:8;
+ /* 1 */
+ u32 data;
+#else
+ /* 0 */
+ u32 index:8;
+ u32 note:8;
+ u32 channel:4;
+ u32 status:4;
+ u32 group:4;
+ u32 type:4;
+ /* 1 */
+ u32 data;
+#endif
+} __packed;
+
+/* MIDI 2.0 Per-Note Management (64bit) */
+struct snd_ump_midi2_msg_pernote_mgmt {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* 0 */
+ u32 type:4;
+ u32 group:4;
+ u32 status:4;
+ u32 channel:4;
+ u32 note:8;
+ u32 flags:8;
+ /* 1 */
+ u32 reserved;
+#else
+ /* 0 */
+ u32 flags:8;
+ u32 note:8;
+ u32 channel:4;
+ u32 status:4;
+ u32 group:4;
+ u32 type:4;
+ /* 1 */
+ u32 reserved;
+#endif
+} __packed;
+
+/* MIDI 2.0 Control Change (64bit) */
+struct snd_ump_midi2_msg_cc {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* 0 */
+ u32 type:4;
+ u32 group:4;
+ u32 status:4;
+ u32 channel:4;
+ u32 index:8;
+ u32 reserved:8;
+ /* 1 */
+ u32 data;
+#else
+ /* 0 */
+ u32 reserved:8;
+ u32 index:8;
+ u32 channel:4;
+ u32 status:4;
+ u32 group:4;
+ u32 type:4;
+ /* 1 */
+ u32 data;
+#endif
+} __packed;
+
+/* MIDI 2.0 Registered Controller (RPN) / Assignable Controller (NRPN) (64bit) */
+struct snd_ump_midi2_msg_rpn {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* 0 */
+ u32 type:4;
+ u32 group:4;
+ u32 status:4;
+ u32 channel:4;
+ u32 bank:8;
+ u32 index:8;
+ /* 1 */
+ u32 data;
+#else
+ /* 0 */
+ u32 index:8;
+ u32 bank:8;
+ u32 channel:4;
+ u32 status:4;
+ u32 group:4;
+ u32 type:4;
+ /* 1 */
+ u32 data;
+#endif
+} __packed;
+
+/* MIDI 2.0 Program Change (64bit) */
+struct snd_ump_midi2_msg_program {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* 0 */
+ u32 type:4;
+ u32 group:4;
+ u32 status:4;
+ u32 channel:4;
+ u32 reserved:15;
+ u32 bank_valid:1;
+ /* 1 */
+ u32 program:8;
+ u32 reserved2:8;
+ u32 bank_msb:8;
+ u32 bank_lsb:8;
+#else
+ /* 0 */
+ u32 bank_valid:1;
+ u32 reserved:15;
+ u32 channel:4;
+ u32 status:4;
+ u32 group:4;
+ u32 type:4;
+ /* 1 */
+ u32 bank_lsb:8;
+ u32 bank_msb:8;
+ u32 reserved2:8;
+ u32 program:8;
+#endif
+} __packed;
+
+/* MIDI 2.0 Channel Pressure (64bit) */
+struct snd_ump_midi2_msg_caf {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* 0 */
+ u32 type:4;
+ u32 group:4;
+ u32 status:4;
+ u32 channel:4;
+ u32 reserved:16;
+ /* 1 */
+ u32 data;
+#else
+ /* 0 */
+ u32 reserved:16;
+ u32 channel:4;
+ u32 status:4;
+ u32 group:4;
+ u32 type:4;
+ /* 1 */
+ u32 data;
+#endif
+} __packed;
+
+/* MIDI 2.0 Pitch Bend (64bit) */
+struct snd_ump_midi2_msg_pitchbend {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* 0 */
+ u32 type:4;
+ u32 group:4;
+ u32 status:4;
+ u32 channel:4;
+ u32 reserved:16;
+ /* 1 */
+ u32 data;
+#else
+ /* 0 */
+ u32 reserved:16;
+ u32 channel:4;
+ u32 status:4;
+ u32 group:4;
+ u32 type:4;
+ /* 1 */
+ u32 data;
+#endif
+} __packed;
+
+/* MIDI 2.0 Per-Note Pitch Bend (64bit) */
+struct snd_ump_midi2_msg_pernote_pitchbend {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* 0 */
+ u32 type:4;
+ u32 group:4;
+ u32 status:4;
+ u32 channel:4;
+ u32 note:8;
+ u32 reserved:8;
+ /* 1 */
+ u32 data;
+#else
+ /* 0 */
+ u32 reserved:8;
+ u32 note:8;
+ u32 channel:4;
+ u32 status:4;
+ u32 group:4;
+ u32 type:4;
+ /* 1 */
+ u32 data;
+#endif
+} __packed;
+
+/* MIDI 2.0 UMP CVM (64bit) */
+union snd_ump_midi2_msg {
+ struct snd_ump_midi2_msg_note note;
+ struct snd_ump_midi2_msg_paf paf;
+ struct snd_ump_midi2_msg_pernote_cc pernote_cc;
+ struct snd_ump_midi2_msg_pernote_mgmt pernote_mgmt;
+ struct snd_ump_midi2_msg_cc cc;
+ struct snd_ump_midi2_msg_rpn rpn;
+ struct snd_ump_midi2_msg_program pg;
+ struct snd_ump_midi2_msg_caf caf;
+ struct snd_ump_midi2_msg_pitchbend pb;
+ struct snd_ump_midi2_msg_pernote_pitchbend pernote_pb;
+ u32 raw[2];
+};
+
+/* UMP Stream Message: Endpoint Discovery (128bit) */
+struct snd_ump_stream_msg_ep_discovery {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* 0 */
+ u32 type:4;
+ u32 format:2;
+ u32 status:10;
+ u32 ump_version_major:8;
+ u32 ump_version_minor:8;
+ /* 1 */
+ u32 reserved:24;
+ u32 filter_bitmap:8;
+ /* 2-3 */
+ u32 reserved2[2];
+#else
+ /* 0 */
+ u32 ump_version_minor:8;
+ u32 ump_version_major:8;
+ u32 status:10;
+ u32 format:2;
+ u32 type:4;
+ /* 1 */
+ u32 filter_bitmap:8;
+ u32 reserved:24;
+ /* 2-3 */
+ u32 reserved2[2];
+#endif
+} __packed;
+
+/* UMP Stream Message: Endpoint Info Notification (128bit) */
+struct snd_ump_stream_msg_ep_info {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* 0 */
+ u32 type:4;
+ u32 format:2;
+ u32 status:10;
+ u32 ump_version_major:8;
+ u32 ump_version_minor:8;
+ /* 1 */
+ u32 static_function_block:1;
+ u32 num_function_blocks:7;
+ u32 reserved:8;
+ u32 protocol:8;
+ u32 reserved2:6;
+ u32 jrts:2;
+ /* 2-3 */
+ u32 reserved3[2];
+#else
+ /* 0 */
+ u32 ump_version_minor:8;
+ u32 ump_version_major:8;
+ u32 status:10;
+ u32 format:2;
+ u32 type:4;
+ /* 1 */
+ u32 jrts:2;
+ u32 reserved2:6;
+ u32 protocol:8;
+ u32 reserved:8;
+ u32 num_function_blocks:7;
+ u32 static_function_block:1;
+ /* 2-3 */
+ u32 reserved3[2];
+#endif
+} __packed;
+
+/* UMP Stream Message: Device Info Notification (128bit) */
+struct snd_ump_stream_msg_device_info {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* 0 */
+ u32 type:4;
+ u32 format:2;
+ u32 status:10;
+ u32 reserved:16;
+ /* 1 */
+ u32 manufacture_id;
+ /* 2 */
+ u8 family_lsb;
+ u8 family_msb;
+ u8 model_lsb;
+ u8 model_msb;
+ /* 3 */
+ u32 sw_revision;
+#else
+ /* 0 */
+ u32 reserved:16;
+ u32 status:10;
+ u32 format:2;
+ u32 type:4;
+ /* 1 */
+ u32 manufacture_id;
+ /* 2 */
+ u8 model_msb;
+ u8 model_lsb;
+ u8 family_msb;
+ u8 family_lsb;
+ /* 3 */
+ u32 sw_revision;
+#endif
+} __packed;
+
+/* UMP Stream Message: Stream Config Request / Notification (128bit) */
+struct snd_ump_stream_msg_stream_cfg {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* 0 */
+ u32 type:4;
+ u32 format:2;
+ u32 status:10;
+ u32 protocol:8;
+ u32 reserved:6;
+ u32 jrts:2;
+ /* 1-3 */
+ u32 reserved2[3];
+#else
+ /* 0 */
+ u32 jrts:2;
+ u32 reserved:6;
+ u32 protocol:8;
+ u32 status:10;
+ u32 format:2;
+ u32 type:4;
+ /* 1-3 */
+ u32 reserved2[3];
+#endif
+} __packed;
+
+/* UMP Stream Message: Function Block Discovery (128bit) */
+struct snd_ump_stream_msg_fb_discovery {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* 0 */
+ u32 type:4;
+ u32 format:2;
+ u32 status:10;
+ u32 function_block_id:8;
+ u32 filter:8;
+ /* 1-3 */
+ u32 reserved[3];
+#else
+ /* 0 */
+ u32 filter:8;
+ u32 function_block_id:8;
+ u32 status:10;
+ u32 format:2;
+ u32 type:4;
+ /* 1-3 */
+ u32 reserved[3];
+#endif
+} __packed;
+
+/* UMP Stream Message: Function Block Info Notification (128bit) */
+struct snd_ump_stream_msg_fb_info {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* 0 */
+ u32 type:4;
+ u32 format:2;
+ u32 status:10;
+ u32 active:1;
+ u32 function_block_id:7;
+ u32 reserved:2;
+ u32 ui_hint:2;
+ u32 midi_10:2;
+ u32 direction:2;
+ /* 1 */
+ u32 first_group:8;
+ u32 num_groups:8;
+ u32 midi_ci_version:8;
+ u32 sysex8_streams:8;
+ /* 2-3 */
+ u32 reserved2[2];
+#else
+ /* 0 */
+ u32 direction:2;
+ u32 midi_10:2;
+ u32 ui_hint:2;
+ u32 reserved:2;
+ u32 function_block_id:7;
+ u32 active:1;
+ u32 status:10;
+ u32 format:2;
+ u32 type:4;
+ /* 1 */
+ u32 sysex8_streams:8;
+ u32 midi_ci_version:8;
+ u32 num_groups:8;
+ u32 first_group:8;
+ /* 2-3 */
+ u32 reserved2[2];
+#endif
+} __packed;
+
+/* UMP Stream Message: Function Block Name Notification (128bit) */
+struct snd_ump_stream_msg_fb_name {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* 0 */
+ u16 type:4;
+ u16 format:2;
+ u16 status:10;
+ u8 function_block_id;
+ u8 name0;
+ /* 1-3 */
+ u8 name[12];
+#else
+ /* 0 */
+ u8 name0;
+ u8 function_block_id;
+ u16 status:10;
+ u16 format:2;
+ u16 type:4;
+ /* 1-3 */
+ u8 name[12]; // FIXME: byte order
+#endif
+} __packed;
+
+/* MIDI 2.0 Stream Messages (128bit) */
+union snd_ump_stream_msg {
+ struct snd_ump_stream_msg_ep_discovery ep_discovery;
+ struct snd_ump_stream_msg_ep_info ep_info;
+	struct snd_ump_stream_msg_device_info device_info;
+ struct snd_ump_stream_msg_stream_cfg stream_cfg;
+ struct snd_ump_stream_msg_fb_discovery fb_discovery;
+ struct snd_ump_stream_msg_fb_info fb_info;
+ struct snd_ump_stream_msg_fb_name fb_name;
+ u32 raw[4];
+};
+
+#endif /* __SOUND_UMP_MSG_H */
diff --git a/include/sound/wavefront.h b/include/sound/wavefront.h
index 37ed437e2123..ef6f46accf29 100644
--- a/include/sound/wavefront.h
+++ b/include/sound/wavefront.h
@@ -8,34 +8,6 @@
* Copyright (c) by Paul Barton-Davis <pbd@op.net>
*/
-#if (!defined(__GNUC__) && !defined(__GNUG__))
-
- You will not be able to compile this file correctly without gcc, because
- it is necessary to pack the "wavefront_alias" structure to a size
- of 22 bytes, corresponding to 16-bit alignment (as would have been
- the case on the original platform, MS-DOS). If this is not done,
- then WavePatch-format files cannot be read/written correctly.
- The method used to do this here ("__attribute__((packed)") is
- completely compiler dependent.
-
- All other wavefront_* types end up aligned to 32 bit values and
- still have the same (correct) size.
-
-#else
-
- /* However, note that as of G++ 2.7.3.2, g++ was unable to
- correctly parse *type* __attribute__ tags. It will do the
- right thing if we use the "packed" attribute on each struct
- member, which has the same semantics anyway.
- */
-
-#endif /* __GNUC__ */
-
-/***************************** WARNING ********************************
- PLEASE DO NOT MODIFY THIS FILE IN ANY WAY THAT AFFECTS ITS ABILITY TO
- BE USED WITH EITHER C *OR* C++.
- **********************************************************************/
-
#ifndef NUM_MIDIKEYS
#define NUM_MIDIKEYS 128
#endif /* NUM_MIDIKEYS */
@@ -44,29 +16,6 @@
#define NUM_MIDICHANNELS 16
#endif /* NUM_MIDICHANNELS */
-/* These are very useful/important. the original wavefront interface
- was developed on a 16 bit system, where sizeof(int) = 2
- bytes. Defining things like this makes the code much more portable, and
- easier to understand without having to toggle back and forth
- between a 16-bit view of the world and a 32-bit one.
- */
-
-#ifndef __KERNEL__
-/* keep them for compatibility */
-typedef short s16;
-typedef unsigned short u16;
-typedef int s32;
-typedef unsigned int u32;
-typedef char s8;
-typedef unsigned char u8;
-typedef s16 INT16;
-typedef u16 UINT16;
-typedef s32 INT32;
-typedef u32 UINT32;
-typedef s8 CHAR8;
-typedef u8 UCHAR8;
-#endif
-
/* Pseudo-commands not part of the WaveFront command set.
These are used for various driver controls and direct
hardware control.
@@ -468,7 +417,7 @@ typedef struct wf_alias {
*/
u8 sixteen_bit_padding;
-} __attribute__((packed)) wavefront_alias;
+} __packed wavefront_alias;
typedef struct wf_drum {
u8 PatchNumber;
diff --git a/include/sound/wm0010.h b/include/sound/wm0010.h
index 13b473935ca1..14ff9056c5d0 100644
--- a/include/sound/wm0010.h
+++ b/include/sound/wm0010.h
@@ -11,12 +11,6 @@
#define WM0010_PDATA_H
struct wm0010_pdata {
- int gpio_reset;
-
- /* Set if there is an inverter between the GPIO controlling
- * the reset signal and the device.
- */
- int reset_active_high;
int irq_flags;
};
diff --git a/include/sound/wm1250-ev1.h b/include/sound/wm1250-ev1.h
deleted file mode 100644
index d16614ebecb4..000000000000
--- a/include/sound/wm1250-ev1.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * linux/sound/wm1250-ev1.h - Platform data for WM1250-EV1
- *
- * Copyright 2011 Wolfson Microelectronics. PLC.
- */
-
-#ifndef __LINUX_SND_WM1250_EV1_H
-#define __LINUX_SND_WM1250_EV1_H
-
-#define WM1250_EV1_NUM_GPIOS 5
-
-#define WM1250_EV1_GPIO_CLK_ENA 0
-#define WM1250_EV1_GPIO_CLK_SEL0 1
-#define WM1250_EV1_GPIO_CLK_SEL1 2
-#define WM1250_EV1_GPIO_OSR 3
-#define WM1250_EV1_GPIO_MASTER 4
-
-
-struct wm1250_ev1_pdata {
- int gpios[WM1250_EV1_NUM_GPIOS];
-};
-
-#endif
diff --git a/include/sound/wm2200.h b/include/sound/wm2200.h
index 9987e6c09bdc..2e4913ee2505 100644
--- a/include/sound/wm2200.h
+++ b/include/sound/wm2200.h
@@ -42,8 +42,6 @@ struct wm2200_micbias {
};
struct wm2200_pdata {
- int reset; /** GPIO controlling /RESET, if any */
- int ldo_ena; /** GPIO controlling LODENA, if any */
int irq_flags;
int gpio_defaults[4];
diff --git a/include/sound/wm5100.h b/include/sound/wm5100.h
index b94badf72947..1c48090fdb2c 100644
--- a/include/sound/wm5100.h
+++ b/include/sound/wm5100.h
@@ -36,11 +36,7 @@ struct wm5100_jack_mode {
#define WM5100_GPIO_SET 0x10000
struct wm5100_pdata {
- int reset; /** GPIO controlling /RESET, if any */
- int ldo_ena; /** GPIO controlling LODENA, if any */
- int hp_pol; /** GPIO controlling headset polarity, if any */
int irq_flags;
- int gpio_base;
struct wm5100_jack_mode jack_modes[2];
diff --git a/include/sound/wm8996.h b/include/sound/wm8996.h
index 247f9917e33d..342abeef288f 100644
--- a/include/sound/wm8996.h
+++ b/include/sound/wm8996.h
@@ -33,8 +33,6 @@ struct wm8996_retune_mobile_config {
struct wm8996_pdata {
int irq_flags; /** Set IRQ trigger flags; default active low */
- int ldo_ena; /** GPIO for LDO1; -1 for none */
-
int micdet_def; /** Default MICDET_SRC/HP1FB_SRC/MICD_BIAS */
enum wm8996_inmode inl_mode;
@@ -42,7 +40,6 @@ struct wm8996_pdata {
u32 spkmute_seq; /** Value for register 0x802 */
- int gpio_base;
u32 gpio_default[5];
int num_retune_mobile_cfgs;
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 4c15420e8965..60af7c63b34e 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -50,9 +50,6 @@ struct sock;
#define TA_LOGIN_TIMEOUT 15
#define TA_LOGIN_TIMEOUT_MAX 30
#define TA_LOGIN_TIMEOUT_MIN 5
-#define TA_NETIF_TIMEOUT 2
-#define TA_NETIF_TIMEOUT_MAX 15
-#define TA_NETIF_TIMEOUT_MIN 2
#define TA_GENERATE_NODE_ACLS 0
#define TA_DEFAULT_CMDSN_DEPTH 64
#define TA_DEFAULT_CMDSN_DEPTH_MAX 512
@@ -773,7 +770,6 @@ to_iscsi_nacl(struct se_node_acl *se_nacl)
struct iscsi_tpg_attrib {
u32 authentication;
u32 login_timeout;
- u32 netif_timeout;
u32 generate_node_acls;
u32 cache_dynamic_acls;
u32 default_cmdsn_depth;
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index a3c193df25b3..739df993aa5e 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -62,13 +62,17 @@ struct target_backend_ops {
struct configfs_attribute **tb_dev_action_attrs;
};
-struct sbc_ops {
+struct exec_cmd_ops {
sense_reason_t (*execute_rw)(struct se_cmd *cmd, struct scatterlist *,
u32, enum dma_data_direction);
sense_reason_t (*execute_sync_cache)(struct se_cmd *cmd);
sense_reason_t (*execute_write_same)(struct se_cmd *cmd);
sense_reason_t (*execute_unmap)(struct se_cmd *cmd,
sector_t lba, sector_t nolb);
+ sense_reason_t (*execute_pr_out)(struct se_cmd *cmd, u8 sa, u64 key,
+ u64 sa_key, u8 type, bool aptpl);
+ sense_reason_t (*execute_pr_in)(struct se_cmd *cmd, u8 sa,
+ unsigned char *param_data);
};
int transport_backend_register(const struct target_backend_ops *);
@@ -86,7 +90,7 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd);
sense_reason_t spc_emulate_inquiry_std(struct se_cmd *, unsigned char *);
sense_reason_t spc_emulate_evpd_83(struct se_cmd *, unsigned char *);
-sense_reason_t sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops);
+sense_reason_t sbc_parse_cdb(struct se_cmd *cmd, struct exec_cmd_ops *ops);
u32 sbc_get_device_rev(struct se_device *dev);
u32 sbc_get_device_type(struct se_device *dev);
sector_t sbc_get_write_same_sectors(struct se_cmd *cmd);
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 5f8e96f1516f..97099a5e3f6c 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -108,6 +108,15 @@
#define SE_MODE_PAGE_BUF 512
#define SE_SENSE_BUF 96
+enum target_submit_type {
+ /* Use the fabric driver's default submission type */
+ TARGET_FABRIC_DEFAULT_SUBMIT,
+ /* Submit from the calling context */
+ TARGET_DIRECT_SUBMIT,
+ /* Defer submission to the LIO workqueue */
+ TARGET_QUEUE_SUBMIT,
+};
+
/* struct se_hba->hba_flags */
enum hba_flags_table {
HBA_FLAGS_INTERNAL_USE = 0x01,
@@ -717,6 +726,7 @@ struct se_dev_attrib {
u32 unmap_granularity;
u32 unmap_granularity_alignment;
u32 max_write_same_len;
+ u8 submit_type;
struct se_device *da_dev;
struct config_group da_group;
};
@@ -880,7 +890,8 @@ struct target_opcode_descriptor {
u8 specific_timeout;
u16 nominal_timeout;
u16 recommended_timeout;
- bool (*enabled)(struct se_cmd *cmd);
+ bool (*enabled)(struct target_opcode_descriptor *descr,
+ struct se_cmd *cmd);
void (*update_usage_bits)(u8 *usage_bits,
struct se_device *dev);
u8 usage_bits[];
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index b188b1e90e1e..3378ff9ee271 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -113,11 +113,20 @@ struct target_core_fabric_ops {
struct configfs_attribute **tfc_tpg_nacl_param_attrs;
/*
- * Set this member variable to true if the SCSI transport protocol
+ * Set this member variable if the SCSI transport protocol
* (e.g. iSCSI) requires that the Data-Out buffer is transferred in
* its entirety before a command is aborted.
*/
- bool write_pending_must_be_called;
+ unsigned int write_pending_must_be_called:1;
+ /*
+ * Set this if the driver supports submitting commands to the backend
+ * from target_submit/target_submit_cmd.
+ */
+ unsigned int direct_submit_supp:1;
+ /*
+ * Set this to a target_submit_type value.
+ */
+ u8 default_submit_type;
};
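/*
 * A hedged sketch of how a fabric driver might opt into the new
 * submission-type plumbing; all other template members are elided and
 * the values shown are illustrative defaults, not requirements.
 */
static const struct target_core_fabric_ops example_fabric_ops = {
	/* Data-Out must complete before an abort is processed */
	.write_pending_must_be_called	= 1,
	/* backend may be called directly from target_submit() */
	.direct_submit_supp		= 1,
	/* let the user/fabric default decide between direct and queued */
	.default_submit_type		= TARGET_FABRIC_DEFAULT_SUBMIT,
};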
int target_register_template(const struct target_core_fabric_ops *fo);
@@ -166,20 +175,18 @@ int target_submit_prep(struct se_cmd *se_cmd, unsigned char *cdb,
struct scatterlist *sgl, u32 sgl_count,
struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
struct scatterlist *sgl_prot, u32 sgl_prot_count, gfp_t gfp);
-void target_submit(struct se_cmd *se_cmd);
+int target_submit(struct se_cmd *se_cmd);
sense_reason_t transport_lookup_cmd_lun(struct se_cmd *);
sense_reason_t target_cmd_init_cdb(struct se_cmd *se_cmd, unsigned char *cdb,
gfp_t gfp);
sense_reason_t target_cmd_parse_cdb(struct se_cmd *);
void target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *,
unsigned char *, u64, u32, int, int, int);
-void target_queue_submission(struct se_cmd *se_cmd);
int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
unsigned char *sense, u64 unpacked_lun,
void *fabric_tmr_ptr, unsigned char tm_type,
gfp_t, u64, int);
-int transport_handle_cdb_direct(struct se_cmd *);
sense_reason_t transport_generic_new_cmd(struct se_cmd *);
void target_put_cmd_and_wait(struct se_cmd *cmd);
@@ -197,8 +204,6 @@ void target_stop_session(struct se_session *se_sess);
void target_wait_for_sess_cmds(struct se_session *);
void target_show_cmd(const char *pfx, struct se_cmd *cmd);
-int core_alua_check_nonop_delay(struct se_cmd *);
-
int core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t);
void core_tmr_release_req(struct se_tmr_req *);
int transport_generic_handle_tmr(struct se_cmd *);
diff --git a/include/trace/bpf_probe.h b/include/trace/bpf_probe.h
index 1f7fc1fc590c..e609cd7da47e 100644
--- a/include/trace/bpf_probe.h
+++ b/include/trace/bpf_probe.h
@@ -12,6 +12,8 @@
#undef __perf_task
#define __perf_task(t) (t)
+#include <linux/args.h>
+
/* cast any integer, pointer, or small struct to u64 */
#define UINTTYPE(size) \
__typeof__(__builtin_choose_expr(size == 1, (u8)1, \
diff --git a/include/trace/events/9p.h b/include/trace/events/9p.h
index 4dfa6d7f83ba..cd104a1343e2 100644
--- a/include/trace/events/9p.h
+++ b/include/trace/events/9p.h
@@ -178,18 +178,21 @@ TRACE_EVENT(9p_protocol_dump,
__field( void *, clnt )
__field( __u8, type )
__field( __u16, tag )
- __array( unsigned char, line, P9_PROTO_DUMP_SZ )
+ __dynamic_array(unsigned char, line,
+ min_t(size_t, pdu->capacity, P9_PROTO_DUMP_SZ))
),
TP_fast_assign(
__entry->clnt = clnt;
__entry->type = pdu->id;
__entry->tag = pdu->tag;
- memcpy(__entry->line, pdu->sdata, P9_PROTO_DUMP_SZ);
+ memcpy(__get_dynamic_array(line), pdu->sdata,
+ __get_dynamic_array_len(line));
),
- TP_printk("clnt %lu %s(tag = %d)\n%.3x: %16ph\n%.3x: %16ph\n",
+ TP_printk("clnt %lu %s(tag = %d)\n%*ph\n",
(unsigned long)__entry->clnt, show_9p_op(__entry->type),
- __entry->tag, 0, __entry->line, 16, __entry->line + 16)
+ __entry->tag, __get_dynamic_array_len(line),
+ __get_dynamic_array(line))
);
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
index e9d412d19dbb..08f2c93d6b16 100644
--- a/include/trace/events/afs.h
+++ b/include/trace/events/afs.h
@@ -18,97 +18,6 @@
#ifndef __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
#define __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
-enum afs_call_trace {
- afs_call_trace_alloc,
- afs_call_trace_free,
- afs_call_trace_get,
- afs_call_trace_put,
- afs_call_trace_wake,
- afs_call_trace_work,
-};
-
-enum afs_server_trace {
- afs_server_trace_alloc,
- afs_server_trace_callback,
- afs_server_trace_destroy,
- afs_server_trace_free,
- afs_server_trace_gc,
- afs_server_trace_get_by_addr,
- afs_server_trace_get_by_uuid,
- afs_server_trace_get_caps,
- afs_server_trace_get_install,
- afs_server_trace_get_new_cbi,
- afs_server_trace_get_probe,
- afs_server_trace_give_up_cb,
- afs_server_trace_purging,
- afs_server_trace_put_call,
- afs_server_trace_put_cbi,
- afs_server_trace_put_find_rsq,
- afs_server_trace_put_probe,
- afs_server_trace_put_slist,
- afs_server_trace_put_slist_isort,
- afs_server_trace_put_uuid_rsq,
- afs_server_trace_update,
-};
-
-
-enum afs_volume_trace {
- afs_volume_trace_alloc,
- afs_volume_trace_free,
- afs_volume_trace_get_alloc_sbi,
- afs_volume_trace_get_cell_insert,
- afs_volume_trace_get_new_op,
- afs_volume_trace_get_query_alias,
- afs_volume_trace_put_cell_dup,
- afs_volume_trace_put_cell_root,
- afs_volume_trace_put_destroy_sbi,
- afs_volume_trace_put_free_fc,
- afs_volume_trace_put_put_op,
- afs_volume_trace_put_query_alias,
- afs_volume_trace_put_validate_fc,
- afs_volume_trace_remove,
-};
-
-enum afs_cell_trace {
- afs_cell_trace_alloc,
- afs_cell_trace_free,
- afs_cell_trace_get_queue_dns,
- afs_cell_trace_get_queue_manage,
- afs_cell_trace_get_queue_new,
- afs_cell_trace_get_vol,
- afs_cell_trace_insert,
- afs_cell_trace_manage,
- afs_cell_trace_put_candidate,
- afs_cell_trace_put_destroy,
- afs_cell_trace_put_queue_fail,
- afs_cell_trace_put_queue_work,
- afs_cell_trace_put_vol,
- afs_cell_trace_see_source,
- afs_cell_trace_see_ws,
- afs_cell_trace_unuse_alias,
- afs_cell_trace_unuse_check_alias,
- afs_cell_trace_unuse_delete,
- afs_cell_trace_unuse_fc,
- afs_cell_trace_unuse_lookup,
- afs_cell_trace_unuse_mntpt,
- afs_cell_trace_unuse_no_pin,
- afs_cell_trace_unuse_parse,
- afs_cell_trace_unuse_pin,
- afs_cell_trace_unuse_probe,
- afs_cell_trace_unuse_sbi,
- afs_cell_trace_unuse_ws,
- afs_cell_trace_use_alias,
- afs_cell_trace_use_check_alias,
- afs_cell_trace_use_fc,
- afs_cell_trace_use_fc_alias,
- afs_cell_trace_use_lookup,
- afs_cell_trace_use_mntpt,
- afs_cell_trace_use_pin,
- afs_cell_trace_use_probe,
- afs_cell_trace_use_sbi,
- afs_cell_trace_wait,
-};
-
enum afs_fs_operation {
afs_FS_FetchData = 130, /* AFS Fetch file data */
afs_FS_FetchACL = 131, /* AFS Fetch file ACL */
@@ -202,121 +111,6 @@ enum yfs_cm_operation {
yfs_CB_CallBack = 64204,
};
-enum afs_edit_dir_op {
- afs_edit_dir_create,
- afs_edit_dir_create_error,
- afs_edit_dir_create_inval,
- afs_edit_dir_create_nospc,
- afs_edit_dir_delete,
- afs_edit_dir_delete_error,
- afs_edit_dir_delete_inval,
- afs_edit_dir_delete_noent,
-};
-
-enum afs_edit_dir_reason {
- afs_edit_dir_for_create,
- afs_edit_dir_for_link,
- afs_edit_dir_for_mkdir,
- afs_edit_dir_for_rename_0,
- afs_edit_dir_for_rename_1,
- afs_edit_dir_for_rename_2,
- afs_edit_dir_for_rmdir,
- afs_edit_dir_for_silly_0,
- afs_edit_dir_for_silly_1,
- afs_edit_dir_for_symlink,
- afs_edit_dir_for_unlink,
-};
-
-enum afs_eproto_cause {
- afs_eproto_bad_status,
- afs_eproto_cb_count,
- afs_eproto_cb_fid_count,
- afs_eproto_cellname_len,
- afs_eproto_file_type,
- afs_eproto_ibulkst_cb_count,
- afs_eproto_ibulkst_count,
- afs_eproto_motd_len,
- afs_eproto_offline_msg_len,
- afs_eproto_volname_len,
- afs_eproto_yvl_fsendpt4_len,
- afs_eproto_yvl_fsendpt6_len,
- afs_eproto_yvl_fsendpt_num,
- afs_eproto_yvl_fsendpt_type,
- afs_eproto_yvl_vlendpt4_len,
- afs_eproto_yvl_vlendpt6_len,
- afs_eproto_yvl_vlendpt_type,
-};
-
-enum afs_io_error {
- afs_io_error_cm_reply,
- afs_io_error_extract,
- afs_io_error_fs_probe_fail,
- afs_io_error_vl_lookup_fail,
- afs_io_error_vl_probe_fail,
-};
-
-enum afs_file_error {
- afs_file_error_dir_bad_magic,
- afs_file_error_dir_big,
- afs_file_error_dir_missing_page,
- afs_file_error_dir_name_too_long,
- afs_file_error_dir_over_end,
- afs_file_error_dir_small,
- afs_file_error_dir_unmarked_ext,
- afs_file_error_mntpt,
- afs_file_error_writeback_fail,
-};
-
-enum afs_flock_event {
- afs_flock_acquired,
- afs_flock_callback_break,
- afs_flock_defer_unlock,
- afs_flock_extend_fail,
- afs_flock_fail_other,
- afs_flock_fail_perm,
- afs_flock_no_lockers,
- afs_flock_release_fail,
- afs_flock_silly_delete,
- afs_flock_timestamp,
- afs_flock_try_to_lock,
- afs_flock_vfs_lock,
- afs_flock_vfs_locking,
- afs_flock_waited,
- afs_flock_waiting,
- afs_flock_work_extending,
- afs_flock_work_retry,
- afs_flock_work_unlocking,
- afs_flock_would_block,
-};
-
-enum afs_flock_operation {
- afs_flock_op_copy_lock,
- afs_flock_op_flock,
- afs_flock_op_grant,
- afs_flock_op_lock,
- afs_flock_op_release_lock,
- afs_flock_op_return_ok,
- afs_flock_op_return_eagain,
- afs_flock_op_return_edeadlk,
- afs_flock_op_return_error,
- afs_flock_op_set_lock,
- afs_flock_op_unlock,
- afs_flock_op_wake,
-};
-
-enum afs_cb_break_reason {
- afs_cb_break_no_break,
- afs_cb_break_no_promise,
- afs_cb_break_for_callback,
- afs_cb_break_for_deleted,
- afs_cb_break_for_lapsed,
- afs_cb_break_for_s_reinit,
- afs_cb_break_for_unlink,
- afs_cb_break_for_v_break,
- afs_cb_break_for_volume_callback,
- afs_cb_break_for_zap,
-};
-
#endif /* end __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY */
/*
@@ -357,9 +151,11 @@ enum afs_cb_break_reason {
EM(afs_volume_trace_alloc, "ALLOC ") \
EM(afs_volume_trace_free, "FREE ") \
EM(afs_volume_trace_get_alloc_sbi, "GET sbi-alloc ") \
+ EM(afs_volume_trace_get_callback, "GET callback ") \
EM(afs_volume_trace_get_cell_insert, "GET cell-insrt") \
EM(afs_volume_trace_get_new_op, "GET op-new ") \
EM(afs_volume_trace_get_query_alias, "GET cell-alias") \
+ EM(afs_volume_trace_put_callback, "PUT callback ") \
EM(afs_volume_trace_put_cell_dup, "PUT cell-dup ") \
EM(afs_volume_trace_put_cell_root, "PUT cell-root ") \
EM(afs_volume_trace_put_destroy_sbi, "PUT sbi-destry") \
@@ -391,6 +187,7 @@ enum afs_cb_break_reason {
EM(afs_cell_trace_unuse_fc, "UNU fc ") \
EM(afs_cell_trace_unuse_lookup, "UNU lookup") \
EM(afs_cell_trace_unuse_mntpt, "UNU mntpt ") \
+ EM(afs_cell_trace_unuse_no_pin, "UNU no-pin") \
EM(afs_cell_trace_unuse_parse, "UNU parse ") \
EM(afs_cell_trace_unuse_pin, "UNU pin ") \
EM(afs_cell_trace_unuse_probe, "UNU probe ") \
@@ -407,6 +204,40 @@ enum afs_cb_break_reason {
EM(afs_cell_trace_use_sbi, "USE sbi ") \
E_(afs_cell_trace_wait, "WAIT ")
+#define afs_alist_traces \
+ EM(afs_alist_trace_alloc, "ALLOC ") \
+ EM(afs_alist_trace_get_estate, "GET estate") \
+ EM(afs_alist_trace_get_vlgetcaps, "GET vgtcap") \
+ EM(afs_alist_trace_get_vlprobe, "GET vprobe") \
+ EM(afs_alist_trace_get_vlrotate_set, "GET vl-rot") \
+ EM(afs_alist_trace_put_estate, "PUT estate") \
+ EM(afs_alist_trace_put_getaddru, "PUT GtAdrU") \
+ EM(afs_alist_trace_put_parse_empty, "PUT p-empt") \
+ EM(afs_alist_trace_put_parse_error, "PUT p-err ") \
+ EM(afs_alist_trace_put_server_dup, "PUT sv-dup") \
+ EM(afs_alist_trace_put_server_oom, "PUT sv-oom") \
+ EM(afs_alist_trace_put_server_update, "PUT sv-upd") \
+ EM(afs_alist_trace_put_vlgetcaps, "PUT vgtcap") \
+ EM(afs_alist_trace_put_vlprobe, "PUT vprobe") \
+ EM(afs_alist_trace_put_vlrotate_end, "PUT vr-end") \
+ EM(afs_alist_trace_put_vlrotate_fail, "PUT vr-fai") \
+ EM(afs_alist_trace_put_vlrotate_next, "PUT vr-nxt") \
+ EM(afs_alist_trace_put_vlrotate_restart,"PUT vr-rst") \
+ EM(afs_alist_trace_put_vlserver, "PUT vlsrvr") \
+ EM(afs_alist_trace_put_vlserver_old, "PUT vs-old") \
+ E_(afs_alist_trace_free, "FREE ")
+
+#define afs_estate_traces \
+ EM(afs_estate_trace_alloc_probe, "ALLOC prob") \
+ EM(afs_estate_trace_alloc_server, "ALLOC srvr") \
+ EM(afs_estate_trace_get_server_state, "GET srv-st") \
+ EM(afs_estate_trace_get_getcaps, "GET getcap") \
+ EM(afs_estate_trace_put_getcaps, "PUT getcap") \
+ EM(afs_estate_trace_put_probe, "PUT probe ") \
+ EM(afs_estate_trace_put_server, "PUT server") \
+ EM(afs_estate_trace_put_server_state, "PUT srv-st") \
+ E_(afs_estate_trace_free, "FREE ")
+
#define afs_fs_operations \
EM(afs_FS_FetchData, "FS.FetchData") \
EM(afs_FS_FetchStatus, "FS.FetchStatus") \
@@ -604,15 +435,67 @@ enum afs_cb_break_reason {
#define afs_cb_break_reasons \
EM(afs_cb_break_no_break, "no-break") \
- EM(afs_cb_break_no_promise, "no-promise") \
EM(afs_cb_break_for_callback, "break-cb") \
+ EM(afs_cb_break_for_creation_regress, "creation-regress") \
EM(afs_cb_break_for_deleted, "break-del") \
- EM(afs_cb_break_for_lapsed, "break-lapsed") \
EM(afs_cb_break_for_s_reinit, "s-reinit") \
EM(afs_cb_break_for_unlink, "break-unlink") \
- EM(afs_cb_break_for_v_break, "break-v") \
+ EM(afs_cb_break_for_update_regress, "update-regress") \
EM(afs_cb_break_for_volume_callback, "break-v-cb") \
- E_(afs_cb_break_for_zap, "break-zap")
+ EM(afs_cb_break_for_vos_release, "break-vos-release") \
+ E_(afs_cb_break_volume_excluded, "vol-excluded")
+
+#define afs_rotate_traces \
+ EM(afs_rotate_trace_aborted, "Abortd") \
+ EM(afs_rotate_trace_busy_sleep, "BsySlp") \
+ EM(afs_rotate_trace_check_vol_status, "VolStt") \
+ EM(afs_rotate_trace_failed, "Failed") \
+ EM(afs_rotate_trace_iter, "Iter ") \
+ EM(afs_rotate_trace_iterate_addr, "ItAddr") \
+ EM(afs_rotate_trace_next_server, "NextSv") \
+ EM(afs_rotate_trace_no_more_servers, "NoMore") \
+ EM(afs_rotate_trace_nomem, "Nomem ") \
+ EM(afs_rotate_trace_probe_error, "PrbErr") \
+ EM(afs_rotate_trace_probe_fileserver, "PrbFsv") \
+ EM(afs_rotate_trace_probe_none, "PrbNon") \
+ EM(afs_rotate_trace_probe_response, "PrbRsp") \
+ EM(afs_rotate_trace_probe_superseded, "PrbSup") \
+ EM(afs_rotate_trace_restart, "Rstart") \
+ EM(afs_rotate_trace_retry_server, "RtrySv") \
+ EM(afs_rotate_trace_selected_server, "SlctSv") \
+ EM(afs_rotate_trace_stale_lock, "StlLck") \
+ EM(afs_rotate_trace_start, "Start ") \
+ EM(afs_rotate_trace_stop, "Stop ") \
+ E_(afs_rotate_trace_stopped, "Stoppd")
+
+/*
+ * Generate enums for tracing information.
+ */
+#ifndef __AFS_GENERATE_TRACE_ENUMS_ONCE_ONLY
+#define __AFS_GENERATE_TRACE_ENUMS_ONCE_ONLY
+
+#undef EM
+#undef E_
+#define EM(a, b) a,
+#define E_(a, b) a
+
+enum afs_alist_trace { afs_alist_traces } __mode(byte);
+enum afs_call_trace { afs_call_traces } __mode(byte);
+enum afs_cb_break_reason { afs_cb_break_reasons } __mode(byte);
+enum afs_cell_trace { afs_cell_traces } __mode(byte);
+enum afs_edit_dir_op { afs_edit_dir_ops } __mode(byte);
+enum afs_edit_dir_reason { afs_edit_dir_reasons } __mode(byte);
+enum afs_eproto_cause { afs_eproto_causes } __mode(byte);
+enum afs_estate_trace { afs_estate_traces } __mode(byte);
+enum afs_file_error { afs_file_errors } __mode(byte);
+enum afs_flock_event { afs_flock_events } __mode(byte);
+enum afs_flock_operation { afs_flock_operations } __mode(byte);
+enum afs_io_error { afs_io_errors } __mode(byte);
+enum afs_rotate_trace { afs_rotate_traces } __mode(byte);
+enum afs_server_trace { afs_server_traces } __mode(byte);
+enum afs_volume_trace { afs_volume_traces } __mode(byte);
+
+#endif /* end __AFS_GENERATE_TRACE_ENUMS_ONCE_ONLY */
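/*
 * The EM()/E_() X-macro pattern above defines each table once and
 * expands it twice: first as enum members, later as enum-to-string
 * pairs.  A miniature standalone version of the same trick:
 */
#include <stdio.h>

#define example_traces \
	EM(example_trace_alloc,	"ALLOC") \
	E_(example_trace_free,	"FREE ")

#define EM(a, b) a,
#define E_(a, b) a
enum example_trace { example_traces };
#undef EM
#undef E_

#define EM(a, b) { a, b },
#define E_(a, b) { a, b }
static const struct { int val; const char *name; } example_names[] = {
	example_traces
};
#undef EM
#undef E_

int main(void)
{
	printf("%s\n", example_names[example_trace_free].name);	/* FREE */
	return 0;
}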
/*
* Export enum symbols via userspace.
@@ -622,21 +505,24 @@ enum afs_cb_break_reason {
#define EM(a, b) TRACE_DEFINE_ENUM(a);
#define E_(a, b) TRACE_DEFINE_ENUM(a);
+afs_alist_traces;
afs_call_traces;
-afs_server_traces;
+afs_cb_break_reasons;
afs_cell_traces;
-afs_fs_operations;
-afs_vl_operations;
afs_cm_operations;
-yfs_cm_operations;
afs_edit_dir_ops;
afs_edit_dir_reasons;
afs_eproto_causes;
-afs_io_errors;
+afs_estate_traces;
afs_file_errors;
-afs_flock_types;
afs_flock_operations;
-afs_cb_break_reasons;
+afs_flock_types;
+afs_fs_operations;
+afs_io_errors;
+afs_rotate_traces;
+afs_server_traces;
+afs_vl_operations;
+yfs_cm_operations;
/*
* Now redefine the EM() and E_() macros to map the enums to the strings that
@@ -654,12 +540,12 @@ TRACE_EVENT(afs_receive_data,
TP_ARGS(call, iter, want_more, ret),
TP_STRUCT__entry(
- __field(loff_t, remain )
- __field(unsigned int, call )
- __field(enum afs_call_state, state )
- __field(unsigned short, unmarshall )
- __field(bool, want_more )
- __field(int, ret )
+ __field(loff_t, remain)
+ __field(unsigned int, call)
+ __field(enum afs_call_state, state)
+ __field(unsigned short, unmarshall)
+ __field(bool, want_more)
+ __field(int, ret)
),
TP_fast_assign(
@@ -686,9 +572,9 @@ TRACE_EVENT(afs_notify_call,
TP_ARGS(rxcall, call),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum afs_call_state, state )
- __field(unsigned short, unmarshall )
+ __field(unsigned int, call)
+ __field(enum afs_call_state, state)
+ __field(unsigned short, unmarshall)
),
TP_fast_assign(
@@ -708,9 +594,9 @@ TRACE_EVENT(afs_cb_call,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(u32, op )
- __field(u16, service_id )
+ __field(unsigned int, call)
+ __field(u32, op)
+ __field(u16, service_id)
),
TP_fast_assign(
@@ -733,11 +619,11 @@ TRACE_EVENT(afs_call,
TP_ARGS(call_debug_id, op, ref, outstanding, where),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(int, op )
- __field(int, ref )
- __field(int, outstanding )
- __field(const void *, where )
+ __field(unsigned int, call)
+ __field(int, op)
+ __field(int, ref)
+ __field(int, outstanding)
+ __field(const void *, where)
),
TP_fast_assign(
@@ -762,9 +648,9 @@ TRACE_EVENT(afs_make_fs_call,
TP_ARGS(call, fid),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum afs_fs_operation, op )
- __field_struct(struct afs_fid, fid )
+ __field(unsigned int, call)
+ __field(enum afs_fs_operation, op)
+ __field_struct(struct afs_fid, fid)
),
TP_fast_assign(
@@ -794,10 +680,10 @@ TRACE_EVENT(afs_make_fs_calli,
TP_ARGS(call, fid, i),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(unsigned int, i )
- __field(enum afs_fs_operation, op )
- __field_struct(struct afs_fid, fid )
+ __field(unsigned int, call)
+ __field(unsigned int, i)
+ __field(enum afs_fs_operation, op)
+ __field_struct(struct afs_fid, fid)
),
TP_fast_assign(
@@ -829,10 +715,10 @@ TRACE_EVENT(afs_make_fs_call1,
TP_ARGS(call, fid, name),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum afs_fs_operation, op )
- __field_struct(struct afs_fid, fid )
- __array(char, name, 24 )
+ __field(unsigned int, call)
+ __field(enum afs_fs_operation, op)
+ __field_struct(struct afs_fid, fid)
+ __array(char, name, 24)
),
TP_fast_assign(
@@ -866,11 +752,11 @@ TRACE_EVENT(afs_make_fs_call2,
TP_ARGS(call, fid, name, name2),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum afs_fs_operation, op )
- __field_struct(struct afs_fid, fid )
- __array(char, name, 24 )
- __array(char, name2, 24 )
+ __field(unsigned int, call)
+ __field(enum afs_fs_operation, op)
+ __field_struct(struct afs_fid, fid)
+ __array(char, name, 24)
+ __array(char, name2, 24)
),
TP_fast_assign(
@@ -907,8 +793,8 @@ TRACE_EVENT(afs_make_vl_call,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum afs_vl_operation, op )
+ __field(unsigned int, call)
+ __field(enum afs_vl_operation, op)
),
TP_fast_assign(
@@ -927,10 +813,10 @@ TRACE_EVENT(afs_call_done,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(struct rxrpc_call *, rx_call )
- __field(int, ret )
- __field(u32, abort_code )
+ __field(unsigned int, call)
+ __field(struct rxrpc_call *, rx_call)
+ __field(int, ret)
+ __field(u32, abort_code)
),
TP_fast_assign(
@@ -953,10 +839,10 @@ TRACE_EVENT(afs_send_data,
TP_ARGS(call, msg),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(unsigned int, flags )
- __field(loff_t, offset )
- __field(loff_t, count )
+ __field(unsigned int, call)
+ __field(unsigned int, flags)
+ __field(loff_t, offset)
+ __field(loff_t, count)
),
TP_fast_assign(
@@ -977,10 +863,10 @@ TRACE_EVENT(afs_sent_data,
TP_ARGS(call, msg, ret),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(int, ret )
- __field(loff_t, offset )
- __field(loff_t, count )
+ __field(unsigned int, call)
+ __field(int, ret)
+ __field(loff_t, offset)
+ __field(loff_t, count)
),
TP_fast_assign(
@@ -1001,9 +887,9 @@ TRACE_EVENT(afs_dir_check_failed,
TP_ARGS(vnode, off, i_size),
TP_STRUCT__entry(
- __field(struct afs_vnode *, vnode )
- __field(loff_t, off )
- __field(loff_t, i_size )
+ __field(struct afs_vnode *, vnode)
+ __field(loff_t, off)
+ __field(loff_t, i_size)
),
TP_fast_assign(
@@ -1016,37 +902,6 @@ TRACE_EVENT(afs_dir_check_failed,
__entry->vnode, __entry->off, __entry->i_size)
);
-TRACE_EVENT(afs_folio_dirty,
- TP_PROTO(struct afs_vnode *vnode, const char *where, struct folio *folio),
-
- TP_ARGS(vnode, where, folio),
-
- TP_STRUCT__entry(
- __field(struct afs_vnode *, vnode )
- __field(const char *, where )
- __field(pgoff_t, index )
- __field(unsigned long, from )
- __field(unsigned long, to )
- ),
-
- TP_fast_assign(
- unsigned long priv = (unsigned long)folio_get_private(folio);
- __entry->vnode = vnode;
- __entry->where = where;
- __entry->index = folio_index(folio);
- __entry->from = afs_folio_dirty_from(folio, priv);
- __entry->to = afs_folio_dirty_to(folio, priv);
- __entry->to |= (afs_is_folio_dirty_mmapped(priv) ?
- (1UL << (BITS_PER_LONG - 1)) : 0);
- ),
-
- TP_printk("vn=%p %lx %s %lx-%lx%s",
- __entry->vnode, __entry->index, __entry->where,
- __entry->from,
- __entry->to & ~(1UL << (BITS_PER_LONG - 1)),
- __entry->to & (1UL << (BITS_PER_LONG - 1)) ? " M" : "")
- );
-
TRACE_EVENT(afs_call_state,
TP_PROTO(struct afs_call *call,
enum afs_call_state from,
@@ -1056,11 +911,11 @@ TRACE_EVENT(afs_call_state,
TP_ARGS(call, from, to, ret, remote_abort),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum afs_call_state, from )
- __field(enum afs_call_state, to )
- __field(int, ret )
- __field(u32, abort )
+ __field(unsigned int, call)
+ __field(enum afs_call_state, from)
+ __field(enum afs_call_state, to)
+ __field(int, ret)
+ __field(u32, abort)
),
TP_fast_assign(
@@ -1084,9 +939,9 @@ TRACE_EVENT(afs_lookup,
TP_ARGS(dvnode, name, fid),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, dfid )
- __field_struct(struct afs_fid, fid )
- __array(char, name, 24 )
+ __field_struct(struct afs_fid, dfid)
+ __field_struct(struct afs_fid, fid)
+ __array(char, name, 24)
),
TP_fast_assign(
@@ -1116,15 +971,15 @@ TRACE_EVENT(afs_edit_dir,
TP_ARGS(dvnode, why, op, block, slot, f_vnode, f_unique, name),
TP_STRUCT__entry(
- __field(unsigned int, vnode )
- __field(unsigned int, unique )
- __field(enum afs_edit_dir_reason, why )
- __field(enum afs_edit_dir_op, op )
- __field(unsigned int, block )
- __field(unsigned short, slot )
- __field(unsigned int, f_vnode )
- __field(unsigned int, f_unique )
- __array(char, name, 24 )
+ __field(unsigned int, vnode)
+ __field(unsigned int, unique)
+ __field(enum afs_edit_dir_reason, why)
+ __field(enum afs_edit_dir_op, op)
+ __field(unsigned int, block)
+ __field(unsigned short, slot)
+ __field(unsigned int, f_vnode)
+ __field(unsigned int, f_unique)
+ __array(char, name, 24)
),
TP_fast_assign(
@@ -1157,8 +1012,8 @@ TRACE_EVENT(afs_protocol_error,
TP_ARGS(call, cause),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum afs_eproto_cause, cause )
+ __field(unsigned int, call)
+ __field(enum afs_eproto_cause, cause)
),
TP_fast_assign(
@@ -1177,9 +1032,9 @@ TRACE_EVENT(afs_io_error,
TP_ARGS(call, error, where),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(int, error )
- __field(enum afs_io_error, where )
+ __field(unsigned int, call)
+ __field(int, error)
+ __field(enum afs_io_error, where)
),
TP_fast_assign(
@@ -1199,9 +1054,9 @@ TRACE_EVENT(afs_file_error,
TP_ARGS(vnode, error, where),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, fid )
- __field(int, error )
- __field(enum afs_file_error, where )
+ __field_struct(struct afs_fid, fid)
+ __field(int, error)
+ __field(enum afs_file_error, where)
),
TP_fast_assign(
@@ -1216,15 +1071,40 @@ TRACE_EVENT(afs_file_error,
__print_symbolic(__entry->where, afs_file_errors))
);
+TRACE_EVENT(afs_bulkstat_error,
+ TP_PROTO(struct afs_operation *op, struct afs_fid *fid, unsigned int index, s32 abort),
+
+ TP_ARGS(op, fid, index, abort),
+
+ TP_STRUCT__entry(
+ __field_struct(struct afs_fid, fid)
+ __field(unsigned int, op)
+ __field(unsigned int, index)
+ __field(s32, abort)
+ ),
+
+ TP_fast_assign(
+ __entry->op = op->debug_id;
+ __entry->fid = *fid;
+ __entry->index = index;
+ __entry->abort = abort;
+ ),
+
+ TP_printk("OP=%08x[%02x] %llx:%llx:%x a=%d",
+ __entry->op, __entry->index,
+ __entry->fid.vid, __entry->fid.vnode, __entry->fid.unique,
+ __entry->abort)
+ );
+
TRACE_EVENT(afs_cm_no_server,
TP_PROTO(struct afs_call *call, struct sockaddr_rxrpc *srx),
TP_ARGS(call, srx),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(unsigned int, op_id )
- __field_struct(struct sockaddr_rxrpc, srx )
+ __field(unsigned int, call)
+ __field(unsigned int, op_id)
+ __field_struct(struct sockaddr_rxrpc, srx)
),
TP_fast_assign(
@@ -1243,9 +1123,9 @@ TRACE_EVENT(afs_cm_no_server_u,
TP_ARGS(call, uuid),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(unsigned int, op_id )
- __field_struct(uuid_t, uuid )
+ __field(unsigned int, call)
+ __field(unsigned int, op_id)
+ __field_struct(uuid_t, uuid)
),
TP_fast_assign(
@@ -1265,11 +1145,11 @@ TRACE_EVENT(afs_flock_ev,
TP_ARGS(vnode, fl, event, error),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, fid )
- __field(enum afs_flock_event, event )
- __field(enum afs_lock_state, state )
- __field(int, error )
- __field(unsigned int, debug_id )
+ __field_struct(struct afs_fid, fid)
+ __field(enum afs_flock_event, event)
+ __field(enum afs_lock_state, state)
+ __field(int, error)
+ __field(unsigned int, debug_id)
),
TP_fast_assign(
@@ -1295,13 +1175,13 @@ TRACE_EVENT(afs_flock_op,
TP_ARGS(vnode, fl, op),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, fid )
- __field(loff_t, from )
- __field(loff_t, len )
- __field(enum afs_flock_operation, op )
- __field(unsigned char, type )
- __field(unsigned int, flags )
- __field(unsigned int, debug_id )
+ __field_struct(struct afs_fid, fid)
+ __field(loff_t, from)
+ __field(loff_t, len)
+ __field(enum afs_flock_operation, op)
+ __field(unsigned char, type)
+ __field(unsigned int, flags)
+ __field(unsigned int, debug_id)
),
TP_fast_assign(
@@ -1328,7 +1208,7 @@ TRACE_EVENT(afs_reload_dir,
TP_ARGS(vnode),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, fid )
+ __field_struct(struct afs_fid, fid)
),
TP_fast_assign(
@@ -1345,8 +1225,8 @@ TRACE_EVENT(afs_silly_rename,
TP_ARGS(vnode, done),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, fid )
- __field(bool, done )
+ __field_struct(struct afs_fid, fid)
+ __field(bool, done)
),
TP_fast_assign(
@@ -1365,9 +1245,9 @@ TRACE_EVENT(afs_get_tree,
TP_ARGS(cell, volume),
TP_STRUCT__entry(
- __field(u64, vid )
- __array(char, cell, 24 )
- __array(char, volume, 24 )
+ __field(u64, vid)
+ __array(char, cell, 24)
+ __array(char, volume, 24)
),
TP_fast_assign(
@@ -1385,6 +1265,30 @@ TRACE_EVENT(afs_get_tree,
__entry->cell, __entry->volume, __entry->vid)
);
+TRACE_EVENT(afs_cb_v_break,
+ TP_PROTO(afs_volid_t vid, unsigned int cb_v_break,
+ enum afs_cb_break_reason reason),
+
+ TP_ARGS(vid, cb_v_break, reason),
+
+ TP_STRUCT__entry(
+ __field(afs_volid_t, vid)
+ __field(unsigned int, cb_v_break)
+ __field(enum afs_cb_break_reason, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->vid = vid;
+ __entry->cb_v_break = cb_v_break;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("%llx vb=%x %s",
+ __entry->vid,
+ __entry->cb_v_break,
+ __print_symbolic(__entry->reason, afs_cb_break_reasons))
+ );
+
TRACE_EVENT(afs_cb_break,
TP_PROTO(struct afs_fid *fid, unsigned int cb_break,
enum afs_cb_break_reason reason, bool skipped),
@@ -1392,10 +1296,10 @@ TRACE_EVENT(afs_cb_break,
TP_ARGS(fid, cb_break, reason, skipped),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, fid )
- __field(unsigned int, cb_break )
- __field(enum afs_cb_break_reason, reason )
- __field(bool, skipped )
+ __field_struct(struct afs_fid, fid)
+ __field(unsigned int, cb_break)
+ __field(enum afs_cb_break_reason, reason)
+ __field(bool, skipped)
),
TP_fast_assign(
@@ -1418,8 +1322,8 @@ TRACE_EVENT(afs_cb_miss,
TP_ARGS(fid, reason),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, fid )
- __field(enum afs_cb_break_reason, reason )
+ __field_struct(struct afs_fid, fid)
+ __field(enum afs_cb_break_reason, reason)
),
TP_fast_assign(
@@ -1439,10 +1343,10 @@ TRACE_EVENT(afs_server,
TP_ARGS(server_debug_id, ref, active, reason),
TP_STRUCT__entry(
- __field(unsigned int, server )
- __field(int, ref )
- __field(int, active )
- __field(int, reason )
+ __field(unsigned int, server)
+ __field(int, ref)
+ __field(int, active)
+ __field(int, reason)
),
TP_fast_assign(
@@ -1465,9 +1369,9 @@ TRACE_EVENT(afs_volume,
TP_ARGS(vid, ref, reason),
TP_STRUCT__entry(
- __field(afs_volid_t, vid )
- __field(int, ref )
- __field(enum afs_volume_trace, reason )
+ __field(afs_volid_t, vid)
+ __field(int, ref)
+ __field(enum afs_volume_trace, reason)
),
TP_fast_assign(
@@ -1489,10 +1393,10 @@ TRACE_EVENT(afs_cell,
TP_ARGS(cell_debug_id, ref, active, reason),
TP_STRUCT__entry(
- __field(unsigned int, cell )
- __field(int, ref )
- __field(int, active )
- __field(int, reason )
+ __field(unsigned int, cell)
+ __field(int, ref)
+ __field(int, active)
+ __field(int, reason)
),
TP_fast_assign(
@@ -1509,6 +1413,199 @@ TRACE_EVENT(afs_cell,
__entry->active)
);
+TRACE_EVENT(afs_alist,
+ TP_PROTO(unsigned int alist_debug_id, int ref, enum afs_alist_trace reason),
+
+ TP_ARGS(alist_debug_id, ref, reason),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, alist)
+ __field(int, ref)
+ __field(int, active)
+ __field(int, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->alist = alist_debug_id;
+ __entry->ref = ref;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("AL=%08x %s r=%d",
+ __entry->alist,
+ __print_symbolic(__entry->reason, afs_alist_traces),
+ __entry->ref)
+ );
+
+TRACE_EVENT(afs_estate,
+ TP_PROTO(unsigned int server_debug_id, unsigned int estate_debug_id,
+ int ref, enum afs_estate_trace reason),
+
+ TP_ARGS(server_debug_id, estate_debug_id, ref, reason),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, server)
+ __field(unsigned int, estate)
+ __field(int, ref)
+ __field(int, active)
+ __field(int, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->server = server_debug_id;
+ __entry->estate = estate_debug_id;
+ __entry->ref = ref;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("ES=%08x[%x] %s r=%d",
+ __entry->server,
+ __entry->estate,
+ __print_symbolic(__entry->reason, afs_estate_traces),
+ __entry->ref)
+ );
+
+TRACE_EVENT(afs_fs_probe,
+ TP_PROTO(struct afs_server *server, bool tx, struct afs_endpoint_state *estate,
+ unsigned int addr_index, int error, s32 abort_code, unsigned int rtt_us),
+
+ TP_ARGS(server, tx, estate, addr_index, error, abort_code, rtt_us),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, server)
+ __field(unsigned int, estate)
+ __field(bool, tx)
+ __field(u16, addr_index)
+ __field(short, error)
+ __field(s32, abort_code)
+ __field(unsigned int, rtt_us)
+ __field_struct(struct sockaddr_rxrpc, srx)
+ ),
+
+ TP_fast_assign(
+ struct afs_addr_list *alist = estate->addresses;
+ __entry->server = server->debug_id;
+ __entry->estate = estate->probe_seq;
+ __entry->tx = tx;
+ __entry->addr_index = addr_index;
+ __entry->error = error;
+ __entry->abort_code = abort_code;
+ __entry->rtt_us = rtt_us;
+ memcpy(&__entry->srx, rxrpc_kernel_remote_srx(alist->addrs[addr_index].peer),
+ sizeof(__entry->srx));
+ ),
+
+ TP_printk("s=%08x %s pq=%x ax=%u e=%d ac=%d rtt=%d %pISpc",
+ __entry->server, __entry->tx ? "tx" : "rx", __entry->estate,
+ __entry->addr_index, __entry->error, __entry->abort_code, __entry->rtt_us,
+ &__entry->srx.transport)
+ );
+
+TRACE_EVENT(afs_vl_probe,
+ TP_PROTO(struct afs_vlserver *server, bool tx, struct afs_addr_list *alist,
+ unsigned int addr_index, int error, s32 abort_code, unsigned int rtt_us),
+
+ TP_ARGS(server, tx, alist, addr_index, error, abort_code, rtt_us),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, server)
+ __field(bool, tx)
+ __field(unsigned short, flags)
+ __field(u16, addr_index)
+ __field(short, error)
+ __field(s32, abort_code)
+ __field(unsigned int, rtt_us)
+ __field_struct(struct sockaddr_rxrpc, srx)
+ ),
+
+ TP_fast_assign(
+ __entry->server = server->debug_id;
+ __entry->tx = tx;
+ __entry->addr_index = addr_index;
+ __entry->error = error;
+ __entry->abort_code = abort_code;
+ __entry->rtt_us = rtt_us;
+ memcpy(&__entry->srx, rxrpc_kernel_remote_srx(alist->addrs[addr_index].peer),
+ sizeof(__entry->srx));
+ ),
+
+ TP_printk("vl=%08x %s ax=%u e=%d ac=%d rtt=%d %pISpc",
+ __entry->server, __entry->tx ? "tx" : "rx", __entry->addr_index,
+ __entry->error, __entry->abort_code, __entry->rtt_us,
+ &__entry->srx.transport)
+ );
+
+TRACE_EVENT(afs_rotate,
+ TP_PROTO(struct afs_operation *op, enum afs_rotate_trace reason, unsigned int extra),
+
+ TP_ARGS(op, reason, extra),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, op)
+ __field(unsigned int, flags)
+ __field(unsigned int, extra)
+ __field(unsigned short, iteration)
+ __field(short, server_index)
+ __field(short, addr_index)
+ __field(enum afs_rotate_trace, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->op = op->debug_id;
+ __entry->flags = op->flags;
+ __entry->iteration = op->nr_iterations;
+ __entry->server_index = op->server_index;
+ __entry->addr_index = op->addr_index;
+ __entry->reason = reason;
+ __entry->extra = extra;
+ ),
+
+ TP_printk("OP=%08x it=%02x %s fl=%x sx=%d ax=%d ext=%d",
+ __entry->op,
+ __entry->iteration,
+ __print_symbolic(__entry->reason, afs_rotate_traces),
+ __entry->flags,
+ __entry->server_index,
+ __entry->addr_index,
+ __entry->extra)
+ );
+
+TRACE_EVENT(afs_make_call,
+ TP_PROTO(struct afs_call *call),
+
+ TP_ARGS(call),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call)
+ __field(bool, is_vl)
+ __field(enum afs_fs_operation, op)
+ __field_struct(struct afs_fid, fid)
+ __field_struct(struct sockaddr_rxrpc, srx)
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->op = call->operation_ID;
+ __entry->fid = call->fid;
+ memcpy(&__entry->srx, rxrpc_kernel_remote_srx(call->peer),
+ sizeof(__entry->srx));
+ __entry->srx.srx_service = call->service_id;
+ __entry->is_vl = (__entry->srx.srx_service == VL_SERVICE ||
+ __entry->srx.srx_service == YFS_VL_SERVICE);
+ ),
+
+ TP_printk("c=%08x %pISpc+%u %s %llx:%llx:%x",
+ __entry->call,
+ &__entry->srx.transport,
+ __entry->srx.srx_service,
+ __entry->is_vl ?
+ __print_symbolic(__entry->op, afs_vl_operations) :
+ __print_symbolic(__entry->op, afs_fs_operations),
+ __entry->fid.vid,
+ __entry->fid.vnode,
+ __entry->fid.unique)
+ );
+
#endif /* _TRACE_AFS_H */
/* This part must be outside protection */
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 7f4dfbdf12a6..0e128ad51460 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -12,6 +12,7 @@
#define RWBS_LEN 8
+#ifdef CONFIG_BUFFER_HEAD
DECLARE_EVENT_CLASS(block_buffer,
TP_PROTO(struct buffer_head *bh),
@@ -61,6 +62,7 @@ DEFINE_EVENT(block_buffer, block_dirty_buffer,
TP_ARGS(bh)
);
+#endif /* CONFIG_BUFFER_HEAD */
/**
* block_rq_requeue - place block IO request back on a queue
@@ -246,6 +248,32 @@ DEFINE_EVENT(block_rq, block_rq_merge,
);
/**
+ * block_io_start - insert a request for execution
+ * @rq: block IO operation request
+ *
+ * Called when block operation request @rq is queued for execution
+ */
+DEFINE_EVENT(block_rq, block_io_start,
+
+ TP_PROTO(struct request *rq),
+
+ TP_ARGS(rq)
+);
+
+/**
+ * block_io_done - block IO operation request completed
+ * @rq: block IO operation request
+ *
+ * Called when block operation request @rq is completed
+ */
+DEFINE_EVENT(block_rq, block_io_done,
+
+ TP_PROTO(struct request *rq),
+
+ TP_ARGS(rq)
+);
+
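
Because block_io_start and block_io_done bracket a request's execution, pairing the two events yields per-request queue-to-completion timing. A hedged sketch of how in-kernel code might attach a probe: the register_trace_<event>() helpers are generated for every DEFINE_EVENT, but the module name and printout below are illustrative, and the sketch assumes the tracepoint is exported to the probing code:

	#include <linux/blkdev.h>
	#include <linux/module.h>
	#include <trace/events/block.h>

	/* Probes receive the TP_PROTO arguments preceded by a private data pointer. */
	static void probe_io_start(void *data, struct request *rq)
	{
		pr_info("io_start: dev %s sector %llu\n",
			rq->q->disk ? rq->q->disk->disk_name : "?",
			(unsigned long long)blk_rq_pos(rq));
	}

	static int __init io_watch_init(void)
	{
		return register_trace_block_io_start(probe_io_start, NULL);
	}

	static void __exit io_watch_exit(void)
	{
		unregister_trace_block_io_start(probe_io_start, NULL);
	}

	module_init(io_watch_init);
	module_exit(io_watch_exit);
	MODULE_LICENSE("GPL");
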
+/**
* block_bio_complete - completed all work on the block operation
* @q: queue holding the block operation
* @bio: block operation completed
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 8ea9cea9bfeb..90b0222390e5 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -21,7 +21,7 @@ struct btrfs_delayed_data_ref;
struct btrfs_delayed_ref_head;
struct btrfs_block_group;
struct btrfs_free_cluster;
-struct map_lookup;
+struct btrfs_chunk_map;
struct extent_buffer;
struct btrfs_work;
struct btrfs_workqueue;
@@ -38,7 +38,6 @@ struct find_free_extent_ctl;
__print_symbolic(type, \
{ BTRFS_TREE_BLOCK_REF_KEY, "TREE_BLOCK_REF" }, \
{ BTRFS_EXTENT_DATA_REF_KEY, "EXTENT_DATA_REF" }, \
- { BTRFS_EXTENT_REF_V0_KEY, "EXTENT_REF_V0" }, \
{ BTRFS_SHARED_BLOCK_REF_KEY, "SHARED_BLOCK_REF" }, \
{ BTRFS_SHARED_DATA_REF_KEY, "SHARED_DATA_REF" })
@@ -266,20 +265,20 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict,
__print_symbolic_u64(type, \
{ EXTENT_MAP_LAST_BYTE, "LAST_BYTE" }, \
{ EXTENT_MAP_HOLE, "HOLE" }, \
- { EXTENT_MAP_INLINE, "INLINE" }, \
- { EXTENT_MAP_DELALLOC, "DELALLOC" })
+ { EXTENT_MAP_INLINE, "INLINE" })
#define show_map_type(type) \
type, (type >= EXTENT_MAP_LAST_BYTE) ? "-" : __show_map_type(type)
#define show_map_flags(flag) \
__print_flags(flag, "|", \
- { (1 << EXTENT_FLAG_PINNED), "PINNED" },\
- { (1 << EXTENT_FLAG_COMPRESSED), "COMPRESSED" },\
- { (1 << EXTENT_FLAG_PREALLOC), "PREALLOC" },\
- { (1 << EXTENT_FLAG_LOGGING), "LOGGING" },\
- { (1 << EXTENT_FLAG_FILLING), "FILLING" },\
- { (1 << EXTENT_FLAG_FS_MAPPING), "FS_MAPPING" })
+ { EXTENT_FLAG_PINNED, "PINNED" },\
+ { EXTENT_FLAG_COMPRESS_ZLIB, "COMPRESS_ZLIB" },\
+ { EXTENT_FLAG_COMPRESS_LZO, "COMPRESS_LZO" },\
+ { EXTENT_FLAG_COMPRESS_ZSTD, "COMPRESS_ZSTD" },\
+ { EXTENT_FLAG_PREALLOC, "PREALLOC" },\
+ { EXTENT_FLAG_LOGGING, "LOGGING" },\
+ { EXTENT_FLAG_FILLING, "FILLING" })
TRACE_EVENT_CONDITION(btrfs_get_extent,
@@ -298,9 +297,8 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
__field( u64, orig_start )
__field( u64, block_start )
__field( u64, block_len )
- __field( unsigned long, flags )
+ __field( u32, flags )
__field( int, refs )
- __field( unsigned int, compress_type )
),
TP_fast_assign_btrfs(root->fs_info,
@@ -313,13 +311,11 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
__entry->block_len = map->block_len;
__entry->flags = map->flags;
__entry->refs = refcount_read(&map->refs);
- __entry->compress_type = map->compress_type;
),
TP_printk_btrfs("root=%llu(%s) ino=%llu start=%llu len=%llu "
"orig_start=%llu block_start=%llu(%s) "
- "block_len=%llu flags=%s refs=%u "
- "compress_type=%u",
+ "block_len=%llu flags=%s refs=%u",
show_root_type(__entry->root_objectid),
__entry->ino,
__entry->start,
@@ -328,7 +324,7 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
show_map_type(__entry->block_start),
__entry->block_len,
show_map_flags(__entry->flags),
- __entry->refs, __entry->compress_type)
+ __entry->refs)
);
TRACE_EVENT(btrfs_handle_em_exist,
@@ -661,6 +657,35 @@ DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_mark_finished,
TP_ARGS(inode, ordered)
);
+TRACE_EVENT(btrfs_finish_ordered_extent,
+
+ TP_PROTO(const struct btrfs_inode *inode, u64 start, u64 len,
+ bool uptodate),
+
+ TP_ARGS(inode, start, len, uptodate),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, ino )
+ __field( u64, start )
+ __field( u64, len )
+ __field( bool, uptodate )
+ __field( u64, root_objectid )
+ ),
+
+ TP_fast_assign_btrfs(inode->root->fs_info,
+ __entry->ino = btrfs_ino(inode);
+ __entry->start = start;
+ __entry->len = len;
+ __entry->uptodate = uptodate;
+ __entry->root_objectid = inode->root->root_key.objectid;
+ ),
+
+ TP_printk_btrfs("root=%llu(%s) ino=%llu start=%llu len=%llu uptodate=%d",
+ show_root_type(__entry->root_objectid),
+ __entry->ino, __entry->start,
+ __entry->len, !!__entry->uptodate)
+);
+
DECLARE_EVENT_CLASS(btrfs__writepage,
TP_PROTO(const struct page *page, const struct inode *inode,
@@ -1033,7 +1058,7 @@ DEFINE_EVENT(btrfs_delayed_ref_head, run_delayed_ref_head,
DECLARE_EVENT_CLASS(btrfs__chunk,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct map_lookup *map, u64 offset, u64 size),
+ const struct btrfs_chunk_map *map, u64 offset, u64 size),
TP_ARGS(fs_info, map, offset, size),
@@ -1067,7 +1092,7 @@ DECLARE_EVENT_CLASS(btrfs__chunk,
DEFINE_EVENT(btrfs__chunk, btrfs_chunk_alloc,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct map_lookup *map, u64 offset, u64 size),
+ const struct btrfs_chunk_map *map, u64 offset, u64 size),
TP_ARGS(fs_info, map, offset, size)
);
@@ -1075,7 +1100,7 @@ DEFINE_EVENT(btrfs__chunk, btrfs_chunk_alloc,
DEFINE_EVENT(btrfs__chunk, btrfs_chunk_free,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct map_lookup *map, u64 offset, u64 size),
+ const struct btrfs_chunk_map *map, u64 offset, u64 size),
TP_ARGS(fs_info, map, offset, size)
);
@@ -1533,7 +1558,6 @@ DECLARE_EVENT_CLASS(btrfs__work,
__field( const void *, wq )
__field( const void *, func )
__field( const void *, ordered_func )
- __field( const void *, ordered_free )
__field( const void *, normal_work )
),
@@ -1542,14 +1566,12 @@ DECLARE_EVENT_CLASS(btrfs__work,
__entry->wq = work->wq;
__entry->func = work->func;
__entry->ordered_func = work->ordered_func;
- __entry->ordered_free = work->ordered_free;
__entry->normal_work = &work->normal_work;
),
- TP_printk_btrfs("work=%p (normal_work=%p) wq=%p func=%ps ordered_func=%p "
- "ordered_free=%p",
+ TP_printk_btrfs("work=%p (normal_work=%p) wq=%p func=%ps ordered_func=%p",
__entry->work, __entry->normal_work, __entry->wq,
- __entry->func, __entry->ordered_func, __entry->ordered_free)
+ __entry->func, __entry->ordered_func)
);
/*
@@ -1982,25 +2004,27 @@ DEFINE_EVENT(btrfs__prelim_ref, btrfs_prelim_ref_insert,
);
TRACE_EVENT(btrfs_inode_mod_outstanding_extents,
- TP_PROTO(const struct btrfs_root *root, u64 ino, int mod),
+ TP_PROTO(const struct btrfs_root *root, u64 ino, int mod, unsigned outstanding),
- TP_ARGS(root, ino, mod),
+ TP_ARGS(root, ino, mod, outstanding),
TP_STRUCT__entry_btrfs(
__field( u64, root_objectid )
__field( u64, ino )
__field( int, mod )
+ __field( unsigned, outstanding )
),
TP_fast_assign_btrfs(root->fs_info,
__entry->root_objectid = root->root_key.objectid;
__entry->ino = ino;
__entry->mod = mod;
+ __entry->outstanding = outstanding;
),
- TP_printk_btrfs("root=%llu(%s) ino=%llu mod=%d",
+ TP_printk_btrfs("root=%llu(%s) ino=%llu mod=%d outstanding=%u",
show_root_type(__entry->root_objectid),
- __entry->ino, __entry->mod)
+ __entry->ino, __entry->mod, __entry->outstanding)
);
DECLARE_EVENT_CLASS(btrfs__block_group,
@@ -2072,17 +2096,12 @@ TRACE_EVENT(btrfs_set_extent_bit,
__field( unsigned, set_bits)
),
- TP_fast_assign_btrfs(tree->fs_info,
- __entry->owner = tree->owner;
- if (tree->inode) {
- const struct btrfs_inode *inode = tree->inode;
+ TP_fast_assign_btrfs(extent_io_tree_to_fs_info(tree),
+ const struct btrfs_inode *inode = extent_io_tree_to_inode_const(tree);
- __entry->ino = btrfs_ino(inode);
- __entry->rootid = inode->root->root_key.objectid;
- } else {
- __entry->ino = 0;
- __entry->rootid = 0;
- }
+ __entry->owner = tree->owner;
+ __entry->ino = inode ? btrfs_ino(inode) : 0;
+ __entry->rootid = inode ? inode->root->root_key.objectid : 0;
__entry->start = start;
__entry->len = len;
__entry->set_bits = set_bits;
@@ -2110,17 +2129,12 @@ TRACE_EVENT(btrfs_clear_extent_bit,
__field( unsigned, clear_bits)
),
- TP_fast_assign_btrfs(tree->fs_info,
- __entry->owner = tree->owner;
- if (tree->inode) {
- const struct btrfs_inode *inode = tree->inode;
+ TP_fast_assign_btrfs(extent_io_tree_to_fs_info(tree),
+ const struct btrfs_inode *inode = extent_io_tree_to_inode_const(tree);
- __entry->ino = btrfs_ino(inode);
- __entry->rootid = inode->root->root_key.objectid;
- } else {
- __entry->ino = 0;
- __entry->rootid = 0;
- }
+ __entry->owner = tree->owner;
+ __entry->ino = inode ? btrfs_ino(inode) : 0;
+ __entry->rootid = inode ? inode->root->root_key.objectid : 0;
__entry->start = start;
__entry->len = len;
__entry->clear_bits = clear_bits;
@@ -2149,17 +2163,12 @@ TRACE_EVENT(btrfs_convert_extent_bit,
__field( unsigned, clear_bits)
),
- TP_fast_assign_btrfs(tree->fs_info,
- __entry->owner = tree->owner;
- if (tree->inode) {
- const struct btrfs_inode *inode = tree->inode;
+ TP_fast_assign_btrfs(extent_io_tree_to_fs_info(tree),
+ const struct btrfs_inode *inode = extent_io_tree_to_inode_const(tree);
- __entry->ino = btrfs_ino(inode);
- __entry->rootid = inode->root->root_key.objectid;
- } else {
- __entry->ino = 0;
- __entry->rootid = 0;
- }
+ __entry->owner = tree->owner;
+ __entry->ino = inode ? btrfs_ino(inode) : 0;
+ __entry->rootid = inode ? inode->root->root_key.objectid : 0;
__entry->start = start;
__entry->len = len;
__entry->set_bits = set_bits;
@@ -2451,7 +2460,7 @@ DECLARE_EVENT_CLASS(btrfs_raid56_bio,
__entry->offset, __entry->opf, __entry->physical, __entry->len)
);
-DEFINE_EVENT(btrfs_raid56_bio, raid56_read_partial,
+DEFINE_EVENT(btrfs_raid56_bio, raid56_read,
TP_PROTO(const struct btrfs_raid_bio *rbio,
const struct bio *bio,
const struct raid56_bio_trace_info *trace_info),
@@ -2459,7 +2468,7 @@ DEFINE_EVENT(btrfs_raid56_bio, raid56_read_partial,
TP_ARGS(rbio, bio, trace_info)
);
-DEFINE_EVENT(btrfs_raid56_bio, raid56_write_stripe,
+DEFINE_EVENT(btrfs_raid56_bio, raid56_write,
TP_PROTO(const struct btrfs_raid_bio *rbio,
const struct bio *bio,
const struct raid56_bio_trace_info *trace_info),
@@ -2467,29 +2476,80 @@ DEFINE_EVENT(btrfs_raid56_bio, raid56_write_stripe,
TP_ARGS(rbio, bio, trace_info)
);
+TRACE_EVENT(btrfs_insert_one_raid_extent,
-DEFINE_EVENT(btrfs_raid56_bio, raid56_scrub_write_stripe,
- TP_PROTO(const struct btrfs_raid_bio *rbio,
- const struct bio *bio,
- const struct raid56_bio_trace_info *trace_info),
+ TP_PROTO(const struct btrfs_fs_info *fs_info, u64 logical, u64 length,
+ int num_stripes),
- TP_ARGS(rbio, bio, trace_info)
+ TP_ARGS(fs_info, logical, length, num_stripes),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, logical )
+ __field( u64, length )
+ __field( int, num_stripes )
+ ),
+
+ TP_fast_assign_btrfs(fs_info,
+ __entry->logical = logical;
+ __entry->length = length;
+ __entry->num_stripes = num_stripes;
+ ),
+
+ TP_printk_btrfs("logical=%llu length=%llu num_stripes=%d",
+ __entry->logical, __entry->length,
+ __entry->num_stripes)
);
-DEFINE_EVENT(btrfs_raid56_bio, raid56_scrub_read,
- TP_PROTO(const struct btrfs_raid_bio *rbio,
- const struct bio *bio,
- const struct raid56_bio_trace_info *trace_info),
+TRACE_EVENT(btrfs_raid_extent_delete,
- TP_ARGS(rbio, bio, trace_info)
+ TP_PROTO(const struct btrfs_fs_info *fs_info, u64 start, u64 end,
+ u64 found_start, u64 found_end),
+
+ TP_ARGS(fs_info, start, end, found_start, found_end),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, start )
+ __field( u64, end )
+ __field( u64, found_start )
+ __field( u64, found_end )
+ ),
+
+ TP_fast_assign_btrfs(fs_info,
+ __entry->start = start;
+ __entry->end = end;
+ __entry->found_start = found_start;
+ __entry->found_end = found_end;
+ ),
+
+ TP_printk_btrfs("start=%llu end=%llu found_start=%llu found_end=%llu",
+ __entry->start, __entry->end, __entry->found_start,
+ __entry->found_end)
);
-DEFINE_EVENT(btrfs_raid56_bio, raid56_scrub_read_recover,
- TP_PROTO(const struct btrfs_raid_bio *rbio,
- const struct bio *bio,
- const struct raid56_bio_trace_info *trace_info),
+TRACE_EVENT(btrfs_get_raid_extent_offset,
- TP_ARGS(rbio, bio, trace_info)
+ TP_PROTO(const struct btrfs_fs_info *fs_info, u64 logical, u64 length,
+ u64 physical, u64 devid),
+
+ TP_ARGS(fs_info, logical, length, physical, devid),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, logical )
+ __field( u64, length )
+ __field( u64, physical )
+ __field( u64, devid )
+ ),
+
+ TP_fast_assign_btrfs(fs_info,
+ __entry->logical = logical;
+ __entry->length = length;
+ __entry->physical = physical;
+ __entry->devid = devid;
+ ),
+
+ TP_printk_btrfs("logical=%llu length=%llu physical=%llu devid=%llu",
+ __entry->logical, __entry->length, __entry->physical,
+ __entry->devid)
);
#endif /* _TRACE_BTRFS_H */
diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h
index 3313eb83c117..2b2a975efd20 100644
--- a/include/trace/events/compaction.h
+++ b/include/trace/events/compaction.h
@@ -64,6 +64,17 @@ DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_freepages,
TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken)
);
+DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_fast_isolate_freepages,
+
+ TP_PROTO(
+ unsigned long start_pfn,
+ unsigned long end_pfn,
+ unsigned long nr_scanned,
+ unsigned long nr_taken),
+
+ TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken)
+);
+
#ifdef CONFIG_COMPACTION
TRACE_EVENT(mm_compaction_migratepages,
diff --git a/include/trace/events/csd.h b/include/trace/events/csd.h
new file mode 100644
index 000000000000..58cc83b99c34
--- /dev/null
+++ b/include/trace/events/csd.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM csd
+
+#if !defined(_TRACE_CSD_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CSD_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(csd_queue_cpu,
+
+ TP_PROTO(const unsigned int cpu,
+ unsigned long callsite,
+ smp_call_func_t func,
+ call_single_data_t *csd),
+
+ TP_ARGS(cpu, callsite, func, csd),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, cpu)
+ __field(void *, callsite)
+ __field(void *, func)
+ __field(void *, csd)
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->callsite = (void *)callsite;
+ __entry->func = func;
+ __entry->csd = csd;
+ ),
+
+ TP_printk("cpu=%u callsite=%pS func=%ps csd=%p",
+ __entry->cpu, __entry->callsite, __entry->func, __entry->csd)
+ );
+
+/*
+ * Tracepoints for functions called as an effect of smp_call_function.*
+ */
+DECLARE_EVENT_CLASS(csd_function,
+
+ TP_PROTO(smp_call_func_t func, call_single_data_t *csd),
+
+ TP_ARGS(func, csd),
+
+ TP_STRUCT__entry(
+ __field(void *, func)
+ __field(void *, csd)
+ ),
+
+ TP_fast_assign(
+ __entry->func = func;
+ __entry->csd = csd;
+ ),
+
+ TP_printk("func=%ps, csd=%p", __entry->func, __entry->csd)
+);
+
+DEFINE_EVENT(csd_function, csd_function_entry,
+ TP_PROTO(smp_call_func_t func, call_single_data_t *csd),
+ TP_ARGS(func, csd)
+);
+
+DEFINE_EVENT(csd_function, csd_function_exit,
+ TP_PROTO(smp_call_func_t func, call_single_data_t *csd),
+ TP_ARGS(func, csd)
+);
+
+#endif /* _TRACE_CSD_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
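
csd_function_entry and csd_function_exit share the csd_function event class, so they carry identical fields and can be paired to measure how long a cross-CPU function call runs. A sketch in the same registration pattern as the block example above, with an illustrative 100us threshold, assuming the tracepoints are reachable from the probing module:

	#include <linux/ktime.h>
	#include <linux/module.h>
	#include <linux/percpu.h>
	#include <linux/smp.h>
	#include <trace/events/csd.h>

	static DEFINE_PER_CPU(u64, csd_entry_ts);

	/* csd functions run on one CPU with IRQs disabled, so per-CPU state is safe. */
	static void probe_entry(void *data, smp_call_func_t func, call_single_data_t *csd)
	{
		this_cpu_write(csd_entry_ts, ktime_get_ns());
	}

	static void probe_exit(void *data, smp_call_func_t func, call_single_data_t *csd)
	{
		u64 dur = ktime_get_ns() - this_cpu_read(csd_entry_ts);

		if (dur > 100 * NSEC_PER_USEC)
			pr_warn("csd func %ps ran for %llu ns\n", func, dur);
	}

	static int __init csd_watch_init(void)
	{
		int ret = register_trace_csd_function_entry(probe_entry, NULL);

		if (!ret)
			ret = register_trace_csd_function_exit(probe_exit, NULL);
		return ret;
	}

	static void __exit csd_watch_exit(void)
	{
		unregister_trace_csd_function_entry(probe_entry, NULL);
		unregister_trace_csd_function_exit(probe_exit, NULL);
	}

	module_init(csd_watch_init);
	module_exit(csd_watch_exit);
	MODULE_LICENSE("GPL");
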
diff --git a/include/trace/events/damon.h b/include/trace/events/damon.h
index c79f1d4c39af..23200aabccac 100644
--- a/include/trace/events/damon.h
+++ b/include/trace/events/damon.h
@@ -9,12 +9,51 @@
#include <linux/types.h>
#include <linux/tracepoint.h>
+TRACE_EVENT_CONDITION(damos_before_apply,
+
+ TP_PROTO(unsigned int context_idx, unsigned int scheme_idx,
+ unsigned int target_idx, struct damon_region *r,
+ unsigned int nr_regions, bool do_trace),
+
+ TP_ARGS(context_idx, scheme_idx, target_idx, r, nr_regions, do_trace),
+
+ TP_CONDITION(do_trace),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, context_idx)
+ __field(unsigned int, scheme_idx)
+ __field(unsigned long, target_idx)
+ __field(unsigned long, start)
+ __field(unsigned long, end)
+ __field(unsigned int, nr_accesses)
+ __field(unsigned int, age)
+ __field(unsigned int, nr_regions)
+ ),
+
+ TP_fast_assign(
+ __entry->context_idx = context_idx;
+ __entry->scheme_idx = scheme_idx;
+ __entry->target_idx = target_idx;
+ __entry->start = r->ar.start;
+ __entry->end = r->ar.end;
+ __entry->nr_accesses = r->nr_accesses_bp / 10000;
+ __entry->age = r->age;
+ __entry->nr_regions = nr_regions;
+ ),
+
+ TP_printk("ctx_idx=%u scheme_idx=%u target_idx=%lu nr_regions=%u %lu-%lu: %u %u",
+ __entry->context_idx, __entry->scheme_idx,
+ __entry->target_idx, __entry->nr_regions,
+ __entry->start, __entry->end,
+ __entry->nr_accesses, __entry->age)
+);
+
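
TRACE_EVENT_CONDITION() differs from TRACE_EVENT() only in TP_CONDITION(): the event fires just when the expression is true, here the caller-supplied do_trace flag, so no ring-buffer space is consumed otherwise. Note also the unit conversion in TP_fast_assign(): assuming nr_accesses_bp holds the region's access count in basis points (1/10000), dividing by 10000 recovers whole accesses per aggregation interval, e.g.:

	#include <assert.h>

	int main(void)
	{
		/* Illustrative value: a region observed as accessed 3 times. */
		unsigned int nr_accesses_bp = 30000;	/* basis points, i.e. 3 * 10000 */

		assert(nr_accesses_bp / 10000 == 3);	/* what the tracepoint records */
		return 0;
	}
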
TRACE_EVENT(damon_aggregated,
- TP_PROTO(struct damon_target *t, unsigned int target_id,
- struct damon_region *r, unsigned int nr_regions),
+ TP_PROTO(unsigned int target_id, struct damon_region *r,
+ unsigned int nr_regions),
- TP_ARGS(t, target_id, r, nr_regions),
+ TP_ARGS(target_id, r, nr_regions),
TP_STRUCT__entry(
__field(unsigned long, target_id)
diff --git a/include/trace/events/dlm.h b/include/trace/events/dlm.h
index 2b09574e1243..c1a146f9fc91 100644
--- a/include/trace/events/dlm.h
+++ b/include/trace/events/dlm.h
@@ -7,6 +7,7 @@
#include <linux/dlm.h>
#include <linux/dlmconstants.h>
+#include <uapi/linux/dlm_plock.h>
#include <linux/tracepoint.h>
#include "../../../fs/dlm/dlm_internal.h"
@@ -585,6 +586,56 @@ TRACE_EVENT(dlm_recv_message,
);
+DECLARE_EVENT_CLASS(dlm_plock_template,
+
+ TP_PROTO(const struct dlm_plock_info *info),
+
+ TP_ARGS(info),
+
+ TP_STRUCT__entry(
+ __field(uint8_t, optype)
+ __field(uint8_t, ex)
+ __field(uint8_t, wait)
+ __field(uint8_t, flags)
+ __field(uint32_t, pid)
+ __field(int32_t, nodeid)
+ __field(int32_t, rv)
+ __field(uint32_t, fsid)
+ __field(uint64_t, number)
+ __field(uint64_t, start)
+ __field(uint64_t, end)
+ __field(uint64_t, owner)
+ ),
+
+ TP_fast_assign(
+ __entry->optype = info->optype;
+ __entry->ex = info->ex;
+ __entry->wait = info->wait;
+ __entry->flags = info->flags;
+ __entry->pid = info->pid;
+ __entry->nodeid = info->nodeid;
+ __entry->rv = info->rv;
+ __entry->fsid = info->fsid;
+ __entry->number = info->number;
+ __entry->start = info->start;
+ __entry->end = info->end;
+ __entry->owner = info->owner;
+ ),
+
+ TP_printk("fsid=%u number=%llx owner=%llx optype=%d ex=%d wait=%d flags=%x pid=%u nodeid=%d rv=%d start=%llx end=%llx",
+ __entry->fsid, __entry->number, __entry->owner,
+ __entry->optype, __entry->ex, __entry->wait,
+ __entry->flags, __entry->pid, __entry->nodeid,
+ __entry->rv, __entry->start, __entry->end)
+
+);
+
+DEFINE_EVENT(dlm_plock_template, dlm_plock_read,
+ TP_PROTO(const struct dlm_plock_info *info), TP_ARGS(info));
+
+DEFINE_EVENT(dlm_plock_template, dlm_plock_write,
+ TP_PROTO(const struct dlm_plock_info *info), TP_ARGS(info));
+
TRACE_EVENT(dlm_send,
TP_PROTO(int nodeid, int ret),
diff --git a/include/trace/events/erofs.h b/include/trace/events/erofs.h
index 71dbe8bfa7db..e18684b02c3d 100644
--- a/include/trace/events/erofs.h
+++ b/include/trace/events/erofs.h
@@ -80,11 +80,11 @@ TRACE_EVENT(erofs_fill_inode,
__entry->blkaddr, __entry->ofs)
);
-TRACE_EVENT(erofs_readpage,
+TRACE_EVENT(erofs_read_folio,
- TP_PROTO(struct page *page, bool raw),
+ TP_PROTO(struct folio *folio, bool raw),
- TP_ARGS(page, raw),
+ TP_ARGS(folio, raw),
TP_STRUCT__entry(
__field(dev_t, dev )
@@ -96,11 +96,11 @@ TRACE_EVENT(erofs_readpage,
),
TP_fast_assign(
- __entry->dev = page->mapping->host->i_sb->s_dev;
- __entry->nid = EROFS_I(page->mapping->host)->nid;
- __entry->dir = S_ISDIR(page->mapping->host->i_mode);
- __entry->index = page->index;
- __entry->uptodate = PageUptodate(page);
+ __entry->dev = folio->mapping->host->i_sb->s_dev;
+ __entry->nid = EROFS_I(folio->mapping->host)->nid;
+ __entry->dir = S_ISDIR(folio->mapping->host->i_mode);
+ __entry->index = folio->index;
+ __entry->uptodate = folio_test_uptodate(folio);
__entry->raw = raw;
),
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index ebccf6a6aa1b..a697f4b77162 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -120,6 +120,20 @@ TRACE_DEFINE_ENUM(EXT4_FC_REASON_MAX);
{ EXT4_FC_REASON_INODE_JOURNAL_DATA, "INODE_JOURNAL_DATA"}, \
{ EXT4_FC_REASON_ENCRYPTED_FILENAME, "ENCRYPTED_FILENAME"})
+TRACE_DEFINE_ENUM(CR_POWER2_ALIGNED);
+TRACE_DEFINE_ENUM(CR_GOAL_LEN_FAST);
+TRACE_DEFINE_ENUM(CR_BEST_AVAIL_LEN);
+TRACE_DEFINE_ENUM(CR_GOAL_LEN_SLOW);
+TRACE_DEFINE_ENUM(CR_ANY_FREE);
+
+#define show_criteria(cr) \
+ __print_symbolic(cr, \
+ { CR_POWER2_ALIGNED, "CR_POWER2_ALIGNED" }, \
+ { CR_GOAL_LEN_FAST, "CR_GOAL_LEN_FAST" }, \
+ { CR_BEST_AVAIL_LEN, "CR_BEST_AVAIL_LEN" }, \
+ { CR_GOAL_LEN_SLOW, "CR_GOAL_LEN_SLOW" }, \
+ { CR_ANY_FREE, "CR_ANY_FREE" })
+
TRACE_EVENT(ext4_other_inode_update_time,
TP_PROTO(struct inode *inode, ino_t orig_ino),
@@ -560,10 +574,10 @@ TRACE_EVENT(ext4_writepages_result,
(unsigned long) __entry->writeback_index)
);
-DECLARE_EVENT_CLASS(ext4__page_op,
- TP_PROTO(struct page *page),
+DECLARE_EVENT_CLASS(ext4__folio_op,
+ TP_PROTO(struct inode *inode, struct folio *folio),
- TP_ARGS(page),
+ TP_ARGS(inode, folio),
TP_STRUCT__entry(
__field( dev_t, dev )
@@ -573,29 +587,29 @@ DECLARE_EVENT_CLASS(ext4__page_op,
),
TP_fast_assign(
- __entry->dev = page->mapping->host->i_sb->s_dev;
- __entry->ino = page->mapping->host->i_ino;
- __entry->index = page->index;
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->index = folio->index;
),
- TP_printk("dev %d,%d ino %lu page_index %lu",
+ TP_printk("dev %d,%d ino %lu folio_index %lu",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
(unsigned long) __entry->index)
);
-DEFINE_EVENT(ext4__page_op, ext4_readpage,
+DEFINE_EVENT(ext4__folio_op, ext4_read_folio,
- TP_PROTO(struct page *page),
+ TP_PROTO(struct inode *inode, struct folio *folio),
- TP_ARGS(page)
+ TP_ARGS(inode, folio)
);
-DEFINE_EVENT(ext4__page_op, ext4_releasepage,
+DEFINE_EVENT(ext4__folio_op, ext4_release_folio,
- TP_PROTO(struct page *page),
+ TP_PROTO(struct inode *inode, struct folio *folio),
- TP_ARGS(page)
+ TP_ARGS(inode, folio)
);
DECLARE_EVENT_CLASS(ext4_invalidate_folio_op,
@@ -758,15 +772,14 @@ TRACE_EVENT(ext4_mb_release_group_pa,
);
TRACE_EVENT(ext4_discard_preallocations,
- TP_PROTO(struct inode *inode, unsigned int len, unsigned int needed),
+ TP_PROTO(struct inode *inode, unsigned int len),
- TP_ARGS(inode, len, needed),
+ TP_ARGS(inode, len),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( unsigned int, len )
- __field( unsigned int, needed )
),
@@ -774,13 +787,11 @@ TRACE_EVENT(ext4_discard_preallocations,
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
__entry->len = len;
- __entry->needed = needed;
),
- TP_printk("dev %d,%d ino %lu len: %u needed %u",
+ TP_printk("dev %d,%d ino %lu len: %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, __entry->len,
- __entry->needed)
+ (unsigned long) __entry->ino, __entry->len)
);
TRACE_EVENT(ext4_mb_discard_preallocations,
@@ -1063,7 +1074,7 @@ TRACE_EVENT(ext4_mballoc_alloc,
),
TP_printk("dev %d,%d inode %lu orig %u/%d/%u@%u goal %u/%d/%u@%u "
- "result %u/%d/%u@%u blks %u grps %u cr %u flags %s "
+ "result %u/%d/%u@%u blks %u grps %u cr %s flags %s "
"tail %u broken %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,
@@ -1073,7 +1084,7 @@ TRACE_EVENT(ext4_mballoc_alloc,
__entry->goal_len, __entry->goal_logical,
__entry->result_group, __entry->result_start,
__entry->result_len, __entry->result_logical,
- __entry->found, __entry->groups, __entry->cr,
+ __entry->found, __entry->groups, show_criteria(__entry->cr),
show_mballoc_flags(__entry->flags), __entry->tail,
__entry->buddy ? 1 << __entry->buddy : 0)
);
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 99cbc5949e3c..7ed0fc430dc6 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -161,6 +161,19 @@ TRACE_DEFINE_ENUM(EX_BLOCK_AGE);
{ EX_READ, "Read" }, \
{ EX_BLOCK_AGE, "Block Age" })
+#define show_inode_type(x) \
+ __print_symbolic(x, \
+ { S_IFLNK, "symbolic" }, \
+ { S_IFREG, "regular" }, \
+ { S_IFDIR, "directory" }, \
+ { S_IFCHR, "character" }, \
+ { S_IFBLK, "block" }, \
+ { S_IFIFO, "fifo" }, \
+ { S_IFSOCK, "sock" })
+
+#define S_ALL_PERM (S_ISUID | S_ISGID | S_ISVTX | \
+ S_IRWXU | S_IRWXG | S_IRWXO)
+
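
show_inode_type() keys on mode & S_IFMT, the file-type bit-field of i_mode, while the new S_ALL_PERM mask isolates the permission plus setuid/setgid/sticky bits for the 0%o printout. A small userspace sketch of the same decomposition (values illustrative):

	#include <stdio.h>
	#include <sys/stat.h>

	#define S_ALL_PERM (S_ISUID | S_ISGID | S_ISVTX | S_IRWXU | S_IRWXG | S_IRWXO)

	int main(void)
	{
		mode_t mode = S_IFREG | S_ISUID | 0755;	/* a setuid regular file */

		/* Prints "type bits: 0100000, perm bits: 04755". */
		printf("type bits: 0%o, perm bits: 0%o\n",
		       (unsigned int)(mode & S_IFMT), (unsigned int)(mode & S_ALL_PERM));
		return 0;
	}
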
struct f2fs_sb_info;
struct f2fs_io_info;
struct extent_info;
@@ -215,17 +228,21 @@ DECLARE_EVENT_CLASS(f2fs__inode_exit,
TP_STRUCT__entry(
__field(dev_t, dev)
__field(ino_t, ino)
+ __field(umode_t, mode)
__field(int, ret)
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
+ __entry->mode = inode->i_mode;
__entry->ret = ret;
),
- TP_printk("dev = (%d,%d), ino = %lu, ret = %d",
+ TP_printk("dev = (%d,%d), ino = %lu, type: %s, mode = 0%o, ret = %d",
show_dev_ino(__entry),
+ show_inode_type(__entry->mode & S_IFMT),
+ __entry->mode & S_ALL_PERM,
__entry->ret)
);
@@ -866,6 +883,75 @@ TRACE_EVENT(f2fs_lookup_end,
__entry->err)
);
+TRACE_EVENT(f2fs_rename_start,
+
+ TP_PROTO(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry,
+ unsigned int flags),
+
+ TP_ARGS(old_dir, old_dentry, new_dir, new_dentry, flags),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __string(old_name, old_dentry->d_name.name)
+ __field(ino_t, new_pino)
+ __string(new_name, new_dentry->d_name.name)
+ __field(unsigned int, flags)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = old_dir->i_sb->s_dev;
+ __entry->ino = old_dir->i_ino;
+ __assign_str(old_name, old_dentry->d_name.name);
+ __entry->new_pino = new_dir->i_ino;
+ __assign_str(new_name, new_dentry->d_name.name);
+ __entry->flags = flags;
+ ),
+
+ TP_printk("dev = (%d,%d), old_dir = %lu, old_name: %s, "
+ "new_dir = %lu, new_name: %s, flags = %u",
+ show_dev_ino(__entry),
+ __get_str(old_name),
+ __entry->new_pino,
+ __get_str(new_name),
+ __entry->flags)
+);
+
+TRACE_EVENT(f2fs_rename_end,
+
+ TP_PROTO(struct dentry *old_dentry, struct dentry *new_dentry,
+ unsigned int flags, int ret),
+
+ TP_ARGS(old_dentry, new_dentry, flags, ret),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __string(old_name, old_dentry->d_name.name)
+ __string(new_name, new_dentry->d_name.name)
+ __field(unsigned int, flags)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = old_dentry->d_sb->s_dev;
+ __entry->ino = old_dentry->d_inode->i_ino;
+ __assign_str(old_name, old_dentry->d_name.name);
+ __assign_str(new_name, new_dentry->d_name.name);
+ __entry->flags = flags;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, old_name: %s, "
+ "new_name: %s, flags = %u, ret = %d",
+ show_dev_ino(__entry),
+ __get_str(old_name),
+ __get_str(new_name),
+ __entry->flags,
+ __entry->ret)
+);
+
TRACE_EVENT(f2fs_readdir,
TP_PROTO(struct inode *dir, loff_t start_pos, loff_t end_pos, int err),
@@ -1283,13 +1369,6 @@ DEFINE_EVENT(f2fs__page, f2fs_set_page_dirty,
TP_ARGS(page, type)
);
-DEFINE_EVENT(f2fs__page, f2fs_vm_page_mkwrite,
-
- TP_PROTO(struct page *page, int type),
-
- TP_ARGS(page, type)
-);
-
TRACE_EVENT(f2fs_replace_atomic_write_block,
TP_PROTO(struct inode *inode, struct inode *cow_inode, pgoff_t index,
@@ -1327,30 +1406,50 @@ TRACE_EVENT(f2fs_replace_atomic_write_block,
__entry->recovery)
);
-TRACE_EVENT(f2fs_filemap_fault,
+DECLARE_EVENT_CLASS(f2fs_mmap,
- TP_PROTO(struct inode *inode, pgoff_t index, unsigned long ret),
+ TP_PROTO(struct inode *inode, pgoff_t index,
+ vm_flags_t flags, vm_fault_t ret),
- TP_ARGS(inode, index, ret),
+ TP_ARGS(inode, index, flags, ret),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(ino_t, ino)
__field(pgoff_t, index)
- __field(unsigned long, ret)
+ __field(vm_flags_t, flags)
+ __field(vm_fault_t, ret)
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
__entry->index = index;
+ __entry->flags = flags;
__entry->ret = ret;
),
- TP_printk("dev = (%d,%d), ino = %lu, index = %lu, ret = %lx",
+ TP_printk("dev = (%d,%d), ino = %lu, index = %lu, flags: %s, ret: %s",
show_dev_ino(__entry),
(unsigned long)__entry->index,
- __entry->ret)
+ __print_flags(__entry->flags, "|", FAULT_FLAG_TRACE),
+ __print_flags(__entry->ret, "|", VM_FAULT_RESULT_TRACE))
+);
+
+DEFINE_EVENT(f2fs_mmap, f2fs_filemap_fault,
+
+ TP_PROTO(struct inode *inode, pgoff_t index,
+ vm_flags_t flags, vm_fault_t ret),
+
+ TP_ARGS(inode, index, flags, ret)
+);
+
+DEFINE_EVENT(f2fs_mmap, f2fs_vm_page_mkwrite,
+
+ TP_PROTO(struct inode *inode, pgoff_t index,
+ vm_flags_t flags, vm_fault_t ret),
+
+ TP_ARGS(inode, index, flags, ret)
);
TRACE_EVENT(f2fs_writepages,
@@ -1512,7 +1611,7 @@ DEFINE_EVENT(f2fs_discard, f2fs_remove_discard,
TP_ARGS(dev, blkstart, blklen)
);
-TRACE_EVENT(f2fs_issue_reset_zone,
+DECLARE_EVENT_CLASS(f2fs_reset_zone,
TP_PROTO(struct block_device *dev, block_t blkstart),
@@ -1528,11 +1627,25 @@ TRACE_EVENT(f2fs_issue_reset_zone,
__entry->blkstart = blkstart;
),
- TP_printk("dev = (%d,%d), reset zone at block = 0x%llx",
+ TP_printk("dev = (%d,%d), zone at block = 0x%llx",
show_dev(__entry->dev),
(unsigned long long)__entry->blkstart)
);
+DEFINE_EVENT(f2fs_reset_zone, f2fs_queue_reset_zone,
+
+ TP_PROTO(struct block_device *dev, block_t blkstart),
+
+ TP_ARGS(dev, blkstart)
+);
+
+DEFINE_EVENT(f2fs_reset_zone, f2fs_issue_reset_zone,
+
+ TP_PROTO(struct block_device *dev, block_t blkstart),
+
+ TP_ARGS(dev, blkstart)
+);
+
TRACE_EVENT(f2fs_issue_flush,
TP_PROTO(struct block_device *dev, unsigned int nobarrier,
@@ -1979,6 +2092,7 @@ TRACE_EVENT(f2fs_iostat,
__field(unsigned long long, fs_nrio)
__field(unsigned long long, fs_mrio)
__field(unsigned long long, fs_discard)
+ __field(unsigned long long, fs_reset_zone)
),
TP_fast_assign(
@@ -2010,12 +2124,14 @@ TRACE_EVENT(f2fs_iostat,
__entry->fs_nrio = iostat[FS_NODE_READ_IO];
__entry->fs_mrio = iostat[FS_META_READ_IO];
__entry->fs_discard = iostat[FS_DISCARD_IO];
+ __entry->fs_reset_zone = iostat[FS_ZONE_RESET_IO];
),
TP_printk("dev = (%d,%d), "
"app [write=%llu (direct=%llu, buffered=%llu), mapped=%llu, "
"compr(buffered=%llu, mapped=%llu)], "
- "fs [data=%llu, cdata=%llu, node=%llu, meta=%llu, discard=%llu], "
+ "fs [data=%llu, cdata=%llu, node=%llu, meta=%llu, discard=%llu, "
+ "reset_zone=%llu], "
"gc [data=%llu, node=%llu], "
"cp [data=%llu, node=%llu, meta=%llu], "
"app [read=%llu (direct=%llu, buffered=%llu), mapped=%llu], "
@@ -2026,6 +2142,7 @@ TRACE_EVENT(f2fs_iostat,
__entry->app_bio, __entry->app_mio, __entry->app_bcdio,
__entry->app_mcdio, __entry->fs_dio, __entry->fs_cdio,
__entry->fs_nio, __entry->fs_mio, __entry->fs_discard,
+ __entry->fs_reset_zone,
__entry->fs_gc_dio, __entry->fs_gc_nio, __entry->fs_cp_dio,
__entry->fs_cp_nio, __entry->fs_cp_mio,
__entry->app_rio, __entry->app_drio, __entry->app_brio,
diff --git a/include/trace/events/fib.h b/include/trace/events/fib.h
index 76297ecd4935..20b914250ce9 100644
--- a/include/trace/events/fib.h
+++ b/include/trace/events/fib.h
@@ -65,7 +65,7 @@ TRACE_EVENT(fib_table_lookup,
}
dev = nhc ? nhc->nhc_dev : NULL;
- strlcpy(__entry->name, dev ? dev->name : "-", IFNAMSIZ);
+ strscpy(__entry->name, dev ? dev->name : "-", IFNAMSIZ);
if (nhc) {
if (nhc->nhc_gw_family == AF_INET) {
diff --git a/include/trace/events/fib6.h b/include/trace/events/fib6.h
index 4d3e607b3cde..5d7ee2610728 100644
--- a/include/trace/events/fib6.h
+++ b/include/trace/events/fib6.h
@@ -63,7 +63,7 @@ TRACE_EVENT(fib6_table_lookup,
}
if (res->nh && res->nh->fib_nh_dev) {
- strlcpy(__entry->name, res->nh->fib_nh_dev->name, IFNAMSIZ);
+ strscpy(__entry->name, res->nh->fib_nh_dev->name, IFNAMSIZ);
} else {
strcpy(__entry->name, "-");
}
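
Both fib hunks swap strlcpy() for strscpy(). The two take the same arguments, but strscpy() never reads the source past the destination size and returns the number of characters copied, or -E2BIG when the result was truncated, rather than strlcpy()'s would-be source length. A sketch of the resulting calling pattern (the helper is hypothetical):

	#include <linux/netdevice.h>
	#include <linux/string.h>

	static void copy_dev_name(char dst[IFNAMSIZ], const struct net_device *dev)
	{
		/* dst is always NUL-terminated on return, even on truncation. */
		if (strscpy(dst, dev ? dev->name : "-", IFNAMSIZ) < 0)
			pr_debug("device name truncated\n");
	}
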
diff --git a/include/trace/events/fsi.h b/include/trace/events/fsi.h
index c9a72e8432b8..5ff15126ad9d 100644
--- a/include/trace/events/fsi.h
+++ b/include/trace/events/fsi.h
@@ -122,6 +122,37 @@ TRACE_EVENT(fsi_master_break,
)
);
+TRACE_EVENT(fsi_master_scan,
+ TP_PROTO(const struct fsi_master *master, bool scan),
+ TP_ARGS(master, scan),
+ TP_STRUCT__entry(
+ __field(int, master_idx)
+ __field(int, n_links)
+ __field(bool, scan)
+ ),
+ TP_fast_assign(
+ __entry->master_idx = master->idx;
+ __entry->n_links = master->n_links;
+ __entry->scan = scan;
+ ),
+ TP_printk("fsi%d (%d links) %s", __entry->master_idx, __entry->n_links,
+ __entry->scan ? "scan" : "unscan")
+);
+
+TRACE_EVENT(fsi_master_unregister,
+ TP_PROTO(const struct fsi_master *master),
+ TP_ARGS(master),
+ TP_STRUCT__entry(
+ __field(int, master_idx)
+ __field(int, n_links)
+ ),
+ TP_fast_assign(
+ __entry->master_idx = master->idx;
+ __entry->n_links = master->n_links;
+ ),
+ TP_printk("fsi%d (%d links)", __entry->master_idx, __entry->n_links)
+);
+
TRACE_EVENT(fsi_slave_init,
TP_PROTO(const struct fsi_slave *slave),
TP_ARGS(slave),
diff --git a/include/trace/events/fsi_master_i2cr.h b/include/trace/events/fsi_master_i2cr.h
new file mode 100644
index 000000000000..c33eba130049
--- /dev/null
+++ b/include/trace/events/fsi_master_i2cr.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM fsi_master_i2cr
+
+#if !defined(_TRACE_FSI_MASTER_I2CR_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_FSI_MASTER_I2CR_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(i2cr_i2c_error,
+ TP_PROTO(const struct i2c_client *client, uint32_t command, int rc),
+ TP_ARGS(client, command, rc),
+ TP_STRUCT__entry(
+ __field(int, bus)
+ __field(int, rc)
+ __array(unsigned char, command, sizeof(uint32_t))
+ __field(unsigned short, addr)
+ ),
+ TP_fast_assign(
+ __entry->bus = client->adapter->nr;
+ __entry->rc = rc;
+ memcpy(__entry->command, &command, sizeof(uint32_t));
+ __entry->addr = client->addr;
+ ),
+ TP_printk("%d-%02x command:{ %*ph } rc:%d", __entry->bus, __entry->addr,
+ (int)sizeof(uint32_t), __entry->command, __entry->rc)
+);
+
+TRACE_EVENT(i2cr_read,
+ TP_PROTO(const struct i2c_client *client, uint32_t command, uint64_t *data),
+ TP_ARGS(client, command, data),
+ TP_STRUCT__entry(
+ __field(int, bus)
+ __array(unsigned char, data, sizeof(uint64_t))
+ __array(unsigned char, command, sizeof(uint32_t))
+ __field(unsigned short, addr)
+ ),
+ TP_fast_assign(
+ __entry->bus = client->adapter->nr;
+ memcpy(__entry->data, data, sizeof(uint64_t));
+ memcpy(__entry->command, &command, sizeof(uint32_t));
+ __entry->addr = client->addr;
+ ),
+ TP_printk("%d-%02x command:{ %*ph } { %*ph }", __entry->bus, __entry->addr,
+ (int)sizeof(uint32_t), __entry->command, (int)sizeof(uint64_t), __entry->data)
+);
+
+TRACE_EVENT(i2cr_status,
+ TP_PROTO(const struct i2c_client *client, uint64_t status),
+ TP_ARGS(client, status),
+ TP_STRUCT__entry(
+ __field(uint64_t, status)
+ __field(int, bus)
+ __field(unsigned short, addr)
+ ),
+ TP_fast_assign(
+ __entry->status = status;
+ __entry->bus = client->adapter->nr;
+ __entry->addr = client->addr;
+ ),
+ TP_printk("%d-%02x %016llx", __entry->bus, __entry->addr, __entry->status)
+);
+
+TRACE_EVENT(i2cr_status_error,
+ TP_PROTO(const struct i2c_client *client, uint64_t status, uint64_t error, uint64_t log),
+ TP_ARGS(client, status, error, log),
+ TP_STRUCT__entry(
+ __field(uint64_t, error)
+ __field(uint64_t, log)
+ __field(uint64_t, status)
+ __field(int, bus)
+ __field(unsigned short, addr)
+ ),
+ TP_fast_assign(
+ __entry->error = error;
+ __entry->log = log;
+ __entry->status = status;
+ __entry->bus = client->adapter->nr;
+ __entry->addr = client->addr;
+ ),
+ TP_printk("%d-%02x status:%016llx error:%016llx log:%016llx", __entry->bus, __entry->addr,
+ __entry->status, __entry->error, __entry->log)
+);
+
+TRACE_EVENT(i2cr_write,
+ TP_PROTO(const struct i2c_client *client, uint32_t command, uint64_t data),
+ TP_ARGS(client, command, data),
+ TP_STRUCT__entry(
+ __field(int, bus)
+ __array(unsigned char, data, sizeof(uint64_t))
+ __array(unsigned char, command, sizeof(uint32_t))
+ __field(unsigned short, addr)
+ ),
+ TP_fast_assign(
+ __entry->bus = client->adapter->nr;
+ memcpy(__entry->data, &data, sizeof(uint64_t));
+ memcpy(__entry->command, &command, sizeof(uint32_t));
+ __entry->addr = client->addr;
+ ),
+ TP_printk("%d-%02x command:{ %*ph } { %*ph }", __entry->bus, __entry->addr,
+ (int)sizeof(uint32_t), __entry->command, (int)sizeof(uint64_t), __entry->data)
+);
+
+#endif
+
+#include <trace/define_trace.h>
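
The command and data words above are copied into byte arrays so TP_printk() can render them with %*ph, the kernel printf extension that hex-dumps a small buffer from an (int length, pointer) argument pair. A minimal sketch (the helper and value are illustrative):

	#include <linux/kernel.h>
	#include <linux/string.h>

	static void dump_command(u32 command)
	{
		unsigned char buf[sizeof(u32)];

		memcpy(buf, &command, sizeof(u32));
		/* On little-endian, command 0xdeadbeef prints as "{ ef be ad de }". */
		pr_info("command: { %*ph }\n", (int)sizeof(buf), buf);
	}
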
diff --git a/include/trace/events/habanalabs.h b/include/trace/events/habanalabs.h
index 951643e6a7a9..a78d21fa9f29 100644
--- a/include/trace/events/habanalabs.h
+++ b/include/trace/events/habanalabs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0
*
- * Copyright 2016-2021 HabanaLabs, Ltd.
+ * Copyright 2022-2023 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
@@ -87,6 +87,49 @@ DEFINE_EVENT(habanalabs_dma_alloc_template, habanalabs_dma_free,
TP_PROTO(struct device *dev, u64 cpu_addr, u64 dma_addr, size_t size, const char *caller),
TP_ARGS(dev, cpu_addr, dma_addr, size, caller));
+DECLARE_EVENT_CLASS(habanalabs_dma_map_template,
+ TP_PROTO(struct device *dev, u64 phys_addr, u64 dma_addr, size_t len,
+ enum dma_data_direction dir, const char *caller),
+
+ TP_ARGS(dev, phys_addr, dma_addr, len, dir, caller),
+
+ TP_STRUCT__entry(
+ __string(dname, dev_name(dev))
+ __field(u64, phys_addr)
+ __field(u64, dma_addr)
+ __field(u32, len)
+ __field(int, dir)
+ __field(const char *, caller)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dname, dev_name(dev));
+ __entry->phys_addr = phys_addr;
+ __entry->dma_addr = dma_addr;
+ __entry->len = len;
+ __entry->dir = dir;
+ __entry->caller = caller;
+ ),
+
+ TP_printk("%s: phys_addr: %#llx, dma_addr: %#llx, len: %#x, dir: %d, caller: %s",
+ __get_str(dname),
+ __entry->phys_addr,
+ __entry->dma_addr,
+ __entry->len,
+ __entry->dir,
+ __entry->caller)
+);
+
+DEFINE_EVENT(habanalabs_dma_map_template, habanalabs_dma_map_page,
+ TP_PROTO(struct device *dev, u64 phys_addr, u64 dma_addr, size_t len,
+ enum dma_data_direction dir, const char *caller),
+ TP_ARGS(dev, phys_addr, dma_addr, len, dir, caller));
+
+DEFINE_EVENT(habanalabs_dma_map_template, habanalabs_dma_unmap_page,
+ TP_PROTO(struct device *dev, u64 phys_addr, u64 dma_addr, size_t len,
+ enum dma_data_direction dir, const char *caller),
+ TP_ARGS(dev, phys_addr, dma_addr, len, dir, caller));
+
DECLARE_EVENT_CLASS(habanalabs_comms_template,
TP_PROTO(struct device *dev, char *op_str),
diff --git a/include/trace/events/handshake.h b/include/trace/events/handshake.h
index 8dadcab5f12a..bdd8a03cf5ba 100644
--- a/include/trace/events/handshake.h
+++ b/include/trace/events/handshake.h
@@ -6,7 +6,86 @@
#define _TRACE_HANDSHAKE_H
#include <linux/net.h>
+#include <net/tls_prot.h>
#include <linux/tracepoint.h>
+#include <trace/events/net_probe_common.h>
+
+#define TLS_RECORD_TYPE_LIST \
+ record_type(CHANGE_CIPHER_SPEC) \
+ record_type(ALERT) \
+ record_type(HANDSHAKE) \
+ record_type(DATA) \
+ record_type(HEARTBEAT) \
+ record_type(TLS12_CID) \
+ record_type_end(ACK)
+
+#undef record_type
+#undef record_type_end
+#define record_type(x) TRACE_DEFINE_ENUM(TLS_RECORD_TYPE_##x);
+#define record_type_end(x) TRACE_DEFINE_ENUM(TLS_RECORD_TYPE_##x);
+
+TLS_RECORD_TYPE_LIST
+
+#undef record_type
+#undef record_type_end
+#define record_type(x) { TLS_RECORD_TYPE_##x, #x },
+#define record_type_end(x) { TLS_RECORD_TYPE_##x, #x }
+
+#define show_tls_content_type(type) \
+ __print_symbolic(type, TLS_RECORD_TYPE_LIST)
+
+TRACE_DEFINE_ENUM(TLS_ALERT_LEVEL_WARNING);
+TRACE_DEFINE_ENUM(TLS_ALERT_LEVEL_FATAL);
+
+#define show_tls_alert_level(level) \
+ __print_symbolic(level, \
+ { TLS_ALERT_LEVEL_WARNING, "Warning" }, \
+ { TLS_ALERT_LEVEL_FATAL, "Fatal" })
+
+#define TLS_ALERT_DESCRIPTION_LIST \
+ alert_description(CLOSE_NOTIFY) \
+ alert_description(UNEXPECTED_MESSAGE) \
+ alert_description(BAD_RECORD_MAC) \
+ alert_description(RECORD_OVERFLOW) \
+ alert_description(HANDSHAKE_FAILURE) \
+ alert_description(BAD_CERTIFICATE) \
+ alert_description(UNSUPPORTED_CERTIFICATE) \
+ alert_description(CERTIFICATE_REVOKED) \
+ alert_description(CERTIFICATE_EXPIRED) \
+ alert_description(CERTIFICATE_UNKNOWN) \
+ alert_description(ILLEGAL_PARAMETER) \
+ alert_description(UNKNOWN_CA) \
+ alert_description(ACCESS_DENIED) \
+ alert_description(DECODE_ERROR) \
+ alert_description(DECRYPT_ERROR) \
+ alert_description(TOO_MANY_CIDS_REQUESTED) \
+ alert_description(PROTOCOL_VERSION) \
+ alert_description(INSUFFICIENT_SECURITY) \
+ alert_description(INTERNAL_ERROR) \
+ alert_description(INAPPROPRIATE_FALLBACK) \
+ alert_description(USER_CANCELED) \
+ alert_description(MISSING_EXTENSION) \
+ alert_description(UNSUPPORTED_EXTENSION) \
+ alert_description(UNRECOGNIZED_NAME) \
+ alert_description(BAD_CERTIFICATE_STATUS_RESPONSE) \
+ alert_description(UNKNOWN_PSK_IDENTITY) \
+ alert_description(CERTIFICATE_REQUIRED) \
+ alert_description_end(NO_APPLICATION_PROTOCOL)
+
+#undef alert_description
+#undef alert_description_end
+#define alert_description(x) TRACE_DEFINE_ENUM(TLS_ALERT_DESC_##x);
+#define alert_description_end(x) TRACE_DEFINE_ENUM(TLS_ALERT_DESC_##x);
+
+TLS_ALERT_DESCRIPTION_LIST
+
+#undef alert_description
+#undef alert_description_end
+#define alert_description(x) { TLS_ALERT_DESC_##x, #x },
+#define alert_description_end(x) { TLS_ALERT_DESC_##x, #x }
+
+#define show_tls_alert_description(desc) \
+ __print_symbolic(desc, TLS_ALERT_DESCRIPTION_LIST)
DECLARE_EVENT_CLASS(handshake_event_class,
TP_PROTO(
@@ -106,6 +185,47 @@ DECLARE_EVENT_CLASS(handshake_error_class,
), \
TP_ARGS(net, req, sk, err))
+DECLARE_EVENT_CLASS(handshake_alert_class,
+ TP_PROTO(
+ const struct sock *sk,
+ unsigned char level,
+ unsigned char description
+ ),
+ TP_ARGS(sk, level, description),
+ TP_STRUCT__entry(
+ /* sockaddr_in6 is always bigger than sockaddr_in */
+ __array(__u8, saddr, sizeof(struct sockaddr_in6))
+ __array(__u8, daddr, sizeof(struct sockaddr_in6))
+ __field(unsigned int, netns_ino)
+ __field(unsigned long, level)
+ __field(unsigned long, description)
+ ),
+ TP_fast_assign(
+ const struct inet_sock *inet = inet_sk(sk);
+
+ memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
+ memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
+ TP_STORE_ADDR_PORTS(__entry, inet, sk);
+
+ __entry->netns_ino = sock_net(sk)->ns.inum;
+ __entry->level = level;
+ __entry->description = description;
+ ),
+ TP_printk("src=%pISpc dest=%pISpc %s: %s",
+ __entry->saddr, __entry->daddr,
+ show_tls_alert_level(__entry->level),
+ show_tls_alert_description(__entry->description)
+ )
+);
+#define DEFINE_HANDSHAKE_ALERT(name) \
+ DEFINE_EVENT(handshake_alert_class, name, \
+ TP_PROTO( \
+ const struct sock *sk, \
+ unsigned char level, \
+ unsigned char description \
+ ), \
+ TP_ARGS(sk, level, description))
+
/*
* Request lifetime events
@@ -154,6 +274,46 @@ DEFINE_HANDSHAKE_ERROR(handshake_cmd_accept_err);
DEFINE_HANDSHAKE_FD_EVENT(handshake_cmd_done);
DEFINE_HANDSHAKE_ERROR(handshake_cmd_done_err);
+/*
+ * TLS Record events
+ */
+
+TRACE_EVENT(tls_contenttype,
+ TP_PROTO(
+ const struct sock *sk,
+ unsigned char type
+ ),
+ TP_ARGS(sk, type),
+ TP_STRUCT__entry(
+ /* sockaddr_in6 is always bigger than sockaddr_in */
+ __array(__u8, saddr, sizeof(struct sockaddr_in6))
+ __array(__u8, daddr, sizeof(struct sockaddr_in6))
+ __field(unsigned int, netns_ino)
+ __field(unsigned long, type)
+ ),
+ TP_fast_assign(
+ const struct inet_sock *inet = inet_sk(sk);
+
+ memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
+ memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
+ TP_STORE_ADDR_PORTS(__entry, inet, sk);
+
+ __entry->netns_ino = sock_net(sk)->ns.inum;
+ __entry->type = type;
+ ),
+ TP_printk("src=%pISpc dest=%pISpc %s",
+ __entry->saddr, __entry->daddr,
+ show_tls_content_type(__entry->type)
+ )
+);
+
+/*
+ * TLS Alert events
+ */
+
+DEFINE_HANDSHAKE_ALERT(tls_alert_send);
+DEFINE_HANDSHAKE_ALERT(tls_alert_recv);
+
#endif /* _TRACE_HANDSHAKE_H */
#include <trace/define_trace.h>
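
[Editor's note: for orientation, the events added above would be emitted roughly as follows; these call sites are a hedged sketch, not taken from this patch:]

	/* one tls_contenttype event per TLS record processed */
	trace_tls_contenttype(sk, TLS_RECORD_TYPE_HANDSHAKE);

	/* alert events record level + description for symbolic printing */
	trace_tls_alert_send(sk, TLS_ALERT_LEVEL_FATAL,
			     TLS_ALERT_DESC_HANDSHAKE_FAILURE);
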
diff --git a/include/trace/events/intel_ifs.h b/include/trace/events/intel_ifs.h
index d7353024016c..af0af3f1d9b7 100644
--- a/include/trace/events/intel_ifs.h
+++ b/include/trace/events/intel_ifs.h
@@ -10,25 +10,25 @@
TRACE_EVENT(ifs_status,
- TP_PROTO(int cpu, union ifs_scan activate, union ifs_status status),
+ TP_PROTO(int cpu, int start, int stop, u64 status),
- TP_ARGS(cpu, activate, status),
+ TP_ARGS(cpu, start, stop, status),
TP_STRUCT__entry(
__field( u64, status )
__field( int, cpu )
- __field( u8, start )
- __field( u8, stop )
+ __field( u16, start )
+ __field( u16, stop )
),
TP_fast_assign(
__entry->cpu = cpu;
- __entry->start = activate.start;
- __entry->stop = activate.stop;
- __entry->status = status.data;
+ __entry->start = start;
+ __entry->stop = stop;
+ __entry->status = status;
),
- TP_printk("cpu: %d, start: %.2x, stop: %.2x, status: %llx",
+ TP_printk("cpu: %d, start: %.4x, stop: %.4x, status: %.16llx",
__entry->cpu,
__entry->start,
__entry->stop,
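
[Editor's note: after this change the caller passes plain integers instead of the ifs_scan/ifs_status unions; a hedged sketch, variable names assumed:]

	trace_ifs_status(cpu, start_chunk, stop_chunk, msr_status);
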
diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h
index 8f5ee380d309..5646ae15a957 100644
--- a/include/trace/events/jbd2.h
+++ b/include/trace/events/jbd2.h
@@ -462,11 +462,9 @@ TRACE_EVENT(jbd2_shrink_scan_exit,
TRACE_EVENT(jbd2_shrink_checkpoint_list,
TP_PROTO(journal_t *journal, tid_t first_tid, tid_t tid, tid_t last_tid,
- unsigned long nr_freed, unsigned long nr_scanned,
- tid_t next_tid),
+ unsigned long nr_freed, tid_t next_tid),
- TP_ARGS(journal, first_tid, tid, last_tid, nr_freed,
- nr_scanned, next_tid),
+ TP_ARGS(journal, first_tid, tid, last_tid, nr_freed, next_tid),
TP_STRUCT__entry(
__field(dev_t, dev)
@@ -474,7 +472,6 @@ TRACE_EVENT(jbd2_shrink_checkpoint_list,
__field(tid_t, tid)
__field(tid_t, last_tid)
__field(unsigned long, nr_freed)
- __field(unsigned long, nr_scanned)
__field(tid_t, next_tid)
),
@@ -484,15 +481,14 @@ TRACE_EVENT(jbd2_shrink_checkpoint_list,
__entry->tid = tid;
__entry->last_tid = last_tid;
__entry->nr_freed = nr_freed;
- __entry->nr_scanned = nr_scanned;
__entry->next_tid = next_tid;
),
TP_printk("dev %d,%d shrink transaction %u-%u(%u) freed %lu "
- "scanned %lu next transaction %u",
+ "next transaction %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->first_tid, __entry->tid, __entry->last_tid,
- __entry->nr_freed, __entry->nr_scanned, __entry->next_tid)
+ __entry->nr_freed, __entry->next_tid)
);
#endif /* _TRACE_JBD2_H */
diff --git a/include/trace/events/ksm.h b/include/trace/events/ksm.h
index b5ac35c1d0e8..e728647b5d26 100644
--- a/include/trace/events/ksm.h
+++ b/include/trace/events/ksm.h
@@ -245,6 +245,39 @@ TRACE_EVENT(ksm_remove_rmap_item,
__entry->pfn, __entry->rmap_item, __entry->mm)
);
+/**
+ * ksm_advisor - called after the advisor has run
+ *
+ * @scan_time: scan time in seconds
+ * @pages_to_scan: new pages_to_scan value
+ * @cpu_percent: cpu usage in percent
+ *
+ * Allows tracing of the ksm advisor.
+ */
+TRACE_EVENT(ksm_advisor,
+
+ TP_PROTO(s64 scan_time, unsigned long pages_to_scan,
+ unsigned int cpu_percent),
+
+ TP_ARGS(scan_time, pages_to_scan, cpu_percent),
+
+ TP_STRUCT__entry(
+ __field(s64, scan_time)
+ __field(unsigned long, pages_to_scan)
+ __field(unsigned int, cpu_percent)
+ ),
+
+ TP_fast_assign(
+ __entry->scan_time = scan_time;
+ __entry->pages_to_scan = pages_to_scan;
+ __entry->cpu_percent = cpu_percent;
+ ),
+
+ TP_printk("ksm scan time %lld pages_to_scan %lu cpu percent %u",
+ __entry->scan_time, __entry->pages_to_scan,
+ __entry->cpu_percent)
+);
+
#endif /* _TRACE_KSM_H */
/* This part must be outside protection */
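
[Editor's note: a hedged sketch of the advisor emitting this event once per scan cycle; placement and variable names assumed:]

	/* scan_time is in seconds, per the kernel-doc above */
	trace_ksm_advisor(scan_time, pages_to_scan, cpu_percent);
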
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 3bd31ea23fee..011fba6b5552 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -62,7 +62,7 @@ TRACE_EVENT(kvm_vcpu_wakeup,
__entry->valid ? "valid" : "invalid")
);
-#if defined(CONFIG_HAVE_KVM_IRQFD)
+#if defined(CONFIG_HAVE_KVM_IRQCHIP)
TRACE_EVENT(kvm_set_irq,
TP_PROTO(unsigned int gsi, int level, int irq_source_id),
TP_ARGS(gsi, level, irq_source_id),
@@ -82,7 +82,7 @@ TRACE_EVENT(kvm_set_irq,
TP_printk("gsi %u level %d source %d",
__entry->gsi, __entry->level, __entry->irq_source_id)
);
-#endif /* defined(CONFIG_HAVE_KVM_IRQFD) */
+#endif /* defined(CONFIG_HAVE_KVM_IRQCHIP) */
#if defined(__KVM_HAVE_IOAPIC)
#define kvm_deliver_mode \
@@ -170,7 +170,7 @@ TRACE_EVENT(kvm_msi_set_irq,
#endif /* defined(__KVM_HAVE_IOAPIC) */
-#if defined(CONFIG_HAVE_KVM_IRQFD)
+#if defined(CONFIG_HAVE_KVM_IRQCHIP)
#ifdef kvm_irqchips
#define kvm_ack_irq_string "irqchip %s pin %u"
@@ -197,7 +197,7 @@ TRACE_EVENT(kvm_ack_irq,
TP_printk(kvm_ack_irq_string, kvm_ack_irq_parm)
);
-#endif /* defined(CONFIG_HAVE_KVM_IRQFD) */
+#endif /* defined(CONFIG_HAVE_KVM_IRQCHIP) */
diff --git a/include/trace/events/kyber.h b/include/trace/events/kyber.h
index bf7533f171ff..9d44781efc1c 100644
--- a/include/trace/events/kyber.h
+++ b/include/trace/events/kyber.h
@@ -31,8 +31,8 @@ TRACE_EVENT(kyber_latency,
TP_fast_assign(
__entry->dev = dev;
- strlcpy(__entry->domain, domain, sizeof(__entry->domain));
- strlcpy(__entry->type, type, sizeof(__entry->type));
+ strscpy(__entry->domain, domain, sizeof(__entry->domain));
+ strscpy(__entry->type, type, sizeof(__entry->type));
__entry->percentile = percentile;
__entry->numerator = numerator;
__entry->denominator = denominator;
@@ -59,7 +59,7 @@ TRACE_EVENT(kyber_adjust,
TP_fast_assign(
__entry->dev = dev;
- strlcpy(__entry->domain, domain, sizeof(__entry->domain));
+ strscpy(__entry->domain, domain, sizeof(__entry->domain));
__entry->depth = depth;
),
@@ -81,7 +81,7 @@ TRACE_EVENT(kyber_throttled,
TP_fast_assign(
__entry->dev = dev;
- strlcpy(__entry->domain, domain, sizeof(__entry->domain));
+ strscpy(__entry->domain, domain, sizeof(__entry->domain));
),
TP_printk("%d,%d %s", MAJOR(__entry->dev), MINOR(__entry->dev),
diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h
index 061b5128f335..0190ef725b43 100644
--- a/include/trace/events/migrate.h
+++ b/include/trace/events/migrate.h
@@ -49,10 +49,11 @@ TRACE_EVENT(mm_migrate_pages,
TP_PROTO(unsigned long succeeded, unsigned long failed,
unsigned long thp_succeeded, unsigned long thp_failed,
- unsigned long thp_split, enum migrate_mode mode, int reason),
+ unsigned long thp_split, unsigned long large_folio_split,
+ enum migrate_mode mode, int reason),
TP_ARGS(succeeded, failed, thp_succeeded, thp_failed,
- thp_split, mode, reason),
+ thp_split, large_folio_split, mode, reason),
TP_STRUCT__entry(
__field( unsigned long, succeeded)
@@ -60,26 +61,29 @@ TRACE_EVENT(mm_migrate_pages,
__field( unsigned long, thp_succeeded)
__field( unsigned long, thp_failed)
__field( unsigned long, thp_split)
+ __field( unsigned long, large_folio_split)
__field( enum migrate_mode, mode)
__field( int, reason)
),
TP_fast_assign(
- __entry->succeeded = succeeded;
- __entry->failed = failed;
- __entry->thp_succeeded = thp_succeeded;
- __entry->thp_failed = thp_failed;
- __entry->thp_split = thp_split;
- __entry->mode = mode;
- __entry->reason = reason;
+ __entry->succeeded = succeeded;
+ __entry->failed = failed;
+ __entry->thp_succeeded = thp_succeeded;
+ __entry->thp_failed = thp_failed;
+ __entry->thp_split = thp_split;
+ __entry->large_folio_split = large_folio_split;
+ __entry->mode = mode;
+ __entry->reason = reason;
),
- TP_printk("nr_succeeded=%lu nr_failed=%lu nr_thp_succeeded=%lu nr_thp_failed=%lu nr_thp_split=%lu mode=%s reason=%s",
+ TP_printk("nr_succeeded=%lu nr_failed=%lu nr_thp_succeeded=%lu nr_thp_failed=%lu nr_thp_split=%lu nr_split=%lu mode=%s reason=%s",
__entry->succeeded,
__entry->failed,
__entry->thp_succeeded,
__entry->thp_failed,
__entry->thp_split,
+ __entry->large_folio_split,
__print_symbolic(__entry->mode, MIGRATE_MODE),
__print_symbolic(__entry->reason, MIGRATE_REASON))
);
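
[Editor's note: every caller gains the new large_folio_split argument with this change; a hedged call-site sketch, variable names assumed:]

	trace_mm_migrate_pages(nr_succeeded, nr_failed, nr_thp_succeeded,
			       nr_thp_failed, nr_thp_split, nr_split,
			       mode, reason);
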
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index b63e7c0fbbe5..d801409b33cf 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -144,7 +144,7 @@ IF_HAVE_PG_ARCH_X(arch_3)
#define __VM_ARCH_SPECIFIC_1 {VM_PAT, "pat" }
#elif defined(CONFIG_PPC)
#define __VM_ARCH_SPECIFIC_1 {VM_SAO, "sao" }
-#elif defined(CONFIG_PARISC) || defined(CONFIG_IA64)
+#elif defined(CONFIG_PARISC)
#define __VM_ARCH_SPECIFIC_1 {VM_GROWSUP, "growsup" }
#elif !defined(CONFIG_MMU)
#define __VM_ARCH_SPECIFIC_1 {VM_MAPPED_COPY,"mappedcopy" }
@@ -223,8 +223,8 @@ IF_HAVE_VM_SOFTDIRTY(VM_SOFTDIRTY, "softdirty" ) \
#define compact_result_to_feedback(result) \
({ \
enum compact_result __result = result; \
- (compaction_failed(__result)) ? COMPACTION_FAILED : \
- (compaction_withdrawn(__result)) ? COMPACTION_WITHDRAWN : COMPACTION_PROGRESS; \
+ (__result == COMPACT_COMPLETE) ? COMPACTION_FAILED : \
+ (__result == COMPACT_SUCCESS) ? COMPACTION_PROGRESS : COMPACTION_WITHDRAWN; \
})
#define COMPACTION_FEEDBACK \
diff --git a/include/trace/events/mptcp.h b/include/trace/events/mptcp.h
index 563e48617374..09e72215b9f9 100644
--- a/include/trace/events/mptcp.h
+++ b/include/trace/events/mptcp.h
@@ -44,7 +44,7 @@ TRACE_EVENT(mptcp_subflow_get_send,
ssk = mptcp_subflow_tcp_sock(subflow);
if (ssk && sk_fullsock(ssk)) {
__entry->snd_wnd = tcp_sk(ssk)->snd_wnd;
- __entry->pace = ssk->sk_pacing_rate;
+ __entry->pace = READ_ONCE(ssk->sk_pacing_rate);
} else {
__entry->snd_wnd = 0;
__entry->pace = 0;
diff --git a/include/trace/events/neigh.h b/include/trace/events/neigh.h
index 5eaa1fa99171..833143d0992e 100644
--- a/include/trace/events/neigh.h
+++ b/include/trace/events/neigh.h
@@ -39,7 +39,6 @@ TRACE_EVENT(neigh_create,
),
TP_fast_assign(
- struct in6_addr *pin6;
__be32 *p32;
__entry->family = tbl->family;
@@ -47,7 +46,6 @@ TRACE_EVENT(neigh_create,
__entry->entries = atomic_read(&tbl->gc_entries);
__entry->created = n != NULL;
__entry->gc_exempt = exempt_from_gc;
- pin6 = (struct in6_addr *)__entry->primary_key6;
p32 = (__be32 *)__entry->primary_key4;
if (tbl->family == AF_INET)
@@ -57,6 +55,8 @@ TRACE_EVENT(neigh_create,
#if IS_ENABLED(CONFIG_IPV6)
if (tbl->family == AF_INET6) {
+ struct in6_addr *pin6;
+
pin6 = (struct in6_addr *)__entry->primary_key6;
*pin6 = *(struct in6_addr *)pkey;
}
diff --git a/include/trace/events/net.h b/include/trace/events/net.h
index da611a7aaf97..f667c76a3b02 100644
--- a/include/trace/events/net.h
+++ b/include/trace/events/net.h
@@ -51,7 +51,8 @@ TRACE_EVENT(net_dev_start_xmit,
__entry->network_offset = skb_network_offset(skb);
__entry->transport_offset_valid =
skb_transport_header_was_set(skb);
- __entry->transport_offset = skb_transport_offset(skb);
+ __entry->transport_offset = skb_transport_header_was_set(skb) ?
+ skb_transport_offset(skb) : 0;
__entry->tx_flags = skb_shinfo(skb)->tx_flags;
__entry->gso_size = skb_shinfo(skb)->gso_size;
__entry->gso_segs = skb_shinfo(skb)->gso_segs;
diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h
index beec534cbaab..447a8c21cf57 100644
--- a/include/trace/events/netfs.h
+++ b/include/trace/events/netfs.h
@@ -16,34 +16,57 @@
* Define enums for tracing information.
*/
#define netfs_read_traces \
+ EM(netfs_read_trace_dio_read, "DIO-READ ") \
EM(netfs_read_trace_expanded, "EXPANDED ") \
EM(netfs_read_trace_readahead, "READAHEAD") \
EM(netfs_read_trace_readpage, "READPAGE ") \
+ EM(netfs_read_trace_prefetch_for_write, "PREFETCHW") \
E_(netfs_read_trace_write_begin, "WRITEBEGN")
+#define netfs_write_traces \
+ EM(netfs_write_trace_dio_write, "DIO-WRITE") \
+ EM(netfs_write_trace_launder, "LAUNDER ") \
+ EM(netfs_write_trace_unbuffered_write, "UNB-WRITE") \
+ EM(netfs_write_trace_writeback, "WRITEBACK") \
+ E_(netfs_write_trace_writethrough, "WRITETHRU")
+
#define netfs_rreq_origins \
EM(NETFS_READAHEAD, "RA") \
EM(NETFS_READPAGE, "RP") \
- E_(NETFS_READ_FOR_WRITE, "RW")
+ EM(NETFS_READ_FOR_WRITE, "RW") \
+ EM(NETFS_WRITEBACK, "WB") \
+ EM(NETFS_WRITETHROUGH, "WT") \
+ EM(NETFS_LAUNDER_WRITE, "LW") \
+ EM(NETFS_UNBUFFERED_WRITE, "UW") \
+ EM(NETFS_DIO_READ, "DR") \
+ E_(NETFS_DIO_WRITE, "DW")
#define netfs_rreq_traces \
EM(netfs_rreq_trace_assess, "ASSESS ") \
EM(netfs_rreq_trace_copy, "COPY ") \
EM(netfs_rreq_trace_done, "DONE ") \
EM(netfs_rreq_trace_free, "FREE ") \
+ EM(netfs_rreq_trace_redirty, "REDIRTY") \
EM(netfs_rreq_trace_resubmit, "RESUBMT") \
EM(netfs_rreq_trace_unlock, "UNLOCK ") \
- E_(netfs_rreq_trace_unmark, "UNMARK ")
+ EM(netfs_rreq_trace_unmark, "UNMARK ") \
+ EM(netfs_rreq_trace_wait_ip, "WAIT-IP") \
+ EM(netfs_rreq_trace_wake_ip, "WAKE-IP") \
+ E_(netfs_rreq_trace_write_done, "WR-DONE")
#define netfs_sreq_sources \
EM(NETFS_FILL_WITH_ZEROES, "ZERO") \
EM(NETFS_DOWNLOAD_FROM_SERVER, "DOWN") \
EM(NETFS_READ_FROM_CACHE, "READ") \
- E_(NETFS_INVALID_READ, "INVL") \
+ EM(NETFS_INVALID_READ, "INVL") \
+ EM(NETFS_UPLOAD_TO_SERVER, "UPLD") \
+ EM(NETFS_WRITE_TO_CACHE, "WRIT") \
+ E_(NETFS_INVALID_WRITE, "INVL")
#define netfs_sreq_traces \
EM(netfs_sreq_trace_download_instead, "RDOWN") \
EM(netfs_sreq_trace_free, "FREE ") \
+ EM(netfs_sreq_trace_limited, "LIMIT") \
EM(netfs_sreq_trace_prepare, "PREP ") \
EM(netfs_sreq_trace_resubmit_short, "SHORT") \
EM(netfs_sreq_trace_submit, "SUBMT") \
@@ -55,19 +78,24 @@
#define netfs_failures \
EM(netfs_fail_check_write_begin, "check-write-begin") \
EM(netfs_fail_copy_to_cache, "copy-to-cache") \
+ EM(netfs_fail_dio_read_short, "dio-read-short") \
+ EM(netfs_fail_dio_read_zero, "dio-read-zero") \
EM(netfs_fail_read, "read") \
EM(netfs_fail_short_read, "short-read") \
- E_(netfs_fail_prepare_write, "prep-write")
+ EM(netfs_fail_prepare_write, "prep-write") \
+ E_(netfs_fail_write, "write")
#define netfs_rreq_ref_traces \
- EM(netfs_rreq_trace_get_hold, "GET HOLD ") \
+ EM(netfs_rreq_trace_get_for_outstanding,"GET OUTSTND") \
EM(netfs_rreq_trace_get_subreq, "GET SUBREQ ") \
EM(netfs_rreq_trace_put_complete, "PUT COMPLT ") \
EM(netfs_rreq_trace_put_discard, "PUT DISCARD") \
EM(netfs_rreq_trace_put_failed, "PUT FAILED ") \
- EM(netfs_rreq_trace_put_hold, "PUT HOLD ") \
+ EM(netfs_rreq_trace_put_no_submit, "PUT NO-SUBM") \
+ EM(netfs_rreq_trace_put_return, "PUT RETURN ") \
EM(netfs_rreq_trace_put_subreq, "PUT SUBREQ ") \
- EM(netfs_rreq_trace_put_zero_len, "PUT ZEROLEN") \
+ EM(netfs_rreq_trace_put_work, "PUT WORK ") \
+ EM(netfs_rreq_trace_see_work, "SEE WORK ") \
E_(netfs_rreq_trace_new, "NEW ")
#define netfs_sreq_ref_traces \
@@ -76,11 +104,44 @@
EM(netfs_sreq_trace_get_short_read, "GET SHORTRD") \
EM(netfs_sreq_trace_new, "NEW ") \
EM(netfs_sreq_trace_put_clear, "PUT CLEAR ") \
+ EM(netfs_sreq_trace_put_discard, "PUT DISCARD") \
EM(netfs_sreq_trace_put_failed, "PUT FAILED ") \
EM(netfs_sreq_trace_put_merged, "PUT MERGED ") \
EM(netfs_sreq_trace_put_no_copy, "PUT NO COPY") \
+ EM(netfs_sreq_trace_put_wip, "PUT WIP ") \
+ EM(netfs_sreq_trace_put_work, "PUT WORK ") \
E_(netfs_sreq_trace_put_terminated, "PUT TERM ")
+#define netfs_folio_traces \
+ /* The first few correspond to enum netfs_how_to_modify */ \
+ EM(netfs_folio_is_uptodate, "mod-uptodate") \
+ EM(netfs_just_prefetch, "mod-prefetch") \
+ EM(netfs_whole_folio_modify, "mod-whole-f") \
+ EM(netfs_modify_and_clear, "mod-n-clear") \
+ EM(netfs_streaming_write, "mod-streamw") \
+ EM(netfs_streaming_write_cont, "mod-streamw+") \
+ EM(netfs_flush_content, "flush") \
+ EM(netfs_streaming_filled_page, "mod-streamw-f") \
+ EM(netfs_streaming_cont_filled_page, "mod-streamw-f+") \
+ /* The rest are for writeback */ \
+ EM(netfs_folio_trace_clear, "clear") \
+ EM(netfs_folio_trace_clear_s, "clear-s") \
+ EM(netfs_folio_trace_clear_g, "clear-g") \
+ EM(netfs_folio_trace_copy_to_cache, "copy") \
+ EM(netfs_folio_trace_end_copy, "end-copy") \
+ EM(netfs_folio_trace_filled_gaps, "filled-gaps") \
+ EM(netfs_folio_trace_kill, "kill") \
+ EM(netfs_folio_trace_launder, "launder") \
+ EM(netfs_folio_trace_mkwrite, "mkwrite") \
+ EM(netfs_folio_trace_mkwrite_plus, "mkwrite+") \
+ EM(netfs_folio_trace_read_gaps, "read-gaps") \
+ EM(netfs_folio_trace_redirty, "redirty") \
+ EM(netfs_folio_trace_redirtied, "redirtied") \
+ EM(netfs_folio_trace_store, "store") \
+ EM(netfs_folio_trace_store_plus, "store+") \
+ EM(netfs_folio_trace_wthru, "wthru") \
+ E_(netfs_folio_trace_wthru_plus, "wthru+")
+
#ifndef __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
#define __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
@@ -90,11 +151,13 @@
#define E_(a, b) a
enum netfs_read_trace { netfs_read_traces } __mode(byte);
+enum netfs_write_trace { netfs_write_traces } __mode(byte);
enum netfs_rreq_trace { netfs_rreq_traces } __mode(byte);
enum netfs_sreq_trace { netfs_sreq_traces } __mode(byte);
enum netfs_failure { netfs_failures } __mode(byte);
enum netfs_rreq_ref_trace { netfs_rreq_ref_traces } __mode(byte);
enum netfs_sreq_ref_trace { netfs_sreq_ref_traces } __mode(byte);
+enum netfs_folio_trace { netfs_folio_traces } __mode(byte);
#endif
@@ -107,6 +170,7 @@ enum netfs_sreq_ref_trace { netfs_sreq_ref_traces } __mode(byte);
#define E_(a, b) TRACE_DEFINE_ENUM(a);
netfs_read_traces;
+netfs_write_traces;
netfs_rreq_origins;
netfs_rreq_traces;
netfs_sreq_sources;
@@ -114,6 +178,7 @@ netfs_sreq_traces;
netfs_failures;
netfs_rreq_ref_traces;
netfs_sreq_ref_traces;
+netfs_folio_traces;
/*
* Now redefine the EM() and E_() macros to map the enums to the strings that
@@ -314,6 +379,82 @@ TRACE_EVENT(netfs_sreq_ref,
__entry->ref)
);
+TRACE_EVENT(netfs_folio,
+ TP_PROTO(struct folio *folio, enum netfs_folio_trace why),
+
+ TP_ARGS(folio, why),
+
+ TP_STRUCT__entry(
+ __field(ino_t, ino)
+ __field(pgoff_t, index)
+ __field(unsigned int, nr)
+ __field(enum netfs_folio_trace, why)
+ ),
+
+ TP_fast_assign(
+ __entry->ino = folio->mapping->host->i_ino;
+ __entry->why = why;
+ __entry->index = folio_index(folio);
+ __entry->nr = folio_nr_pages(folio);
+ ),
+
+ TP_printk("i=%05lx ix=%05lx-%05lx %s",
+ __entry->ino, __entry->index, __entry->index + __entry->nr - 1,
+ __print_symbolic(__entry->why, netfs_folio_traces))
+ );
+
+TRACE_EVENT(netfs_write_iter,
+ TP_PROTO(const struct kiocb *iocb, const struct iov_iter *from),
+
+ TP_ARGS(iocb, from),
+
+ TP_STRUCT__entry(
+ __field(unsigned long long, start )
+ __field(size_t, len )
+ __field(unsigned int, flags )
+ ),
+
+ TP_fast_assign(
+ __entry->start = iocb->ki_pos;
+ __entry->len = iov_iter_count(from);
+ __entry->flags = iocb->ki_flags;
+ ),
+
+ TP_printk("WRITE-ITER s=%llx l=%zx f=%x",
+ __entry->start, __entry->len, __entry->flags)
+ );
+
+TRACE_EVENT(netfs_write,
+ TP_PROTO(const struct netfs_io_request *wreq,
+ enum netfs_write_trace what),
+
+ TP_ARGS(wreq, what),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, wreq )
+ __field(unsigned int, cookie )
+ __field(enum netfs_write_trace, what )
+ __field(unsigned long long, start )
+ __field(size_t, len )
+ ),
+
+ TP_fast_assign(
+ struct netfs_inode *__ctx = netfs_inode(wreq->inode);
+ struct fscache_cookie *__cookie = netfs_i_cookie(__ctx);
+ __entry->wreq = wreq->debug_id;
+ __entry->cookie = __cookie ? __cookie->debug_id : 0;
+ __entry->what = what;
+ __entry->start = wreq->start;
+ __entry->len = wreq->len;
+ ),
+
+ TP_printk("R=%08x %s c=%08x by=%llx-%llx",
+ __entry->wreq,
+ __print_symbolic(__entry->what, netfs_write_traces),
+ __entry->cookie,
+ __entry->start, __entry->start + __entry->len - 1)
+ );
+
#undef EM
#undef E_
#endif /* _TRACE_NETFS_H */
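
[Editor's note: a hedged sketch of how the new write-side events pair up with the enums defined above; call sites assumed, not from this patch:]

	trace_netfs_write_iter(iocb, from);			/* on entry to ->write_iter() */
	trace_netfs_write(wreq, netfs_write_trace_writeback);	/* once per write request */
	trace_netfs_folio(folio, netfs_folio_trace_store);	/* per-folio decision */
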
diff --git a/include/trace/events/page_pool.h b/include/trace/events/page_pool.h
index ca534501158b..6834356b2d2a 100644
--- a/include/trace/events/page_pool.h
+++ b/include/trace/events/page_pool.h
@@ -9,7 +9,7 @@
#include <linux/tracepoint.h>
#include <trace/events/mmflags.h>
-#include <net/page_pool.h>
+#include <net/page_pool/types.h>
TRACE_EVENT(page_pool_release,
diff --git a/include/trace/events/qdisc.h b/include/trace/events/qdisc.h
index a3995925cb05..1f4258308b96 100644
--- a/include/trace/events/qdisc.h
+++ b/include/trace/events/qdisc.h
@@ -81,14 +81,14 @@ TRACE_EVENT(qdisc_reset,
TP_ARGS(q),
TP_STRUCT__entry(
- __string( dev, qdisc_dev(q) )
- __string( kind, q->ops->id )
- __field( u32, parent )
- __field( u32, handle )
+ __string( dev, qdisc_dev(q)->name )
+ __string( kind, q->ops->id )
+ __field( u32, parent )
+ __field( u32, handle )
),
TP_fast_assign(
- __assign_str(dev, qdisc_dev(q));
+ __assign_str(dev, qdisc_dev(q)->name);
__assign_str(kind, q->ops->id);
__entry->parent = q->parent;
__entry->handle = q->handle;
@@ -106,14 +106,14 @@ TRACE_EVENT(qdisc_destroy,
TP_ARGS(q),
TP_STRUCT__entry(
- __string( dev, qdisc_dev(q) )
- __string( kind, q->ops->id )
- __field( u32, parent )
- __field( u32, handle )
+ __string( dev, qdisc_dev(q)->name )
+ __string( kind, q->ops->id )
+ __field( u32, parent )
+ __field( u32, handle )
),
TP_fast_assign(
- __assign_str(dev, qdisc_dev(q));
+ __assign_str(dev, qdisc_dev(q)->name);
__assign_str(kind, q->ops->id);
__entry->parent = q->parent;
__entry->handle = q->handle;
diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
index 8f461e04e5f0..110c1475c527 100644
--- a/include/trace/events/rpcrdma.h
+++ b/include/trace/events/rpcrdma.h
@@ -22,47 +22,37 @@
** Event classes
**/
-DECLARE_EVENT_CLASS(rpcrdma_completion_class,
+DECLARE_EVENT_CLASS(rpcrdma_simple_cid_class,
TP_PROTO(
- const struct ib_wc *wc,
const struct rpc_rdma_cid *cid
),
- TP_ARGS(wc, cid),
+ TP_ARGS(cid),
TP_STRUCT__entry(
__field(u32, cq_id)
__field(int, completion_id)
- __field(unsigned long, status)
- __field(unsigned int, vendor_err)
),
TP_fast_assign(
__entry->cq_id = cid->ci_queue_id;
__entry->completion_id = cid->ci_completion_id;
- __entry->status = wc->status;
- if (wc->status)
- __entry->vendor_err = wc->vendor_err;
- else
- __entry->vendor_err = 0;
),
- TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
- __entry->cq_id, __entry->completion_id,
- rdma_show_wc_status(__entry->status),
- __entry->status, __entry->vendor_err
+ TP_printk("cq.id=%d cid=%d",
+ __entry->cq_id, __entry->completion_id
)
);
-#define DEFINE_COMPLETION_EVENT(name) \
- DEFINE_EVENT(rpcrdma_completion_class, name, \
+#define DEFINE_SIMPLE_CID_EVENT(name) \
+ DEFINE_EVENT(rpcrdma_simple_cid_class, name, \
TP_PROTO( \
- const struct ib_wc *wc, \
const struct rpc_rdma_cid *cid \
), \
- TP_ARGS(wc, cid))
+ TP_ARGS(cid) \
+ )
-DECLARE_EVENT_CLASS(rpcrdma_send_completion_class,
+DECLARE_EVENT_CLASS(rpcrdma_completion_class,
TP_PROTO(
const struct ib_wc *wc,
const struct rpc_rdma_cid *cid
@@ -73,20 +63,29 @@ DECLARE_EVENT_CLASS(rpcrdma_send_completion_class,
TP_STRUCT__entry(
__field(u32, cq_id)
__field(int, completion_id)
+ __field(unsigned long, status)
+ __field(unsigned int, vendor_err)
),
TP_fast_assign(
__entry->cq_id = cid->ci_queue_id;
__entry->completion_id = cid->ci_completion_id;
+ __entry->status = wc->status;
+ if (wc->status)
+ __entry->vendor_err = wc->vendor_err;
+ else
+ __entry->vendor_err = 0;
),
- TP_printk("cq.id=%u cid=%d",
- __entry->cq_id, __entry->completion_id
+ TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
+ __entry->cq_id, __entry->completion_id,
+ rdma_show_wc_status(__entry->status),
+ __entry->status, __entry->vendor_err
)
);
-#define DEFINE_SEND_COMPLETION_EVENT(name) \
- DEFINE_EVENT(rpcrdma_send_completion_class, name, \
+#define DEFINE_COMPLETION_EVENT(name) \
+ DEFINE_EVENT(rpcrdma_completion_class, name, \
TP_PROTO( \
const struct ib_wc *wc, \
const struct rpc_rdma_cid *cid \
@@ -978,27 +977,7 @@ TRACE_EVENT(xprtrdma_post_send_err,
)
);
-TRACE_EVENT(xprtrdma_post_recv,
- TP_PROTO(
- const struct rpcrdma_rep *rep
- ),
-
- TP_ARGS(rep),
-
- TP_STRUCT__entry(
- __field(u32, cq_id)
- __field(int, completion_id)
- ),
-
- TP_fast_assign(
- __entry->cq_id = rep->rr_cid.ci_queue_id;
- __entry->completion_id = rep->rr_cid.ci_completion_id;
- ),
-
- TP_printk("cq.id=%d cid=%d",
- __entry->cq_id, __entry->completion_id
- )
-);
+DEFINE_SIMPLE_CID_EVENT(xprtrdma_post_recv);
TRACE_EVENT(xprtrdma_post_recvs,
TP_PROTO(
@@ -1667,7 +1646,7 @@ TRACE_EVENT(svcrdma_encode_wseg,
__entry->offset = offset;
),
- TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
+ TP_printk("cq.id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
__entry->cq_id, __entry->completion_id,
__entry->segno, __entry->length,
(unsigned long long)__entry->offset, __entry->handle
@@ -1703,7 +1682,7 @@ TRACE_EVENT(svcrdma_decode_rseg,
__entry->offset = segment->rs_offset;
),
- TP_printk("cq_id=%u cid=%d segno=%u position=%u %u@0x%016llx:0x%08x",
+ TP_printk("cq.id=%u cid=%d segno=%u position=%u %u@0x%016llx:0x%08x",
__entry->cq_id, __entry->completion_id,
__entry->segno, __entry->position, __entry->length,
(unsigned long long)__entry->offset, __entry->handle
@@ -1740,7 +1719,7 @@ TRACE_EVENT(svcrdma_decode_wseg,
__entry->offset = segment->rs_offset;
),
- TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
+ TP_printk("cq.id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
__entry->cq_id, __entry->completion_id,
__entry->segno, __entry->length,
(unsigned long long)__entry->offset, __entry->handle
@@ -1783,29 +1762,29 @@ DEFINE_ERROR_EVENT(chunk);
DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
TP_PROTO(
- const struct svcxprt_rdma *rdma,
+ const struct rpc_rdma_cid *cid,
u64 dma_addr,
u32 length
),
- TP_ARGS(rdma, dma_addr, length),
+ TP_ARGS(cid, dma_addr, length),
TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
__field(u64, dma_addr)
__field(u32, length)
- __string(device, rdma->sc_cm_id->device->name)
- __string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
__entry->dma_addr = dma_addr;
__entry->length = length;
- __assign_str(device, rdma->sc_cm_id->device->name);
- __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
- TP_printk("addr=%s device=%s dma_addr=%llu length=%u",
- __get_str(addr), __get_str(device),
+ TP_printk("cq.id=%u cid=%d dma_addr=%llu length=%u",
+ __entry->cq_id, __entry->completion_id,
__entry->dma_addr, __entry->length
)
);
@@ -1813,11 +1792,12 @@ DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
#define DEFINE_SVC_DMA_EVENT(name) \
DEFINE_EVENT(svcrdma_dma_map_class, svcrdma_##name, \
TP_PROTO( \
- const struct svcxprt_rdma *rdma,\
+ const struct rpc_rdma_cid *cid, \
u64 dma_addr, \
u32 length \
), \
- TP_ARGS(rdma, dma_addr, length))
+ TP_ARGS(cid, dma_addr, length) \
+ )
DEFINE_SVC_DMA_EVENT(dma_map_page);
DEFINE_SVC_DMA_EVENT(dma_map_err);
@@ -1826,33 +1806,37 @@ DEFINE_SVC_DMA_EVENT(dma_unmap_page);
TRACE_EVENT(svcrdma_dma_map_rw_err,
TP_PROTO(
const struct svcxprt_rdma *rdma,
+ u64 offset,
+ u32 handle,
unsigned int nents,
int status
),
- TP_ARGS(rdma, nents, status),
+ TP_ARGS(rdma, offset, handle, nents, status),
TP_STRUCT__entry(
- __field(int, status)
+ __field(u32, cq_id)
+ __field(u32, handle)
+ __field(u64, offset)
__field(unsigned int, nents)
- __string(device, rdma->sc_cm_id->device->name)
- __string(addr, rdma->sc_xprt.xpt_remotebuf)
+ __field(int, status)
),
TP_fast_assign(
- __entry->status = status;
+ __entry->cq_id = rdma->sc_sq_cq->res.id;
+ __entry->handle = handle;
+ __entry->offset = offset;
__entry->nents = nents;
- __assign_str(device, rdma->sc_cm_id->device->name);
- __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
+ __entry->status = status;
),
- TP_printk("addr=%s device=%s nents=%u status=%d",
- __get_str(addr), __get_str(device), __entry->nents,
- __entry->status
+ TP_printk("cq.id=%u 0x%016llx:0x%08x nents=%u status=%d",
+ __entry->cq_id, (unsigned long long)__entry->offset,
+ __entry->handle, __entry->nents, __entry->status
)
);
-TRACE_EVENT(svcrdma_no_rwctx_err,
+TRACE_EVENT(svcrdma_rwctx_empty,
TP_PROTO(
const struct svcxprt_rdma *rdma,
unsigned int num_sges
@@ -1861,79 +1845,75 @@ TRACE_EVENT(svcrdma_no_rwctx_err,
TP_ARGS(rdma, num_sges),
TP_STRUCT__entry(
+ __field(u32, cq_id)
__field(unsigned int, num_sges)
- __string(device, rdma->sc_cm_id->device->name)
- __string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
+ __entry->cq_id = rdma->sc_sq_cq->res.id;
__entry->num_sges = num_sges;
- __assign_str(device, rdma->sc_cm_id->device->name);
- __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
- TP_printk("addr=%s device=%s num_sges=%d",
- __get_str(addr), __get_str(device), __entry->num_sges
+ TP_printk("cq.id=%u num_sges=%d",
+ __entry->cq_id, __entry->num_sges
)
);
TRACE_EVENT(svcrdma_page_overrun_err,
TP_PROTO(
- const struct svcxprt_rdma *rdma,
- const struct svc_rqst *rqst,
+ const struct rpc_rdma_cid *cid,
unsigned int pageno
),
- TP_ARGS(rdma, rqst, pageno),
+ TP_ARGS(cid, pageno),
TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
__field(unsigned int, pageno)
- __field(u32, xid)
- __string(device, rdma->sc_cm_id->device->name)
- __string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
__entry->pageno = pageno;
- __entry->xid = __be32_to_cpu(rqst->rq_xid);
- __assign_str(device, rdma->sc_cm_id->device->name);
- __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
- TP_printk("addr=%s device=%s xid=0x%08x pageno=%u", __get_str(addr),
- __get_str(device), __entry->xid, __entry->pageno
+ TP_printk("cq.id=%u cid=%d pageno=%u",
+ __entry->cq_id, __entry->completion_id,
+ __entry->pageno
)
);
TRACE_EVENT(svcrdma_small_wrch_err,
TP_PROTO(
- const struct svcxprt_rdma *rdma,
+ const struct rpc_rdma_cid *cid,
unsigned int remaining,
unsigned int seg_no,
unsigned int num_segs
),
- TP_ARGS(rdma, remaining, seg_no, num_segs),
+ TP_ARGS(cid, remaining, seg_no, num_segs),
TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
__field(unsigned int, remaining)
__field(unsigned int, seg_no)
__field(unsigned int, num_segs)
- __string(device, rdma->sc_cm_id->device->name)
- __string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
__entry->remaining = remaining;
__entry->seg_no = seg_no;
__entry->num_segs = num_segs;
- __assign_str(device, rdma->sc_cm_id->device->name);
- __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
- TP_printk("addr=%s device=%s remaining=%u seg_no=%u num_segs=%u",
- __get_str(addr), __get_str(device), __entry->remaining,
- __entry->seg_no, __entry->num_segs
+ TP_printk("cq.id=%u cid=%d remaining=%u seg_no=%u num_segs=%u",
+ __entry->cq_id, __entry->completion_id,
+ __entry->remaining, __entry->seg_no, __entry->num_segs
)
);
@@ -1959,7 +1939,7 @@ TRACE_EVENT(svcrdma_send_pullup,
__entry->msglen = msglen;
),
- TP_printk("cq_id=%u cid=%d hdr=%u msg=%u (total %u)",
+ TP_printk("cq.id=%u cid=%d hdr=%u msg=%u (total %u)",
__entry->cq_id, __entry->completion_id,
__entry->hdrlen, __entry->msglen,
__entry->hdrlen + __entry->msglen)
@@ -2014,37 +1994,17 @@ TRACE_EVENT(svcrdma_post_send,
wr->ex.invalidate_rkey : 0;
),
- TP_printk("cq_id=%u cid=%d num_sge=%u inv_rkey=0x%08x",
+ TP_printk("cq.id=%u cid=%d num_sge=%u inv_rkey=0x%08x",
__entry->cq_id, __entry->completion_id,
__entry->num_sge, __entry->inv_rkey
)
);
-DEFINE_SEND_COMPLETION_EVENT(svcrdma_wc_send);
+DEFINE_SIMPLE_CID_EVENT(svcrdma_wc_send);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_send_flush);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_send_err);
-TRACE_EVENT(svcrdma_post_recv,
- TP_PROTO(
- const struct svc_rdma_recv_ctxt *ctxt
- ),
-
- TP_ARGS(ctxt),
-
- TP_STRUCT__entry(
- __field(u32, cq_id)
- __field(int, completion_id)
- ),
-
- TP_fast_assign(
- __entry->cq_id = ctxt->rc_cid.ci_queue_id;
- __entry->completion_id = ctxt->rc_cid.ci_completion_id;
- ),
-
- TP_printk("cq.id=%d cid=%d",
- __entry->cq_id, __entry->completion_id
- )
-);
+DEFINE_SIMPLE_CID_EVENT(svcrdma_post_recv);
DEFINE_RECEIVE_SUCCESS_EVENT(svcrdma_wc_recv);
DEFINE_RECEIVE_FLUSH_EVENT(svcrdma_wc_recv_flush);
@@ -2112,6 +2072,14 @@ DEFINE_POST_CHUNK_EVENT(read);
DEFINE_POST_CHUNK_EVENT(write);
DEFINE_POST_CHUNK_EVENT(reply);
+DEFINE_EVENT(svcrdma_post_chunk_class, svcrdma_cc_release,
+ TP_PROTO(
+ const struct rpc_rdma_cid *cid,
+ int sqecount
+ ),
+ TP_ARGS(cid, sqecount)
+);
+
TRACE_EVENT(svcrdma_wc_read,
TP_PROTO(
const struct ib_wc *wc,
@@ -2144,8 +2112,9 @@ TRACE_EVENT(svcrdma_wc_read,
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_read_flush);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_read_err);
+DEFINE_SIMPLE_CID_EVENT(svcrdma_read_finished);
-DEFINE_SEND_COMPLETION_EVENT(svcrdma_wc_write);
+DEFINE_SIMPLE_CID_EVENT(svcrdma_wc_write);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_write_flush);
DEFINE_SEND_FLUSH_EVENT(svcrdma_wc_write_err);
@@ -2176,65 +2145,74 @@ TRACE_EVENT(svcrdma_qp_error,
)
);
-DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
+DECLARE_EVENT_CLASS(svcrdma_sendqueue_class,
TP_PROTO(
- const struct svcxprt_rdma *rdma
+ const struct svcxprt_rdma *rdma,
+ const struct rpc_rdma_cid *cid
),
- TP_ARGS(rdma),
+ TP_ARGS(rdma, cid),
TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
__field(int, avail)
__field(int, depth)
- __string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
__entry->avail = atomic_read(&rdma->sc_sq_avail);
__entry->depth = rdma->sc_sq_depth;
- __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
- TP_printk("addr=%s sc_sq_avail=%d/%d",
- __get_str(addr), __entry->avail, __entry->depth
+ TP_printk("cq.id=%u cid=%d sc_sq_avail=%d/%d",
+ __entry->cq_id, __entry->completion_id,
+ __entry->avail, __entry->depth
)
);
#define DEFINE_SQ_EVENT(name) \
- DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\
- TP_PROTO( \
- const struct svcxprt_rdma *rdma \
- ), \
- TP_ARGS(rdma))
+ DEFINE_EVENT(svcrdma_sendqueue_class, name, \
+ TP_PROTO( \
+ const struct svcxprt_rdma *rdma, \
+ const struct rpc_rdma_cid *cid \
+ ), \
+ TP_ARGS(rdma, cid) \
+ )
-DEFINE_SQ_EVENT(full);
-DEFINE_SQ_EVENT(retry);
+DEFINE_SQ_EVENT(svcrdma_sq_full);
+DEFINE_SQ_EVENT(svcrdma_sq_retry);
TRACE_EVENT(svcrdma_sq_post_err,
TP_PROTO(
const struct svcxprt_rdma *rdma,
+ const struct rpc_rdma_cid *cid,
int status
),
- TP_ARGS(rdma, status),
+ TP_ARGS(rdma, cid, status),
TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, completion_id)
__field(int, avail)
__field(int, depth)
__field(int, status)
- __string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
+ __entry->cq_id = cid->ci_queue_id;
+ __entry->completion_id = cid->ci_completion_id;
__entry->avail = atomic_read(&rdma->sc_sq_avail);
__entry->depth = rdma->sc_sq_depth;
__entry->status = status;
- __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
- TP_printk("addr=%s sc_sq_avail=%d/%d status=%d",
- __get_str(addr), __entry->avail, __entry->depth,
- __entry->status
+ TP_printk("cq.id=%u cid=%d sc_sq_avail=%d/%d status=%d",
+ __entry->cq_id, __entry->completion_id,
+ __entry->avail, __entry->depth, __entry->status
)
);
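
[Editor's note: the recurring move in this file — one-off TRACE_EVENTs whose only payload is the cq.id/cid pair become DEFINE_SIMPLE_CID_EVENT() instances, and svcxprt_rdma/svc_rqst pointer arguments are replaced by the rpc_rdma_cid so call sites need only the completion identity. A hedged sketch of an emit site, with the ctxt name assumed:]

	trace_svcrdma_post_recv(&ctxt->rc_cid);
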
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 4c53a5ef6257..87b8de9b6c1c 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -128,6 +128,7 @@
EM(rxrpc_skb_eaten_by_unshare_nomem, "ETN unshar-nm") \
EM(rxrpc_skb_get_conn_secured, "GET conn-secd") \
EM(rxrpc_skb_get_conn_work, "GET conn-work") \
+ EM(rxrpc_skb_get_last_nack, "GET last-nack") \
EM(rxrpc_skb_get_local_work, "GET locl-work") \
EM(rxrpc_skb_get_reject_work, "GET rej-work ") \
EM(rxrpc_skb_get_to_recvmsg, "GET to-recv ") \
@@ -141,6 +142,7 @@
EM(rxrpc_skb_put_error_report, "PUT error-rep") \
EM(rxrpc_skb_put_input, "PUT input ") \
EM(rxrpc_skb_put_jumbo_subpacket, "PUT jumbo-sub") \
+ EM(rxrpc_skb_put_last_nack, "PUT last-nack") \
EM(rxrpc_skb_put_purge, "PUT purge ") \
EM(rxrpc_skb_put_rotate, "PUT rotate ") \
EM(rxrpc_skb_put_unknown, "PUT unknown ") \
@@ -178,7 +180,9 @@
#define rxrpc_peer_traces \
EM(rxrpc_peer_free, "FREE ") \
EM(rxrpc_peer_get_accept, "GET accept ") \
+ EM(rxrpc_peer_get_application, "GET app ") \
EM(rxrpc_peer_get_bundle, "GET bundle ") \
+ EM(rxrpc_peer_get_call, "GET call ") \
EM(rxrpc_peer_get_client_conn, "GET cln-conn") \
EM(rxrpc_peer_get_input, "GET input ") \
EM(rxrpc_peer_get_input_error, "GET inpt-err") \
@@ -187,6 +191,7 @@
EM(rxrpc_peer_get_service_conn, "GET srv-conn") \
EM(rxrpc_peer_new_client, "NEW client ") \
EM(rxrpc_peer_new_prealloc, "NEW prealloc") \
+ EM(rxrpc_peer_put_application, "PUT app ") \
EM(rxrpc_peer_put_bundle, "PUT bundle ") \
EM(rxrpc_peer_put_call, "PUT call ") \
EM(rxrpc_peer_put_conn, "PUT conn ") \
@@ -328,7 +333,7 @@
E_(rxrpc_rtt_tx_ping, "PING")
#define rxrpc_rtt_rx_traces \
- EM(rxrpc_rtt_rx_cancel, "CNCL") \
+ EM(rxrpc_rtt_rx_other_ack, "OACK") \
EM(rxrpc_rtt_rx_obsolete, "OBSL") \
EM(rxrpc_rtt_rx_lost, "LOST") \
EM(rxrpc_rtt_rx_ping_response, "PONG") \
@@ -1549,7 +1554,7 @@ TRACE_EVENT(rxrpc_congest,
memcpy(&__entry->sum, summary, sizeof(__entry->sum));
),
- TP_printk("c=%08x r=%08x %s q=%08x %s cw=%u ss=%u nA=%u,%u+%u r=%u b=%u u=%u d=%u l=%x%s%s%s",
+ TP_printk("c=%08x r=%08x %s q=%08x %s cw=%u ss=%u nA=%u,%u+%u,%u b=%u u=%u d=%u l=%x%s%s%s",
__entry->call,
__entry->ack_serial,
__print_symbolic(__entry->sum.ack_reason, rxrpc_ack_names),
@@ -1557,9 +1562,9 @@ TRACE_EVENT(rxrpc_congest,
__print_symbolic(__entry->sum.mode, rxrpc_congest_modes),
__entry->sum.cwnd,
__entry->sum.ssthresh,
- __entry->sum.nr_acks, __entry->sum.saw_nacks,
+ __entry->sum.nr_acks, __entry->sum.nr_retained_nacks,
__entry->sum.nr_new_acks,
- __entry->sum.nr_rot_new_acks,
+ __entry->sum.nr_new_nacks,
__entry->top - __entry->hard_ack,
__entry->sum.cumulative_acks,
__entry->sum.dup_acks,
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index fbb99a61f714..dbb01b4b7451 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -493,33 +493,30 @@ DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_blocked,
*/
DECLARE_EVENT_CLASS(sched_stat_runtime,
- TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
+ TP_PROTO(struct task_struct *tsk, u64 runtime),
- TP_ARGS(tsk, __perf_count(runtime), vruntime),
+ TP_ARGS(tsk, __perf_count(runtime)),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( u64, runtime )
- __field( u64, vruntime )
),
TP_fast_assign(
memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
__entry->pid = tsk->pid;
__entry->runtime = runtime;
- __entry->vruntime = vruntime;
),
- TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
+ TP_printk("comm=%s pid=%d runtime=%Lu [ns]",
__entry->comm, __entry->pid,
- (unsigned long long)__entry->runtime,
- (unsigned long long)__entry->vruntime)
+ (unsigned long long)__entry->runtime)
);
DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
- TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
- TP_ARGS(tsk, runtime, vruntime));
+ TP_PROTO(struct task_struct *tsk, u64 runtime),
+ TP_ARGS(tsk, runtime));
/*
* Tracepoint for showing priority inheritance modifying a tasks
@@ -664,6 +661,58 @@ DEFINE_EVENT(sched_numa_pair_template, sched_swap_numa,
TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
);
+#ifdef CONFIG_NUMA_BALANCING
+#define NUMAB_SKIP_REASON \
+ EM( NUMAB_SKIP_UNSUITABLE, "unsuitable" ) \
+ EM( NUMAB_SKIP_SHARED_RO, "shared_ro" ) \
+ EM( NUMAB_SKIP_INACCESSIBLE, "inaccessible" ) \
+ EM( NUMAB_SKIP_SCAN_DELAY, "scan_delay" ) \
+ EM( NUMAB_SKIP_PID_INACTIVE, "pid_inactive" ) \
+ EM( NUMAB_SKIP_IGNORE_PID, "ignore_pid_inactive" ) \
+ EMe(NUMAB_SKIP_SEQ_COMPLETED, "seq_completed" )
+
+/* Redefine for export. */
+#undef EM
+#undef EMe
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define EMe(a, b) TRACE_DEFINE_ENUM(a);
+
+NUMAB_SKIP_REASON
+
+/* Redefine for symbolic printing. */
+#undef EM
+#undef EMe
+#define EM(a, b) { a, b },
+#define EMe(a, b) { a, b }
+
+TRACE_EVENT(sched_skip_vma_numa,
+
+ TP_PROTO(struct mm_struct *mm, struct vm_area_struct *vma,
+ enum numa_vmaskip_reason reason),
+
+ TP_ARGS(mm, vma, reason),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, numa_scan_offset)
+ __field(unsigned long, vm_start)
+ __field(unsigned long, vm_end)
+ __field(enum numa_vmaskip_reason, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->numa_scan_offset = mm->numa_scan_offset;
+ __entry->vm_start = vma->vm_start;
+ __entry->vm_end = vma->vm_end;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("numa_scan_offset=%lX vm_start=%lX vm_end=%lX reason=%s",
+ __entry->numa_scan_offset,
+ __entry->vm_start,
+ __entry->vm_end,
+ __print_symbolic(__entry->reason, NUMAB_SKIP_REASON))
+);
+#endif /* CONFIG_NUMA_BALANCING */
/*
* Tracepoint for waking a polling cpu without an IPI.
@@ -735,6 +784,11 @@ DECLARE_TRACE(sched_update_nr_running_tp,
TP_PROTO(struct rq *rq, int change),
TP_ARGS(rq, change));
+DECLARE_TRACE(sched_compute_energy_tp,
+ TP_PROTO(struct task_struct *p, int dst_cpu, unsigned long energy,
+ unsigned long max_util, unsigned long busy_time),
+ TP_ARGS(p, dst_cpu, energy, max_util, busy_time));
+
#endif /* _TRACE_SCHED_H */
/* This part must be outside protection */
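
[Editor's note: DECLARE_TRACE() creates a bare tracepoint with no trace event; consumers attach probes through the generated register helper. A hedged sketch — the probe body is illustrative:]

	#include <linux/sched.h>
	#include <trace/events/sched.h>

	static void probe_energy(void *data, struct task_struct *p, int dst_cpu,
				 unsigned long energy, unsigned long max_util,
				 unsigned long busy_time)
	{
		pr_info("pid=%d dst_cpu=%d energy=%lu\n", p->pid, dst_cpu, energy);
	}

	/* attach at module init:
	 *	register_trace_sched_compute_energy_tp(probe_energy, NULL);
	 */
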
diff --git a/include/trace/events/scsi.h b/include/trace/events/scsi.h
index a2c7befd451a..8e2d9b1b0e77 100644
--- a/include/trace/events/scsi.h
+++ b/include/trace/events/scsi.h
@@ -269,9 +269,14 @@ DECLARE_EVENT_CLASS(scsi_cmd_done_timeout_template,
__field( unsigned int, prot_sglen )
__field( unsigned char, prot_op )
__dynamic_array(unsigned char, cmnd, cmd->cmd_len)
+ __field( u8, sense_key )
+ __field( u8, asc )
+ __field( u8, ascq )
),
TP_fast_assign(
+ struct scsi_sense_hdr sshdr;
+
__entry->host_no = cmd->device->host->host_no;
__entry->channel = cmd->device->channel;
__entry->id = cmd->device->id;
@@ -285,11 +290,22 @@ DECLARE_EVENT_CLASS(scsi_cmd_done_timeout_template,
__entry->prot_sglen = scsi_prot_sg_count(cmd);
__entry->prot_op = scsi_get_prot_op(cmd);
memcpy(__get_dynamic_array(cmnd), cmd->cmnd, cmd->cmd_len);
+ if (cmd->sense_buffer && SCSI_SENSE_VALID(cmd) &&
+ scsi_command_normalize_sense(cmd, &sshdr)) {
+ __entry->sense_key = sshdr.sense_key;
+ __entry->asc = sshdr.asc;
+ __entry->ascq = sshdr.ascq;
+ } else {
+ __entry->sense_key = 0;
+ __entry->asc = 0;
+ __entry->ascq = 0;
+ }
),
TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u " \
"prot_op=%s driver_tag=%d scheduler_tag=%d cmnd=(%s %s raw=%s) " \
- "result=(driver=%s host=%s message=%s status=%s)",
+ "result=(driver=%s host=%s message=%s status=%s) "
+ "sense=(key=%#x asc=%#x ascq=%#x)",
__entry->host_no, __entry->channel, __entry->id,
__entry->lun, __entry->data_sglen, __entry->prot_sglen,
show_prot_op_name(__entry->prot_op), __entry->driver_tag,
@@ -299,7 +315,8 @@ DECLARE_EVENT_CLASS(scsi_cmd_done_timeout_template,
"DRIVER_OK",
show_hostbyte_name(((__entry->result) >> 16) & 0xff),
"COMMAND_COMPLETE",
- show_statusbyte_name(__entry->result & 0xff))
+ show_statusbyte_name(__entry->result & 0xff),
+ __entry->sense_key, __entry->asc, __entry->ascq)
);
DEFINE_EVENT(scsi_cmd_done_timeout_template, scsi_dispatch_cmd_done,
diff --git a/include/trace/events/spi.h b/include/trace/events/spi.h
index c0248a8fa79c..e63d4a24d879 100644
--- a/include/trace/events/spi.h
+++ b/include/trace/events/spi.h
@@ -167,7 +167,7 @@ TRACE_EVENT(spi_message_done,
);
/*
- * consider a buffer valid if non-NULL and if it doesn't match the dummy buffer
+ * Consider a buffer valid if non-NULL and if it doesn't match the dummy buffer
 * that only exists to work with controllers that have SPI_CONTROLLER_MUST_TX or
* SPI_CONTROLLER_MUST_RX.
*/
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 31bc7025cb44..cdd3a45e6003 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -139,36 +139,68 @@ DEFINE_RPC_CLNT_EVENT(release);
DEFINE_RPC_CLNT_EVENT(replace_xprt);
DEFINE_RPC_CLNT_EVENT(replace_xprt_err);
+TRACE_DEFINE_ENUM(RPC_XPRTSEC_NONE);
+TRACE_DEFINE_ENUM(RPC_XPRTSEC_TLS_X509);
+
+#define rpc_show_xprtsec_policy(policy) \
+ __print_symbolic(policy, \
+ { RPC_XPRTSEC_NONE, "none" }, \
+ { RPC_XPRTSEC_TLS_ANON, "tls-anon" }, \
+ { RPC_XPRTSEC_TLS_X509, "tls-x509" })
+
+#define rpc_show_create_flags(flags) \
+ __print_flags(flags, "|", \
+ { RPC_CLNT_CREATE_HARDRTRY, "HARDRTRY" }, \
+ { RPC_CLNT_CREATE_AUTOBIND, "AUTOBIND" }, \
+ { RPC_CLNT_CREATE_NONPRIVPORT, "NONPRIVPORT" }, \
+ { RPC_CLNT_CREATE_NOPING, "NOPING" }, \
+ { RPC_CLNT_CREATE_DISCRTRY, "DISCRTRY" }, \
+ { RPC_CLNT_CREATE_QUIET, "QUIET" }, \
+ { RPC_CLNT_CREATE_INFINITE_SLOTS, \
+ "INFINITE_SLOTS" }, \
+ { RPC_CLNT_CREATE_NO_IDLE_TIMEOUT, \
+ "NO_IDLE_TIMEOUT" }, \
+ { RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT, \
+ "NO_RETRANS_TIMEOUT" }, \
+ { RPC_CLNT_CREATE_SOFTERR, "SOFTERR" }, \
+ { RPC_CLNT_CREATE_REUSEPORT, "REUSEPORT" })
+
TRACE_EVENT(rpc_clnt_new,
TP_PROTO(
const struct rpc_clnt *clnt,
const struct rpc_xprt *xprt,
- const char *program,
- const char *server
+ const struct rpc_create_args *args
),
- TP_ARGS(clnt, xprt, program, server),
+ TP_ARGS(clnt, xprt, args),
TP_STRUCT__entry(
__field(unsigned int, client_id)
+ __field(unsigned long, xprtsec)
+ __field(unsigned long, flags)
+ __string(program, clnt->cl_program->name)
+ __string(server, xprt->servername)
__string(addr, xprt->address_strings[RPC_DISPLAY_ADDR])
__string(port, xprt->address_strings[RPC_DISPLAY_PORT])
- __string(program, program)
- __string(server, server)
),
TP_fast_assign(
__entry->client_id = clnt->cl_clid;
+ __entry->xprtsec = args->xprtsec.policy;
+ __entry->flags = args->flags;
+ __assign_str(program, clnt->cl_program->name);
+ __assign_str(server, xprt->servername);
__assign_str(addr, xprt->address_strings[RPC_DISPLAY_ADDR]);
__assign_str(port, xprt->address_strings[RPC_DISPLAY_PORT]);
- __assign_str(program, program);
- __assign_str(server, server);
),
- TP_printk("client=" SUNRPC_TRACE_CLID_SPECIFIER
- " peer=[%s]:%s program=%s server=%s",
+ TP_printk("client=" SUNRPC_TRACE_CLID_SPECIFIER " peer=[%s]:%s"
+ " program=%s server=%s xprtsec=%s flags=%s",
__entry->client_id, __get_str(addr), __get_str(port),
- __get_str(program), __get_str(server))
+ __get_str(program), __get_str(server),
+ rpc_show_xprtsec_policy(__entry->xprtsec),
+ rpc_show_create_flags(__entry->flags)
+ )
);
TRACE_EVENT(rpc_clnt_new_err,
@@ -1493,6 +1525,50 @@ TRACE_EVENT(rpcb_unregister,
)
);
+/**
+ ** RPC-over-TLS tracepoints
+ **/
+
+DECLARE_EVENT_CLASS(rpc_tls_class,
+ TP_PROTO(
+ const struct rpc_clnt *clnt,
+ const struct rpc_xprt *xprt
+ ),
+
+ TP_ARGS(clnt, xprt),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, requested_policy)
+ __field(u32, version)
+ __string(servername, xprt->servername)
+ __string(progname, clnt->cl_program->name)
+ ),
+
+ TP_fast_assign(
+ __entry->requested_policy = clnt->cl_xprtsec.policy;
+ __entry->version = clnt->cl_vers;
+ __assign_str(servername, xprt->servername);
+ __assign_str(progname, clnt->cl_program->name);
+ ),
+
+ TP_printk("server=%s %sv%u requested_policy=%s",
+ __get_str(servername), __get_str(progname), __entry->version,
+ rpc_show_xprtsec_policy(__entry->requested_policy)
+ )
+);
+
+#define DEFINE_RPC_TLS_EVENT(name) \
+ DEFINE_EVENT(rpc_tls_class, rpc_tls_##name, \
+ TP_PROTO( \
+ const struct rpc_clnt *clnt, \
+ const struct rpc_xprt *xprt \
+ ), \
+ TP_ARGS(clnt, xprt))
+
+DEFINE_RPC_TLS_EVENT(unavailable);
+DEFINE_RPC_TLS_EVENT(not_started);
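+
+[Editor's note: a hedged sketch of where these would fire — the client connect path reporting that the requested TLS policy cannot be honored. The condition variables are hypothetical:]
+
+	if (!tls_support)
+		trace_rpc_tls_unavailable(clnt, xprt);
+	else if (!handshake_started)
+		trace_rpc_tls_not_started(clnt, xprt);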
+
+
/* Record an xdr_buf containing a fully-formed RPC message */
DECLARE_EVENT_CLASS(svc_xdr_msg_class,
TP_PROTO(
@@ -1599,9 +1675,7 @@ DEFINE_SVCXDRBUF_EVENT(sendto);
svc_rqst_flag(LOCAL) \
svc_rqst_flag(USEDEFERRAL) \
svc_rqst_flag(DROPME) \
- svc_rqst_flag(SPLICE_OK) \
svc_rqst_flag(VICTIM) \
- svc_rqst_flag(BUSY) \
svc_rqst_flag_end(DATA)
#undef svc_rqst_flag
@@ -1630,7 +1704,7 @@ TRACE_DEFINE_ENUM(SVC_DENIED);
TRACE_DEFINE_ENUM(SVC_PENDING);
TRACE_DEFINE_ENUM(SVC_COMPLETE);
-#define svc_show_status(status) \
+#define show_svc_auth_status(status) \
__print_symbolic(status, \
{ SVC_GARBAGE, "SVC_GARBAGE" }, \
{ SVC_SYSERR, "SVC_SYSERR" }, \
@@ -1667,7 +1741,10 @@ TRACE_DEFINE_ENUM(SVC_COMPLETE);
__entry->xid, __get_sockaddr(server), __get_sockaddr(client)
TRACE_EVENT_CONDITION(svc_authenticate,
- TP_PROTO(const struct svc_rqst *rqst, int auth_res),
+ TP_PROTO(
+ const struct svc_rqst *rqst,
+ enum svc_auth_status auth_res
+ ),
TP_ARGS(rqst, auth_res),
@@ -1690,7 +1767,7 @@ TRACE_EVENT_CONDITION(svc_authenticate,
TP_printk(SVC_RQST_ENDPOINT_FORMAT
" auth_res=%s auth_stat=%s",
SVC_RQST_ENDPOINT_VARARGS,
- svc_show_status(__entry->svc_status),
+ show_svc_auth_status(__entry->svc_status),
rpc_show_auth_stat(__entry->auth_stat))
);
@@ -1842,25 +1919,42 @@ TRACE_EVENT(svc_stats_latency,
__get_str(procedure), __entry->execute)
);
+/*
+ * from include/linux/sunrpc/svc_xprt.h
+ */
+#define SVC_XPRT_FLAG_LIST \
+ svc_xprt_flag(BUSY) \
+ svc_xprt_flag(CONN) \
+ svc_xprt_flag(CLOSE) \
+ svc_xprt_flag(DATA) \
+ svc_xprt_flag(TEMP) \
+ svc_xprt_flag(DEAD) \
+ svc_xprt_flag(CHNGBUF) \
+ svc_xprt_flag(DEFERRED) \
+ svc_xprt_flag(OLD) \
+ svc_xprt_flag(LISTENER) \
+ svc_xprt_flag(CACHE_AUTH) \
+ svc_xprt_flag(LOCAL) \
+ svc_xprt_flag(KILL_TEMP) \
+ svc_xprt_flag(CONG_CTRL) \
+ svc_xprt_flag(HANDSHAKE) \
+ svc_xprt_flag(TLS_SESSION) \
+ svc_xprt_flag_end(PEER_AUTH)
+
+#undef svc_xprt_flag
+#undef svc_xprt_flag_end
+#define svc_xprt_flag(x) TRACE_DEFINE_ENUM(XPT_##x);
+#define svc_xprt_flag_end(x) TRACE_DEFINE_ENUM(XPT_##x);
+
+SVC_XPRT_FLAG_LIST
+
+#undef svc_xprt_flag
+#undef svc_xprt_flag_end
+#define svc_xprt_flag(x) { BIT(XPT_##x), #x },
+#define svc_xprt_flag_end(x) { BIT(XPT_##x), #x }
+
#define show_svc_xprt_flags(flags) \
- __print_flags(flags, "|", \
- { BIT(XPT_BUSY), "BUSY" }, \
- { BIT(XPT_CONN), "CONN" }, \
- { BIT(XPT_CLOSE), "CLOSE" }, \
- { BIT(XPT_DATA), "DATA" }, \
- { BIT(XPT_TEMP), "TEMP" }, \
- { BIT(XPT_DEAD), "DEAD" }, \
- { BIT(XPT_CHNGBUF), "CHNGBUF" }, \
- { BIT(XPT_DEFERRED), "DEFERRED" }, \
- { BIT(XPT_OLD), "OLD" }, \
- { BIT(XPT_LISTENER), "LISTENER" }, \
- { BIT(XPT_CACHE_AUTH), "CACHE_AUTH" }, \
- { BIT(XPT_LOCAL), "LOCAL" }, \
- { BIT(XPT_KILL_TEMP), "KILL_TEMP" }, \
- { BIT(XPT_CONG_CTRL), "CONG_CTRL" }, \
- { BIT(XPT_HANDSHAKE), "HANDSHAKE" }, \
- { BIT(XPT_TLS_SESSION), "TLS_SESSION" }, \
- { BIT(XPT_PEER_AUTH), "PEER_AUTH" })
+ __print_flags(flags, "|", SVC_XPRT_FLAG_LIST)
TRACE_EVENT(svc_xprt_create_err,
TP_PROTO(
@@ -1918,25 +2012,25 @@ TRACE_EVENT(svc_xprt_create_err,
TRACE_EVENT(svc_xprt_enqueue,
TP_PROTO(
const struct svc_xprt *xprt,
- const struct svc_rqst *rqst
+ unsigned long flags
),
- TP_ARGS(xprt, rqst),
+ TP_ARGS(xprt, flags),
TP_STRUCT__entry(
SVC_XPRT_ENDPOINT_FIELDS(xprt)
-
- __field(int, pid)
),
TP_fast_assign(
- SVC_XPRT_ENDPOINT_ASSIGNMENTS(xprt);
-
- __entry->pid = rqst? rqst->rq_task->pid : 0;
+ __assign_sockaddr(server, &xprt->xpt_local,
+ xprt->xpt_locallen);
+ __assign_sockaddr(client, &xprt->xpt_remote,
+ xprt->xpt_remotelen);
+ __entry->flags = flags;
+ __entry->netns_ino = xprt->xpt_net->ns.inum;
),
- TP_printk(SVC_XPRT_ENDPOINT_FORMAT " pid=%d",
- SVC_XPRT_ENDPOINT_VARARGS, __entry->pid)
+ TP_printk(SVC_XPRT_ENDPOINT_FORMAT, SVC_XPRT_ENDPOINT_VARARGS)
);
TRACE_EVENT(svc_xprt_dequeue,
@@ -2104,31 +2198,46 @@ DEFINE_SVC_DEFERRED_EVENT(drop);
DEFINE_SVC_DEFERRED_EVENT(queue);
DEFINE_SVC_DEFERRED_EVENT(recv);
-TRACE_EVENT(svcsock_new_socket,
+DECLARE_EVENT_CLASS(svcsock_lifetime_class,
TP_PROTO(
+ const void *svsk,
const struct socket *socket
),
-
- TP_ARGS(socket),
-
+ TP_ARGS(svsk, socket),
TP_STRUCT__entry(
+ __field(unsigned int, netns_ino)
+ __field(const void *, svsk)
+ __field(const void *, sk)
__field(unsigned long, type)
__field(unsigned long, family)
- __field(bool, listener)
+ __field(unsigned long, state)
),
-
TP_fast_assign(
+ struct sock *sk = socket->sk;
+
+ __entry->netns_ino = sock_net(sk)->ns.inum;
+ __entry->svsk = svsk;
+ __entry->sk = sk;
__entry->type = socket->type;
- __entry->family = socket->sk->sk_family;
- __entry->listener = (socket->sk->sk_state == TCP_LISTEN);
+ __entry->family = sk->sk_family;
+ __entry->state = sk->sk_state;
),
-
- TP_printk("type=%s family=%s%s",
- show_socket_type(__entry->type),
+ TP_printk("svsk=%p type=%s family=%s%s",
+ __entry->svsk, show_socket_type(__entry->type),
rpc_show_address_family(__entry->family),
- __entry->listener ? " (listener)" : ""
+ __entry->state == TCP_LISTEN ? " (listener)" : ""
)
);
+#define DEFINE_SVCSOCK_LIFETIME_EVENT(name) \
+ DEFINE_EVENT(svcsock_lifetime_class, name, \
+ TP_PROTO( \
+ const void *svsk, \
+ const struct socket *socket \
+ ), \
+ TP_ARGS(svsk, socket))
+
+DEFINE_SVCSOCK_LIFETIME_EVENT(svcsock_new);
+DEFINE_SVCSOCK_LIFETIME_EVENT(svcsock_free);
TRACE_EVENT(svcsock_marker,
TP_PROTO(
diff --git a/include/trace/events/task.h b/include/trace/events/task.h
index 64d160930b0d..47b527464d1a 100644
--- a/include/trace/events/task.h
+++ b/include/trace/events/task.h
@@ -47,7 +47,7 @@ TRACE_EVENT(task_rename,
TP_fast_assign(
__entry->pid = task->pid;
memcpy(entry->oldcomm, task->comm, TASK_COMM_LEN);
- strlcpy(entry->newcomm, comm, TASK_COMM_LEN);
+ strscpy(entry->newcomm, comm, TASK_COMM_LEN);
__entry->oom_score_adj = task->signal->oom_score_adj;
),
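
The strlcpy() to strscpy() conversions in this series (also in wbt.h below) are more than a rename: strlcpy() returns the length of the source string and always scans all of it, while strscpy() returns the number of bytes copied or -E2BIG on truncation. A rough user-space model of the strscpy() contract, for illustration only:

#include <errno.h>
#include <stddef.h>
#include <sys/types.h>

/* Approximation of the kernel's strscpy() semantics: copy at most
 * size - 1 bytes, always NUL-terminate, return bytes copied or
 * -E2BIG when src did not fit. */
static ssize_t strscpy_model(char *dst, const char *src, size_t size)
{
        size_t i;

        if (size == 0)
                return -E2BIG;
        for (i = 0; i < size - 1 && src[i] != '\0'; i++)
                dst[i] = src[i];
        dst[i] = '\0';
        return src[i] == '\0' ? (ssize_t)i : -E2BIG;
}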
diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h
index bf06db8d2046..7b1ddffa3dfc 100644
--- a/include/trace/events/tcp.h
+++ b/include/trace/events/tcp.h
@@ -381,6 +381,7 @@ TRACE_EVENT(tcp_cong_state_set,
__field(const void *, skaddr)
__field(__u16, sport)
__field(__u16, dport)
+ __field(__u16, family)
__array(__u8, saddr, 4)
__array(__u8, daddr, 4)
__array(__u8, saddr_v6, 16)
@@ -396,6 +397,7 @@ TRACE_EVENT(tcp_cong_state_set,
__entry->sport = ntohs(inet->inet_sport);
__entry->dport = ntohs(inet->inet_dport);
+ __entry->family = sk->sk_family;
p32 = (__be32 *) __entry->saddr;
*p32 = inet->inet_saddr;
@@ -409,7 +411,8 @@ TRACE_EVENT(tcp_cong_state_set,
__entry->cong_state = ca_state;
),
- TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c cong_state=%u",
+ TP_printk("family=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c cong_state=%u",
+ show_family_name(__entry->family),
__entry->sport, __entry->dport,
__entry->saddr, __entry->daddr,
__entry->saddr_v6, __entry->daddr_v6,
diff --git a/include/trace/events/thp.h b/include/trace/events/thp.h
index 202b3e3e67ff..f50048af5fcc 100644
--- a/include/trace/events/thp.h
+++ b/include/trace/events/thp.h
@@ -8,25 +8,34 @@
#include <linux/types.h>
#include <linux/tracepoint.h>
-TRACE_EVENT(hugepage_set_pmd,
+DECLARE_EVENT_CLASS(hugepage_set,
- TP_PROTO(unsigned long addr, unsigned long pmd),
- TP_ARGS(addr, pmd),
+ TP_PROTO(unsigned long addr, unsigned long pte),
+ TP_ARGS(addr, pte),
TP_STRUCT__entry(
__field(unsigned long, addr)
- __field(unsigned long, pmd)
+ __field(unsigned long, pte)
),
TP_fast_assign(
__entry->addr = addr;
- __entry->pmd = pmd;
+ __entry->pte = pte;
),
- TP_printk("Set pmd with 0x%lx with 0x%lx", __entry->addr, __entry->pmd)
+ TP_printk("Set page table entry with 0x%lx with 0x%lx", __entry->addr, __entry->pte)
);
+DEFINE_EVENT(hugepage_set, hugepage_set_pmd,
+ TP_PROTO(unsigned long addr, unsigned long pmd),
+ TP_ARGS(addr, pmd)
+);
-TRACE_EVENT(hugepage_update,
+DEFINE_EVENT(hugepage_set, hugepage_set_pud,
+ TP_PROTO(unsigned long addr, unsigned long pud),
+ TP_ARGS(addr, pud)
+);
+
+DECLARE_EVENT_CLASS(hugepage_update,
TP_PROTO(unsigned long addr, unsigned long pte, unsigned long clr, unsigned long set),
TP_ARGS(addr, pte, clr, set),
@@ -48,6 +57,16 @@ TRACE_EVENT(hugepage_update,
TP_printk("hugepage update at addr 0x%lx and pte = 0x%lx clr = 0x%lx, set = 0x%lx", __entry->addr, __entry->pte, __entry->clr, __entry->set)
);
+DEFINE_EVENT(hugepage_update, hugepage_update_pmd,
+ TP_PROTO(unsigned long addr, unsigned long pmd, unsigned long clr, unsigned long set),
+ TP_ARGS(addr, pmd, clr, set)
+);
+
+DEFINE_EVENT(hugepage_update, hugepage_update_pud,
+ TP_PROTO(unsigned long addr, unsigned long pud, unsigned long clr, unsigned long set),
+ TP_ARGS(addr, pud, clr, set)
+);
+
DECLARE_EVENT_CLASS(migration_pmd,
TP_PROTO(unsigned long addr, unsigned long pmd),
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index 3e8619c72f77..1ef58a04fc57 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -46,22 +46,21 @@ DEFINE_EVENT(timer_class, timer_init,
/**
* timer_start - called when the timer is started
- * @timer: pointer to struct timer_list
- * @expires: the timers expiry time
- * @flags: the timers flags
+ * @timer: pointer to struct timer_list
+ * @bucket_expiry: the bucket expiry time
*/
TRACE_EVENT(timer_start,
TP_PROTO(struct timer_list *timer,
- unsigned long expires,
- unsigned int flags),
+ unsigned long bucket_expiry),
- TP_ARGS(timer, expires, flags),
+ TP_ARGS(timer, bucket_expiry),
TP_STRUCT__entry(
__field( void *, timer )
__field( void *, function )
__field( unsigned long, expires )
+ __field( unsigned long, bucket_expiry )
__field( unsigned long, now )
__field( unsigned int, flags )
),
@@ -69,15 +68,16 @@ TRACE_EVENT(timer_start,
TP_fast_assign(
__entry->timer = timer;
__entry->function = timer->function;
- __entry->expires = expires;
+ __entry->expires = timer->expires;
+ __entry->bucket_expiry = bucket_expiry;
__entry->now = jiffies;
- __entry->flags = flags;
+ __entry->flags = timer->flags;
),
- TP_printk("timer=%p function=%ps expires=%lu [timeout=%ld] cpu=%u idx=%u flags=%s",
+ TP_printk("timer=%p function=%ps expires=%lu [timeout=%ld] bucket_expiry=%lu cpu=%u idx=%u flags=%s",
__entry->timer, __entry->function, __entry->expires,
(long)__entry->expires - __entry->now,
- __entry->flags & TIMER_CPUMASK,
+ __entry->bucket_expiry, __entry->flags & TIMER_CPUMASK,
__entry->flags >> TIMER_ARRAYSHIFT,
decode_timer_flags(__entry->flags & TIMER_TRACE_FLAGMASK))
);
@@ -142,6 +142,26 @@ DEFINE_EVENT(timer_class, timer_cancel,
TP_ARGS(timer)
);
+TRACE_EVENT(timer_base_idle,
+
+ TP_PROTO(bool is_idle, unsigned int cpu),
+
+ TP_ARGS(is_idle, cpu),
+
+ TP_STRUCT__entry(
+ __field( bool, is_idle )
+ __field( unsigned int, cpu )
+ ),
+
+ TP_fast_assign(
+ __entry->is_idle = is_idle;
+ __entry->cpu = cpu;
+ ),
+
+ TP_printk("is_idle=%d cpu=%d",
+ __entry->is_idle, __entry->cpu)
+);
+
#define decode_clockid(type) \
__print_symbolic(type, \
{ CLOCK_REALTIME, "CLOCK_REALTIME" }, \
@@ -158,7 +178,11 @@ DEFINE_EVENT(timer_class, timer_cancel,
{ HRTIMER_MODE_ABS_SOFT, "ABS|SOFT" }, \
{ HRTIMER_MODE_REL_SOFT, "REL|SOFT" }, \
{ HRTIMER_MODE_ABS_PINNED_SOFT, "ABS|PINNED|SOFT" }, \
- { HRTIMER_MODE_REL_PINNED_SOFT, "REL|PINNED|SOFT" })
+ { HRTIMER_MODE_REL_PINNED_SOFT, "REL|PINNED|SOFT" }, \
+ { HRTIMER_MODE_ABS_HARD, "ABS|HARD" }, \
+ { HRTIMER_MODE_REL_HARD, "REL|HARD" }, \
+ { HRTIMER_MODE_ABS_PINNED_HARD, "ABS|PINNED|HARD" }, \
+ { HRTIMER_MODE_REL_PINNED_HARD, "REL|PINNED|HARD" })
/**
* hrtimer_init - called when the hrtimer is initialized
diff --git a/include/trace/events/ufs.h b/include/trace/events/ufs.h
index 992517ac3292..b930669bd1f0 100644
--- a/include/trace/events/ufs.h
+++ b/include/trace/events/ufs.h
@@ -267,15 +267,15 @@ DEFINE_EVENT(ufshcd_template, ufshcd_wl_runtime_resume,
TP_ARGS(dev_name, err, usecs, dev_state, link_state));
TRACE_EVENT(ufshcd_command,
- TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t,
+ TP_PROTO(struct scsi_device *sdev, enum ufs_trace_str_t str_t,
unsigned int tag, u32 doorbell, u32 hwq_id, int transfer_len,
u32 intr, u64 lba, u8 opcode, u8 group_id),
- TP_ARGS(dev_name, str_t, tag, doorbell, hwq_id, transfer_len,
- intr, lba, opcode, group_id),
+ TP_ARGS(sdev, str_t, tag, doorbell, hwq_id, transfer_len, intr, lba,
+ opcode, group_id),
TP_STRUCT__entry(
- __string(dev_name, dev_name)
+ __field(struct scsi_device *, sdev)
__field(enum ufs_trace_str_t, str_t)
__field(unsigned int, tag)
__field(u32, doorbell)
@@ -288,7 +288,7 @@ TRACE_EVENT(ufshcd_command,
),
TP_fast_assign(
- __assign_str(dev_name, dev_name);
+ __entry->sdev = sdev;
__entry->str_t = str_t;
__entry->tag = tag;
__entry->doorbell = doorbell;
@@ -302,8 +302,9 @@ TRACE_EVENT(ufshcd_command,
TP_printk(
"%s: %s: tag: %u, DB: 0x%x, size: %d, IS: %u, LBA: %llu, opcode: 0x%x (%s), group_id: 0x%x, hwq_id: %d",
- show_ufs_cmd_trace_str(__entry->str_t), __get_str(dev_name),
- __entry->tag, __entry->doorbell, __entry->transfer_len, __entry->intr,
+ show_ufs_cmd_trace_str(__entry->str_t),
+ dev_name(&__entry->sdev->sdev_dev), __entry->tag,
+ __entry->doorbell, __entry->transfer_len, __entry->intr,
__entry->lba, (u32)__entry->opcode, str_opcode(__entry->opcode),
(u32)__entry->group_id, __entry->hwq_id
)
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index d2123dd960d5..1a488c30afa5 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -285,10 +285,9 @@ TRACE_EVENT(mm_vmscan_lru_isolate,
unsigned long nr_scanned,
unsigned long nr_skipped,
unsigned long nr_taken,
- isolate_mode_t isolate_mode,
int lru),
- TP_ARGS(highest_zoneidx, order, nr_requested, nr_scanned, nr_skipped, nr_taken, isolate_mode, lru),
+ TP_ARGS(highest_zoneidx, order, nr_requested, nr_scanned, nr_skipped, nr_taken, lru),
TP_STRUCT__entry(
__field(int, highest_zoneidx)
@@ -297,7 +296,6 @@ TRACE_EVENT(mm_vmscan_lru_isolate,
__field(unsigned long, nr_scanned)
__field(unsigned long, nr_skipped)
__field(unsigned long, nr_taken)
- __field(unsigned int, isolate_mode)
__field(int, lru)
),
@@ -308,7 +306,6 @@ TRACE_EVENT(mm_vmscan_lru_isolate,
__entry->nr_scanned = nr_scanned;
__entry->nr_skipped = nr_skipped;
__entry->nr_taken = nr_taken;
- __entry->isolate_mode = (__force unsigned int)isolate_mode;
__entry->lru = lru;
),
@@ -316,8 +313,7 @@ TRACE_EVENT(mm_vmscan_lru_isolate,
* classzone is previous name of the highest_zoneidx.
* Reason not to change it is the ABI requirement of the tracepoint.
*/
- TP_printk("isolate_mode=%d classzone=%d order=%d nr_requested=%lu nr_scanned=%lu nr_skipped=%lu nr_taken=%lu lru=%s",
- __entry->isolate_mode,
+ TP_printk("classzone=%d order=%d nr_requested=%lu nr_scanned=%lu nr_skipped=%lu nr_taken=%lu lru=%s",
__entry->highest_zoneidx,
__entry->order,
__entry->nr_requested,
diff --git a/include/trace/events/vsock_virtio_transport_common.h b/include/trace/events/vsock_virtio_transport_common.h
index d0b3f0ea9ba1..f1ebe36787c3 100644
--- a/include/trace/events/vsock_virtio_transport_common.h
+++ b/include/trace/events/vsock_virtio_transport_common.h
@@ -43,7 +43,8 @@ TRACE_EVENT(virtio_transport_alloc_pkt,
__u32 len,
__u16 type,
__u16 op,
- __u32 flags
+ __u32 flags,
+ bool zcopy
),
TP_ARGS(
src_cid, src_port,
@@ -51,7 +52,8 @@ TRACE_EVENT(virtio_transport_alloc_pkt,
len,
type,
op,
- flags
+ flags,
+ zcopy
),
TP_STRUCT__entry(
__field(__u32, src_cid)
@@ -62,6 +64,7 @@ TRACE_EVENT(virtio_transport_alloc_pkt,
__field(__u16, type)
__field(__u16, op)
__field(__u32, flags)
+ __field(bool, zcopy)
),
TP_fast_assign(
__entry->src_cid = src_cid;
@@ -72,14 +75,15 @@ TRACE_EVENT(virtio_transport_alloc_pkt,
__entry->type = type;
__entry->op = op;
__entry->flags = flags;
+ __entry->zcopy = zcopy;
),
- TP_printk("%u:%u -> %u:%u len=%u type=%s op=%s flags=%#x",
+ TP_printk("%u:%u -> %u:%u len=%u type=%s op=%s flags=%#x zcopy=%s",
__entry->src_cid, __entry->src_port,
__entry->dst_cid, __entry->dst_port,
__entry->len,
show_type(__entry->type),
show_op(__entry->op),
- __entry->flags)
+ __entry->flags, __entry->zcopy ? "true" : "false")
);
TRACE_EVENT(virtio_transport_recv_pkt,
diff --git a/include/trace/events/wbt.h b/include/trace/events/wbt.h
index 9c66e59d859c..4661f0d27062 100644
--- a/include/trace/events/wbt.h
+++ b/include/trace/events/wbt.h
@@ -33,7 +33,7 @@ TRACE_EVENT(wbt_stat,
),
TP_fast_assign(
- strlcpy(__entry->name, bdi_dev_name(bdi),
+ strscpy(__entry->name, bdi_dev_name(bdi),
ARRAY_SIZE(__entry->name));
__entry->rmean = stat[0].mean;
__entry->rmin = stat[0].min;
@@ -68,7 +68,7 @@ TRACE_EVENT(wbt_lat,
),
TP_fast_assign(
- strlcpy(__entry->name, bdi_dev_name(bdi),
+ strscpy(__entry->name, bdi_dev_name(bdi),
ARRAY_SIZE(__entry->name));
__entry->lat = div_u64(lat, 1000);
),
@@ -105,7 +105,7 @@ TRACE_EVENT(wbt_step,
),
TP_fast_assign(
- strlcpy(__entry->name, bdi_dev_name(bdi),
+ strscpy(__entry->name, bdi_dev_name(bdi),
ARRAY_SIZE(__entry->name));
__entry->msg = msg;
__entry->step = step;
@@ -141,7 +141,7 @@ TRACE_EVENT(wbt_timer,
),
TP_fast_assign(
- strlcpy(__entry->name, bdi_dev_name(bdi),
+ strscpy(__entry->name, bdi_dev_name(bdi),
ARRAY_SIZE(__entry->name));
__entry->status = status;
__entry->step = step;
diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h
index c40fc97f9417..9adc2bdf2f94 100644
--- a/include/trace/events/xdp.h
+++ b/include/trace/events/xdp.h
@@ -9,6 +9,7 @@
#include <linux/filter.h>
#include <linux/tracepoint.h>
#include <linux/bpf.h>
+#include <net/xdp.h>
#define __XDP_ACT_MAP(FN) \
FN(ABORTED) \
@@ -404,6 +405,23 @@ TRACE_EVENT(mem_return_failed,
)
);
+TRACE_EVENT(bpf_xdp_link_attach_failed,
+
+ TP_PROTO(const char *msg),
+
+ TP_ARGS(msg),
+
+ TP_STRUCT__entry(
+ __string(msg, msg)
+ ),
+
+ TP_fast_assign(
+ __assign_str(msg, msg);
+ ),
+
+ TP_printk("errmsg=%s", __get_str(msg))
+);
+
#endif /* _TRACE_XDP_H */
#include <trace/define_trace.h>
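
The new bpf_xdp_link_attach_failed event stores its message with the dynamic-string helpers: __string() reserves space in the ring-buffer record, __assign_str() copies the text in, and __get_str() retrieves it at print time. Because the string is copied into the record, a caller may safely pass a stack buffer; a hypothetical call site (not from this patch) could look like:

char reason[64];

snprintf(reason, sizeof(reason), "device %s does not support XDP", "eth0");
trace_bpf_xdp_link_attach_failed(reason);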
diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
index 44a3f565264d..0577f0cdd231 100644
--- a/include/trace/events/xen.h
+++ b/include/trace/events/xen.h
@@ -6,26 +6,26 @@
#define _TRACE_XEN_H
#include <linux/tracepoint.h>
-#include <asm/paravirt_types.h>
+#include <asm/xen/hypervisor.h>
#include <asm/xen/trace_types.h>
struct multicall_entry;
/* Multicalls */
DECLARE_EVENT_CLASS(xen_mc__batch,
- TP_PROTO(enum paravirt_lazy_mode mode),
+ TP_PROTO(enum xen_lazy_mode mode),
TP_ARGS(mode),
TP_STRUCT__entry(
- __field(enum paravirt_lazy_mode, mode)
+ __field(enum xen_lazy_mode, mode)
),
TP_fast_assign(__entry->mode = mode),
TP_printk("start batch LAZY_%s",
- (__entry->mode == PARAVIRT_LAZY_MMU) ? "MMU" :
- (__entry->mode == PARAVIRT_LAZY_CPU) ? "CPU" : "NONE")
+ (__entry->mode == XEN_LAZY_MMU) ? "MMU" :
+ (__entry->mode == XEN_LAZY_CPU) ? "CPU" : "NONE")
);
#define DEFINE_XEN_MC_BATCH(name) \
DEFINE_EVENT(xen_mc__batch, name, \
- TP_PROTO(enum paravirt_lazy_mode mode), \
+ TP_PROTO(enum xen_lazy_mode mode), \
TP_ARGS(mode))
DEFINE_XEN_MC_BATCH(xen_mc_batch);
diff --git a/include/uapi/asm-generic/bitsperlong.h b/include/uapi/asm-generic/bitsperlong.h
index 693d9a40eb7b..352cb81947b8 100644
--- a/include/uapi/asm-generic/bitsperlong.h
+++ b/include/uapi/asm-generic/bitsperlong.h
@@ -2,6 +2,17 @@
#ifndef _UAPI__ASM_GENERIC_BITS_PER_LONG
#define _UAPI__ASM_GENERIC_BITS_PER_LONG
+#ifndef __BITS_PER_LONG
+/*
+ * To stay safe and avoid regressions, only unify the uapi
+ * bitsperlong.h for those archs whose newer toolchains provide
+ * definitions of __CHAR_BIT__ and __SIZEOF_LONG__.
+ * See the following link for more info:
+ * https://lore.kernel.org/linux-arch/b9624545-2c80-49a1-ac3c-39264a591f7b@app.fastmail.com/
+ */
+#if defined(__CHAR_BIT__) && defined(__SIZEOF_LONG__)
+#define __BITS_PER_LONG (__CHAR_BIT__ * __SIZEOF_LONG__)
+#else
/*
* There seems to be no way of detecting this automatically from user
* space, so 64 bit architectures should override this in their
@@ -9,8 +20,8 @@
* both 32 and 64 bit user space must not rely on CONFIG_64BIT
* to decide it, but rather check a compiler provided macro.
*/
-#ifndef __BITS_PER_LONG
#define __BITS_PER_LONG 32
#endif
+#endif
#endif /* _UAPI__ASM_GENERIC_BITS_PER_LONG */
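
The unified branch is pure preprocessor arithmetic: __CHAR_BIT__ is the number of bits in a byte and __SIZEOF_LONG__ the size of long in bytes, so their product is the word size. A quick check of what a given toolchain would produce (assuming a compiler recent enough to define both macros):

#include <stdio.h>

int main(void)
{
#if defined(__CHAR_BIT__) && defined(__SIZEOF_LONG__)
        /* e.g. 8 * 8 = 64 on LP64, 8 * 4 = 32 on ILP32 */
        printf("__BITS_PER_LONG would be %d\n",
               __CHAR_BIT__ * __SIZEOF_LONG__);
#else
        puts("toolchain too old: falls back to the 32-bit default");
#endif
        return 0;
}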
diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h
index ffbe4cec9f32..b7bc545ec3b2 100644
--- a/include/uapi/asm-generic/siginfo.h
+++ b/include/uapi/asm-generic/siginfo.h
@@ -68,11 +68,6 @@ union __sifields {
/* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */
struct {
void __user *_addr; /* faulting insn/memory ref. */
-#ifdef __ia64__
- int _imm; /* immediate value for "break" */
- unsigned int _flags; /* see ia64 si_flags */
- unsigned long _isr; /* isr */
-#endif
#define __ADDR_BND_PKEY_PAD (__alignof__(void *) < sizeof(short) ? \
sizeof(short) : __alignof__(void *))
@@ -242,7 +237,8 @@ typedef struct siginfo {
#define SEGV_ADIPERR 7 /* Precise MCD exception */
#define SEGV_MTEAERR 8 /* Asynchronous ARM MTE error */
#define SEGV_MTESERR 9 /* Synchronous ARM MTE exception */
-#define NSIGSEGV 9
+#define SEGV_CPERR 10 /* Control protection fault */
+#define NSIGSEGV 10
/*
* SIGBUS si_codes
diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h
index 638230899e98..8ce8a39a1e5f 100644
--- a/include/uapi/asm-generic/socket.h
+++ b/include/uapi/asm-generic/socket.h
@@ -132,6 +132,9 @@
#define SO_RCVMARK 75
+#define SO_PASSPIDFD 76
+#define SO_PEERPIDFD 77
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__))
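
For the two new socket options, the usual pattern applies: a receiver opts in with setsockopt() and then finds the peer's pidfd in ancillary data. A hedged sketch, assuming the companion SCM_PIDFD cmsg type from the same kernel release; error handling omitted:

#include <sys/socket.h>

/* Sketch: opt in to receiving a pidfd for the sending process in
 * ancillary data (SO_PASSPIDFD and the SCM_PIDFD cmsg type are
 * assumed from the same uapi release). */
static int enable_passpidfd(int sock)
{
        int on = 1;

        return setsockopt(sock, SOL_SOCKET, SO_PASSPIDFD, &on, sizeof(on));
}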
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 45fa180cc56a..75f00965ab15 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -38,12 +38,12 @@ __SYSCALL(__NR_io_destroy, sys_io_destroy)
__SC_COMP(__NR_io_submit, sys_io_submit, compat_sys_io_submit)
#define __NR_io_cancel 3
__SYSCALL(__NR_io_cancel, sys_io_cancel)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_io_getevents 4
__SC_3264(__NR_io_getevents, sys_io_getevents_time32, sys_io_getevents)
#endif
-/* fs/xattr.c */
#define __NR_setxattr 5
__SYSCALL(__NR_setxattr, sys_setxattr)
#define __NR_lsetxattr 6
@@ -68,58 +68,38 @@ __SYSCALL(__NR_removexattr, sys_removexattr)
__SYSCALL(__NR_lremovexattr, sys_lremovexattr)
#define __NR_fremovexattr 16
__SYSCALL(__NR_fremovexattr, sys_fremovexattr)
-
-/* fs/dcache.c */
#define __NR_getcwd 17
__SYSCALL(__NR_getcwd, sys_getcwd)
-
-/* fs/cookies.c */
#define __NR_lookup_dcookie 18
-__SC_COMP(__NR_lookup_dcookie, sys_lookup_dcookie, compat_sys_lookup_dcookie)
-
-/* fs/eventfd.c */
+__SYSCALL(__NR_lookup_dcookie, sys_ni_syscall)
#define __NR_eventfd2 19
__SYSCALL(__NR_eventfd2, sys_eventfd2)
-
-/* fs/eventpoll.c */
#define __NR_epoll_create1 20
__SYSCALL(__NR_epoll_create1, sys_epoll_create1)
#define __NR_epoll_ctl 21
__SYSCALL(__NR_epoll_ctl, sys_epoll_ctl)
#define __NR_epoll_pwait 22
__SC_COMP(__NR_epoll_pwait, sys_epoll_pwait, compat_sys_epoll_pwait)
-
-/* fs/fcntl.c */
#define __NR_dup 23
__SYSCALL(__NR_dup, sys_dup)
#define __NR_dup3 24
__SYSCALL(__NR_dup3, sys_dup3)
#define __NR3264_fcntl 25
__SC_COMP_3264(__NR3264_fcntl, sys_fcntl64, sys_fcntl, compat_sys_fcntl64)
-
-/* fs/inotify_user.c */
#define __NR_inotify_init1 26
__SYSCALL(__NR_inotify_init1, sys_inotify_init1)
#define __NR_inotify_add_watch 27
__SYSCALL(__NR_inotify_add_watch, sys_inotify_add_watch)
#define __NR_inotify_rm_watch 28
__SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch)
-
-/* fs/ioctl.c */
#define __NR_ioctl 29
__SC_COMP(__NR_ioctl, sys_ioctl, compat_sys_ioctl)
-
-/* fs/ioprio.c */
#define __NR_ioprio_set 30
__SYSCALL(__NR_ioprio_set, sys_ioprio_set)
#define __NR_ioprio_get 31
__SYSCALL(__NR_ioprio_get, sys_ioprio_get)
-
-/* fs/locks.c */
#define __NR_flock 32
__SYSCALL(__NR_flock, sys_flock)
-
-/* fs/namei.c */
#define __NR_mknodat 33
__SYSCALL(__NR_mknodat, sys_mknodat)
#define __NR_mkdirat 34
@@ -130,25 +110,21 @@ __SYSCALL(__NR_unlinkat, sys_unlinkat)
__SYSCALL(__NR_symlinkat, sys_symlinkat)
#define __NR_linkat 37
__SYSCALL(__NR_linkat, sys_linkat)
+
#ifdef __ARCH_WANT_RENAMEAT
/* renameat is superseded with flags by renameat2 */
#define __NR_renameat 38
__SYSCALL(__NR_renameat, sys_renameat)
#endif /* __ARCH_WANT_RENAMEAT */
-/* fs/namespace.c */
#define __NR_umount2 39
__SYSCALL(__NR_umount2, sys_umount)
#define __NR_mount 40
__SYSCALL(__NR_mount, sys_mount)
#define __NR_pivot_root 41
__SYSCALL(__NR_pivot_root, sys_pivot_root)
-
-/* fs/nfsctl.c */
#define __NR_nfsservctl 42
__SYSCALL(__NR_nfsservctl, sys_ni_syscall)
-
-/* fs/open.c */
#define __NR3264_statfs 43
__SC_COMP_3264(__NR3264_statfs, sys_statfs64, sys_statfs, \
compat_sys_statfs64)
@@ -161,7 +137,6 @@ __SC_COMP_3264(__NR3264_truncate, sys_truncate64, sys_truncate, \
#define __NR3264_ftruncate 46
__SC_COMP_3264(__NR3264_ftruncate, sys_ftruncate64, sys_ftruncate, \
compat_sys_ftruncate64)
-
#define __NR_fallocate 47
__SC_COMP(__NR_fallocate, sys_fallocate, compat_sys_fallocate)
#define __NR_faccessat 48
@@ -186,20 +161,12 @@ __SYSCALL(__NR_openat, sys_openat)
__SYSCALL(__NR_close, sys_close)
#define __NR_vhangup 58
__SYSCALL(__NR_vhangup, sys_vhangup)
-
-/* fs/pipe.c */
#define __NR_pipe2 59
__SYSCALL(__NR_pipe2, sys_pipe2)
-
-/* fs/quota.c */
#define __NR_quotactl 60
__SYSCALL(__NR_quotactl, sys_quotactl)
-
-/* fs/readdir.c */
#define __NR_getdents64 61
__SYSCALL(__NR_getdents64, sys_getdents64)
-
-/* fs/read_write.c */
#define __NR3264_lseek 62
__SC_3264(__NR3264_lseek, sys_llseek, sys_lseek)
#define __NR_read 63
@@ -218,12 +185,9 @@ __SC_COMP(__NR_pwrite64, sys_pwrite64, compat_sys_pwrite64)
__SC_COMP(__NR_preadv, sys_preadv, compat_sys_preadv)
#define __NR_pwritev 70
__SC_COMP(__NR_pwritev, sys_pwritev, compat_sys_pwritev)
-
-/* fs/sendfile.c */
#define __NR3264_sendfile 71
__SYSCALL(__NR3264_sendfile, sys_sendfile64)
-/* fs/select.c */
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_pselect6 72
__SC_COMP_3264(__NR_pselect6, sys_pselect6_time32, sys_pselect6, compat_sys_pselect6_time32)
@@ -231,21 +195,17 @@ __SC_COMP_3264(__NR_pselect6, sys_pselect6_time32, sys_pselect6, compat_sys_psel
__SC_COMP_3264(__NR_ppoll, sys_ppoll_time32, sys_ppoll, compat_sys_ppoll_time32)
#endif
-/* fs/signalfd.c */
#define __NR_signalfd4 74
__SC_COMP(__NR_signalfd4, sys_signalfd4, compat_sys_signalfd4)
-
-/* fs/splice.c */
#define __NR_vmsplice 75
__SYSCALL(__NR_vmsplice, sys_vmsplice)
#define __NR_splice 76
__SYSCALL(__NR_splice, sys_splice)
#define __NR_tee 77
__SYSCALL(__NR_tee, sys_tee)
-
-/* fs/stat.c */
#define __NR_readlinkat 78
__SYSCALL(__NR_readlinkat, sys_readlinkat)
+
#if defined(__ARCH_WANT_NEW_STAT) || defined(__ARCH_WANT_STAT64)
#define __NR3264_fstatat 79
__SC_3264(__NR3264_fstatat, sys_fstatat64, sys_newfstatat)
@@ -253,13 +213,13 @@ __SC_3264(__NR3264_fstatat, sys_fstatat64, sys_newfstatat)
__SC_3264(__NR3264_fstat, sys_fstat64, sys_newfstat)
#endif
-/* fs/sync.c */
#define __NR_sync 81
__SYSCALL(__NR_sync, sys_sync)
#define __NR_fsync 82
__SYSCALL(__NR_fsync, sys_fsync)
#define __NR_fdatasync 83
__SYSCALL(__NR_fdatasync, sys_fdatasync)
+
#ifdef __ARCH_WANT_SYNC_FILE_RANGE2
#define __NR_sync_file_range2 84
__SC_COMP(__NR_sync_file_range2, sys_sync_file_range2, \
@@ -270,9 +230,9 @@ __SC_COMP(__NR_sync_file_range, sys_sync_file_range, \
compat_sys_sync_file_range)
#endif
-/* fs/timerfd.c */
#define __NR_timerfd_create 85
__SYSCALL(__NR_timerfd_create, sys_timerfd_create)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_timerfd_settime 86
__SC_3264(__NR_timerfd_settime, sys_timerfd_settime32, \
@@ -282,45 +242,35 @@ __SC_3264(__NR_timerfd_gettime, sys_timerfd_gettime32, \
sys_timerfd_gettime)
#endif
-/* fs/utimes.c */
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_utimensat 88
__SC_3264(__NR_utimensat, sys_utimensat_time32, sys_utimensat)
#endif
-/* kernel/acct.c */
#define __NR_acct 89
__SYSCALL(__NR_acct, sys_acct)
-
-/* kernel/capability.c */
#define __NR_capget 90
__SYSCALL(__NR_capget, sys_capget)
#define __NR_capset 91
__SYSCALL(__NR_capset, sys_capset)
-
-/* kernel/exec_domain.c */
#define __NR_personality 92
__SYSCALL(__NR_personality, sys_personality)
-
-/* kernel/exit.c */
#define __NR_exit 93
__SYSCALL(__NR_exit, sys_exit)
#define __NR_exit_group 94
__SYSCALL(__NR_exit_group, sys_exit_group)
#define __NR_waitid 95
__SC_COMP(__NR_waitid, sys_waitid, compat_sys_waitid)
-
-/* kernel/fork.c */
#define __NR_set_tid_address 96
__SYSCALL(__NR_set_tid_address, sys_set_tid_address)
#define __NR_unshare 97
__SYSCALL(__NR_unshare, sys_unshare)
-/* kernel/futex.c */
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_futex 98
__SC_3264(__NR_futex, sys_futex_time32, sys_futex)
#endif
+
#define __NR_set_robust_list 99
__SC_COMP(__NR_set_robust_list, sys_set_robust_list, \
compat_sys_set_robust_list)
@@ -328,43 +278,40 @@ __SC_COMP(__NR_set_robust_list, sys_set_robust_list, \
__SC_COMP(__NR_get_robust_list, sys_get_robust_list, \
compat_sys_get_robust_list)
-/* kernel/hrtimer.c */
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_nanosleep 101
__SC_3264(__NR_nanosleep, sys_nanosleep_time32, sys_nanosleep)
#endif
-/* kernel/itimer.c */
#define __NR_getitimer 102
__SC_COMP(__NR_getitimer, sys_getitimer, compat_sys_getitimer)
#define __NR_setitimer 103
__SC_COMP(__NR_setitimer, sys_setitimer, compat_sys_setitimer)
-
-/* kernel/kexec.c */
#define __NR_kexec_load 104
__SC_COMP(__NR_kexec_load, sys_kexec_load, compat_sys_kexec_load)
-
-/* kernel/module.c */
#define __NR_init_module 105
__SYSCALL(__NR_init_module, sys_init_module)
#define __NR_delete_module 106
__SYSCALL(__NR_delete_module, sys_delete_module)
-
-/* kernel/posix-timers.c */
#define __NR_timer_create 107
__SC_COMP(__NR_timer_create, sys_timer_create, compat_sys_timer_create)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_timer_gettime 108
__SC_3264(__NR_timer_gettime, sys_timer_gettime32, sys_timer_gettime)
#endif
+
#define __NR_timer_getoverrun 109
__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_timer_settime 110
__SC_3264(__NR_timer_settime, sys_timer_settime32, sys_timer_settime)
#endif
+
#define __NR_timer_delete 111
__SYSCALL(__NR_timer_delete, sys_timer_delete)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_clock_settime 112
__SC_3264(__NR_clock_settime, sys_clock_settime32, sys_clock_settime)
@@ -377,15 +324,10 @@ __SC_3264(__NR_clock_nanosleep, sys_clock_nanosleep_time32, \
sys_clock_nanosleep)
#endif
-/* kernel/printk.c */
#define __NR_syslog 116
__SYSCALL(__NR_syslog, sys_syslog)
-
-/* kernel/ptrace.c */
#define __NR_ptrace 117
__SC_COMP(__NR_ptrace, sys_ptrace, compat_sys_ptrace)
-
-/* kernel/sched/core.c */
#define __NR_sched_setparam 118
__SYSCALL(__NR_sched_setparam, sys_sched_setparam)
#define __NR_sched_setscheduler 119
@@ -406,13 +348,13 @@ __SYSCALL(__NR_sched_yield, sys_sched_yield)
__SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max)
#define __NR_sched_get_priority_min 126
__SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_sched_rr_get_interval 127
__SC_3264(__NR_sched_rr_get_interval, sys_sched_rr_get_interval_time32, \
sys_sched_rr_get_interval)
#endif
-/* kernel/signal.c */
#define __NR_restart_syscall 128
__SYSCALL(__NR_restart_syscall, sys_restart_syscall)
#define __NR_kill 129
@@ -431,18 +373,18 @@ __SC_COMP(__NR_rt_sigaction, sys_rt_sigaction, compat_sys_rt_sigaction)
__SC_COMP(__NR_rt_sigprocmask, sys_rt_sigprocmask, compat_sys_rt_sigprocmask)
#define __NR_rt_sigpending 136
__SC_COMP(__NR_rt_sigpending, sys_rt_sigpending, compat_sys_rt_sigpending)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_rt_sigtimedwait 137
__SC_COMP_3264(__NR_rt_sigtimedwait, sys_rt_sigtimedwait_time32, \
sys_rt_sigtimedwait, compat_sys_rt_sigtimedwait_time32)
#endif
+
#define __NR_rt_sigqueueinfo 138
__SC_COMP(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo, \
compat_sys_rt_sigqueueinfo)
#define __NR_rt_sigreturn 139
__SC_COMP(__NR_rt_sigreturn, sys_rt_sigreturn, compat_sys_rt_sigreturn)
-
-/* kernel/sys.c */
#define __NR_setpriority 140
__SYSCALL(__NR_setpriority, sys_setpriority)
#define __NR_getpriority 141
@@ -507,7 +449,6 @@ __SYSCALL(__NR_prctl, sys_prctl)
#define __NR_getcpu 168
__SYSCALL(__NR_getcpu, sys_getcpu)
-/* kernel/time.c */
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_gettimeofday 169
__SC_COMP(__NR_gettimeofday, sys_gettimeofday, compat_sys_gettimeofday)
@@ -517,7 +458,6 @@ __SC_COMP(__NR_settimeofday, sys_settimeofday, compat_sys_settimeofday)
__SC_3264(__NR_adjtimex, sys_adjtimex_time32, sys_adjtimex)
#endif
-/* kernel/sys.c */
#define __NR_getpid 172
__SYSCALL(__NR_getpid, sys_getpid)
#define __NR_getppid 173
@@ -534,12 +474,11 @@ __SYSCALL(__NR_getegid, sys_getegid)
__SYSCALL(__NR_gettid, sys_gettid)
#define __NR_sysinfo 179
__SC_COMP(__NR_sysinfo, sys_sysinfo, compat_sys_sysinfo)
-
-/* ipc/mqueue.c */
#define __NR_mq_open 180
__SC_COMP(__NR_mq_open, sys_mq_open, compat_sys_mq_open)
#define __NR_mq_unlink 181
__SYSCALL(__NR_mq_unlink, sys_mq_unlink)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_mq_timedsend 182
__SC_3264(__NR_mq_timedsend, sys_mq_timedsend_time32, sys_mq_timedsend)
@@ -547,12 +486,11 @@ __SC_3264(__NR_mq_timedsend, sys_mq_timedsend_time32, sys_mq_timedsend)
__SC_3264(__NR_mq_timedreceive, sys_mq_timedreceive_time32, \
sys_mq_timedreceive)
#endif
+
#define __NR_mq_notify 184
__SC_COMP(__NR_mq_notify, sys_mq_notify, compat_sys_mq_notify)
#define __NR_mq_getsetattr 185
__SC_COMP(__NR_mq_getsetattr, sys_mq_getsetattr, compat_sys_mq_getsetattr)
-
-/* ipc/msg.c */
#define __NR_msgget 186
__SYSCALL(__NR_msgget, sys_msgget)
#define __NR_msgctl 187
@@ -561,20 +499,18 @@ __SC_COMP(__NR_msgctl, sys_msgctl, compat_sys_msgctl)
__SC_COMP(__NR_msgrcv, sys_msgrcv, compat_sys_msgrcv)
#define __NR_msgsnd 189
__SC_COMP(__NR_msgsnd, sys_msgsnd, compat_sys_msgsnd)
-
-/* ipc/sem.c */
#define __NR_semget 190
__SYSCALL(__NR_semget, sys_semget)
#define __NR_semctl 191
__SC_COMP(__NR_semctl, sys_semctl, compat_sys_semctl)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_semtimedop 192
__SC_3264(__NR_semtimedop, sys_semtimedop_time32, sys_semtimedop)
#endif
+
#define __NR_semop 193
__SYSCALL(__NR_semop, sys_semop)
-
-/* ipc/shm.c */
#define __NR_shmget 194
__SYSCALL(__NR_shmget, sys_shmget)
#define __NR_shmctl 195
@@ -583,8 +519,6 @@ __SC_COMP(__NR_shmctl, sys_shmctl, compat_sys_shmctl)
__SC_COMP(__NR_shmat, sys_shmat, compat_sys_shmat)
#define __NR_shmdt 197
__SYSCALL(__NR_shmdt, sys_shmdt)
-
-/* net/socket.c */
#define __NR_socket 198
__SYSCALL(__NR_socket, sys_socket)
#define __NR_socketpair 199
@@ -615,40 +549,30 @@ __SYSCALL(__NR_shutdown, sys_shutdown)
__SC_COMP(__NR_sendmsg, sys_sendmsg, compat_sys_sendmsg)
#define __NR_recvmsg 212
__SC_COMP(__NR_recvmsg, sys_recvmsg, compat_sys_recvmsg)
-
-/* mm/filemap.c */
#define __NR_readahead 213
__SC_COMP(__NR_readahead, sys_readahead, compat_sys_readahead)
-
-/* mm/nommu.c, also with MMU */
#define __NR_brk 214
__SYSCALL(__NR_brk, sys_brk)
#define __NR_munmap 215
__SYSCALL(__NR_munmap, sys_munmap)
#define __NR_mremap 216
__SYSCALL(__NR_mremap, sys_mremap)
-
-/* security/keys/keyctl.c */
#define __NR_add_key 217
__SYSCALL(__NR_add_key, sys_add_key)
#define __NR_request_key 218
__SYSCALL(__NR_request_key, sys_request_key)
#define __NR_keyctl 219
__SC_COMP(__NR_keyctl, sys_keyctl, compat_sys_keyctl)
-
-/* arch/example/kernel/sys_example.c */
#define __NR_clone 220
__SYSCALL(__NR_clone, sys_clone)
#define __NR_execve 221
__SC_COMP(__NR_execve, sys_execve, compat_sys_execve)
-
#define __NR3264_mmap 222
__SC_3264(__NR3264_mmap, sys_mmap2, sys_mmap)
-/* mm/fadvise.c */
#define __NR3264_fadvise64 223
__SC_COMP(__NR3264_fadvise64, sys_fadvise64_64, compat_sys_fadvise64_64)
-/* mm/, CONFIG_MMU only */
+/* CONFIG_MMU only */
#ifndef __ARCH_NOMMU
#define __NR_swapon 224
__SYSCALL(__NR_swapon, sys_swapon)
@@ -691,6 +615,7 @@ __SC_COMP(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo, \
__SYSCALL(__NR_perf_event_open, sys_perf_event_open)
#define __NR_accept4 242
__SYSCALL(__NR_accept4, sys_accept4)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_recvmmsg 243
__SC_COMP_3264(__NR_recvmmsg, sys_recvmmsg_time32, sys_recvmmsg, compat_sys_recvmmsg_time32)
@@ -706,6 +631,7 @@ __SC_COMP_3264(__NR_recvmmsg, sys_recvmmsg_time32, sys_recvmmsg, compat_sys_recv
#define __NR_wait4 260
__SC_COMP(__NR_wait4, sys_wait4, compat_sys_wait4)
#endif
+
#define __NR_prlimit64 261
__SYSCALL(__NR_prlimit64, sys_prlimit64)
#define __NR_fanotify_init 262
@@ -716,10 +642,12 @@ __SYSCALL(__NR_fanotify_mark, sys_fanotify_mark)
__SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at)
#define __NR_open_by_handle_at 265
__SYSCALL(__NR_open_by_handle_at, sys_open_by_handle_at)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_clock_adjtime 266
__SC_3264(__NR_clock_adjtime, sys_clock_adjtime32, sys_clock_adjtime)
#endif
+
#define __NR_syncfs 267
__SYSCALL(__NR_syncfs, sys_syncfs)
#define __NR_setns 268
@@ -770,15 +698,19 @@ __SYSCALL(__NR_pkey_alloc, sys_pkey_alloc)
__SYSCALL(__NR_pkey_free, sys_pkey_free)
#define __NR_statx 291
__SYSCALL(__NR_statx, sys_statx)
+
#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
#define __NR_io_pgetevents 292
__SC_COMP_3264(__NR_io_pgetevents, sys_io_pgetevents_time32, sys_io_pgetevents, compat_sys_io_pgetevents)
#endif
+
#define __NR_rseq 293
__SYSCALL(__NR_rseq, sys_rseq)
#define __NR_kexec_file_load 294
__SYSCALL(__NR_kexec_file_load, sys_kexec_file_load)
+
/* 295 through 402 are unassigned to sync up with generic numbers, don't use */
+
#if defined(__SYSCALL_COMPAT) || __BITS_PER_LONG == 32
#define __NR_clock_gettime64 403
__SYSCALL(__NR_clock_gettime64, sys_clock_gettime)
@@ -844,13 +776,14 @@ __SYSCALL(__NR_fsmount, sys_fsmount)
__SYSCALL(__NR_fspick, sys_fspick)
#define __NR_pidfd_open 434
__SYSCALL(__NR_pidfd_open, sys_pidfd_open)
+
#ifdef __ARCH_WANT_SYS_CLONE3
#define __NR_clone3 435
__SYSCALL(__NR_clone3, sys_clone3)
#endif
+
#define __NR_close_range 436
__SYSCALL(__NR_close_range, sys_close_range)
-
#define __NR_openat2 437
__SYSCALL(__NR_openat2, sys_openat2)
#define __NR_pidfd_getfd 438
@@ -865,7 +798,6 @@ __SC_COMP(__NR_epoll_pwait2, sys_epoll_pwait2, compat_sys_epoll_pwait2)
__SYSCALL(__NR_mount_setattr, sys_mount_setattr)
#define __NR_quotactl_fd 443
__SYSCALL(__NR_quotactl_fd, sys_quotactl_fd)
-
#define __NR_landlock_create_ruleset 444
__SYSCALL(__NR_landlock_create_ruleset, sys_landlock_create_ruleset)
#define __NR_landlock_add_rule 445
@@ -877,17 +809,41 @@ __SYSCALL(__NR_landlock_restrict_self, sys_landlock_restrict_self)
#define __NR_memfd_secret 447
__SYSCALL(__NR_memfd_secret, sys_memfd_secret)
#endif
+
#define __NR_process_mrelease 448
__SYSCALL(__NR_process_mrelease, sys_process_mrelease)
-
#define __NR_futex_waitv 449
__SYSCALL(__NR_futex_waitv, sys_futex_waitv)
-
#define __NR_set_mempolicy_home_node 450
__SYSCALL(__NR_set_mempolicy_home_node, sys_set_mempolicy_home_node)
+#define __NR_cachestat 451
+__SYSCALL(__NR_cachestat, sys_cachestat)
+#define __NR_fchmodat2 452
+__SYSCALL(__NR_fchmodat2, sys_fchmodat2)
+#define __NR_map_shadow_stack 453
+__SYSCALL(__NR_map_shadow_stack, sys_map_shadow_stack)
+#define __NR_futex_wake 454
+__SYSCALL(__NR_futex_wake, sys_futex_wake)
+#define __NR_futex_wait 455
+__SYSCALL(__NR_futex_wait, sys_futex_wait)
+#define __NR_futex_requeue 456
+__SYSCALL(__NR_futex_requeue, sys_futex_requeue)
+
+#define __NR_statmount 457
+__SYSCALL(__NR_statmount, sys_statmount)
+
+#define __NR_listmount 458
+__SYSCALL(__NR_listmount, sys_listmount)
+
+#define __NR_lsm_get_self_attr 459
+__SYSCALL(__NR_lsm_get_self_attr, sys_lsm_get_self_attr)
+#define __NR_lsm_set_self_attr 460
+__SYSCALL(__NR_lsm_set_self_attr, sys_lsm_set_self_attr)
+#define __NR_lsm_list_modules 461
+__SYSCALL(__NR_lsm_list_modules, sys_lsm_list_modules)
#undef __NR_syscalls
-#define __NR_syscalls 451
+#define __NR_syscalls 462
/*
* 32 bit systems traditionally used different
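
Before libc grows wrappers, the syscall numbers added above are reachable through syscall(2). A hedged sketch for fchmodat2, assuming its prototype mirrors fchmodat(2) with the flags argument actually honored, and headers new enough to define __NR_fchmodat2:

#include <fcntl.h>      /* AT_FDCWD, AT_SYMLINK_NOFOLLOW */
#include <sys/stat.h>   /* mode_t */
#include <sys/syscall.h>
#include <unistd.h>

/* Sketch, assuming the fchmodat2(dfd, path, mode, flags) prototype: */
static int my_fchmodat2(int dfd, const char *path, mode_t mode, int flags)
{
        return (int)syscall(__NR_fchmodat2, dfd, path, mode, flags);
}

/* e.g. chmod a symlink itself rather than its target:
 *      my_fchmodat2(AT_FDCWD, "link", 0600, AT_SYMLINK_NOFOLLOW);
 */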
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index b6eb90df5d05..ad21c613fec8 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -94,6 +94,9 @@ extern "C" {
*
* %AMDGPU_GEM_DOMAIN_OA Ordered append, used by 3D or Compute engines
* for appending data.
+ *
+ * %AMDGPU_GEM_DOMAIN_DOORBELL Doorbell. It is an MMIO region for
+ * signalling user mode queues.
*/
#define AMDGPU_GEM_DOMAIN_CPU 0x1
#define AMDGPU_GEM_DOMAIN_GTT 0x2
@@ -101,12 +104,14 @@ extern "C" {
#define AMDGPU_GEM_DOMAIN_GDS 0x8
#define AMDGPU_GEM_DOMAIN_GWS 0x10
#define AMDGPU_GEM_DOMAIN_OA 0x20
+#define AMDGPU_GEM_DOMAIN_DOORBELL 0x40
#define AMDGPU_GEM_DOMAIN_MASK (AMDGPU_GEM_DOMAIN_CPU | \
AMDGPU_GEM_DOMAIN_GTT | \
AMDGPU_GEM_DOMAIN_VRAM | \
AMDGPU_GEM_DOMAIN_GDS | \
AMDGPU_GEM_DOMAIN_GWS | \
- AMDGPU_GEM_DOMAIN_OA)
+ AMDGPU_GEM_DOMAIN_OA | \
+ AMDGPU_GEM_DOMAIN_DOORBELL)
/* Flag that CPU access will be required for the case of VRAM domain */
#define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED (1 << 0)
@@ -145,7 +150,7 @@ extern "C" {
*/
#define AMDGPU_GEM_CREATE_DISCARDABLE (1 << 12)
/* Flag that BO is shared coherently between multiple devices or CPU threads.
- * May depend on GPU instructions to flush caches explicitly
+ * May depend on GPU instructions to flush caches to system scope explicitly.
*
* This influences the choice of MTYPE in the PTEs on GFXv9 and later GPUs and
* may override the MTYPE selected in AMDGPU_VA_OP_MAP.
@@ -158,6 +163,14 @@ extern "C" {
* may override the MTYPE selected in AMDGPU_VA_OP_MAP.
*/
#define AMDGPU_GEM_CREATE_UNCACHED (1 << 14)
+/* Flag that BO should be coherent across devices when using device-level
+ * atomics. May depend on GPU instructions to flush caches to device scope
+ * explicitly, promoting them to system scope automatically.
+ *
+ * This influences the choice of MTYPE in the PTEs on GFXv9 and later GPUs and
+ * may override the MTYPE selected in AMDGPU_VA_OP_MAP.
+ */
+#define AMDGPU_GEM_CREATE_EXT_COHERENT (1 << 15)
struct drm_amdgpu_gem_create_in {
/** the requested memory size */
@@ -236,15 +249,17 @@ union drm_amdgpu_bo_list {
/* unknown cause */
#define AMDGPU_CTX_UNKNOWN_RESET 3
-/* indicate gpu reset occured after ctx created */
+/* indicate gpu reset occurred after ctx created */
#define AMDGPU_CTX_QUERY2_FLAGS_RESET (1<<0)
-/* indicate vram lost occured after ctx created */
+/* indicate vram lost occurred after ctx created */
#define AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST (1<<1)
/* indicate some job from this context once cause gpu hang */
#define AMDGPU_CTX_QUERY2_FLAGS_GUILTY (1<<2)
/* indicate some errors are detected by RAS */
#define AMDGPU_CTX_QUERY2_FLAGS_RAS_CE (1<<3)
#define AMDGPU_CTX_QUERY2_FLAGS_RAS_UE (1<<4)
+/* indicate that the reset hasn't completed yet */
+#define AMDGPU_CTX_QUERY2_FLAGS_RESET_IN_PROGRESS (1<<5)
/* Context priority level */
#define AMDGPU_CTX_PRIORITY_UNSET -2048
@@ -579,7 +594,8 @@ struct drm_amdgpu_gem_va {
*/
#define AMDGPU_HW_IP_VCN_ENC 7
#define AMDGPU_HW_IP_VCN_JPEG 8
-#define AMDGPU_HW_IP_NUM 9
+#define AMDGPU_HW_IP_VPE 9
+#define AMDGPU_HW_IP_NUM 10
#define AMDGPU_HW_IP_INSTANCE_MAX_COUNT 1
@@ -592,6 +608,7 @@ struct drm_amdgpu_gem_va {
#define AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES 0x07
#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT 0x08
#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL 0x09
+#define AMDGPU_CHUNK_ID_CP_GFX_SHADOW 0x0a
struct drm_amdgpu_cs_chunk {
__u32 chunk_id;
@@ -708,6 +725,15 @@ struct drm_amdgpu_cs_chunk_data {
};
};
+#define AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW 0x1
+
+struct drm_amdgpu_cs_chunk_cp_gfx_shadow {
+ __u64 shadow_va;
+ __u64 csa_va;
+ __u64 gds_va;
+ __u64 flags;
+};
+
/*
* Query h/w info: Flag that this is integrated (a.h.a. fusion) GPU
*
@@ -780,6 +806,8 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_FW_MES 0x1a
/* Subquery id: Query IMU firmware version */
#define AMDGPU_INFO_FW_IMU 0x1b
+ /* Subquery id: Query VPE firmware version */
+ #define AMDGPU_INFO_FW_VPE 0x1c
/* number of bytes moved for TTM migration */
#define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f
@@ -876,6 +904,10 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_VIDEO_CAPS_DECODE 0
/* Subquery id: Encode */
#define AMDGPU_INFO_VIDEO_CAPS_ENCODE 1
+/* Query the max number of IBs per gang per submission */
+#define AMDGPU_INFO_MAX_IBS 0x22
+/* query last page fault info */
+#define AMDGPU_INFO_GPUVM_FAULT 0x23
#define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0
#define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff
@@ -1126,6 +1158,14 @@ struct drm_amdgpu_info_device {
__u64 mall_size; /* AKA infinity cache */
/* high 32 bits of the rb pipes mask */
__u32 enabled_rb_pipes_mask_hi;
+ /* shadow area size for gfx11 */
+ __u32 shadow_size;
+ /* shadow area base virtual alignment for gfx11 */
+ __u32 shadow_alignment;
+ /* context save area size for gfx11 */
+ __u32 csa_size;
+ /* context save area base virtual alignment for gfx11 */
+ __u32 csa_alignment;
};
struct drm_amdgpu_info_hw_ip {
@@ -1193,6 +1233,20 @@ struct drm_amdgpu_info_video_caps {
struct drm_amdgpu_info_video_codec_info codec_info[AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_COUNT];
};
+#define AMDGPU_VMHUB_TYPE_MASK 0xff
+#define AMDGPU_VMHUB_TYPE_SHIFT 0
+#define AMDGPU_VMHUB_TYPE_GFX 0
+#define AMDGPU_VMHUB_TYPE_MM0 1
+#define AMDGPU_VMHUB_TYPE_MM1 2
+#define AMDGPU_VMHUB_IDX_MASK 0xff00
+#define AMDGPU_VMHUB_IDX_SHIFT 8
+
+struct drm_amdgpu_info_gpuvm_fault {
+ __u64 addr;
+ __u32 status;
+ __u32 vmhub;
+};
+
/*
* Supported GPU families
*/
@@ -1211,6 +1265,7 @@ struct drm_amdgpu_info_video_caps {
#define AMDGPU_FAMILY_GC_11_0_1 148 /* GC 11.0.1 */
#define AMDGPU_FAMILY_GC_10_3_6 149 /* GC 10.3.6 */
#define AMDGPU_FAMILY_GC_10_3_7 151 /* GC 10.3.7 */
+#define AMDGPU_FAMILY_GC_11_5_0 150 /* GC 11.5.0 */
#if defined(__cplusplus)
}
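
The vmhub word of the new struct drm_amdgpu_info_gpuvm_fault packs a hub type and a hub index, split by the masks defined above. A small decode helper (helper name illustrative):

/* Illustrative decode of drm_amdgpu_info_gpuvm_fault::vmhub using the
 * masks defined in this header. */
static void decode_vmhub(__u32 vmhub, __u32 *type, __u32 *idx)
{
        *type = (vmhub & AMDGPU_VMHUB_TYPE_MASK) >> AMDGPU_VMHUB_TYPE_SHIFT;
        *idx  = (vmhub & AMDGPU_VMHUB_IDX_MASK) >> AMDGPU_VMHUB_IDX_SHIFT;
}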
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index a87bbbbca2d4..16122819edfe 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -673,8 +673,11 @@ struct drm_gem_open {
* Bitfield of supported PRIME sharing capabilities. See &DRM_PRIME_CAP_IMPORT
* and &DRM_PRIME_CAP_EXPORT.
*
- * PRIME buffers are exposed as dma-buf file descriptors. See
- * Documentation/gpu/drm-mm.rst, section "PRIME Buffer Sharing".
+ * Starting from kernel version 6.6, both &DRM_PRIME_CAP_IMPORT and
+ * &DRM_PRIME_CAP_EXPORT are always advertised.
+ *
+ * PRIME buffers are exposed as dma-buf file descriptors.
+ * See :ref:`prime_buffer_sharing`.
*/
#define DRM_CAP_PRIME 0x5
/**
@@ -682,6 +685,8 @@ struct drm_gem_open {
*
* If this bit is set in &DRM_CAP_PRIME, the driver supports importing PRIME
* buffers via the &DRM_IOCTL_PRIME_FD_TO_HANDLE ioctl.
+ *
+ * Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME.
*/
#define DRM_PRIME_CAP_IMPORT 0x1
/**
@@ -689,6 +694,8 @@ struct drm_gem_open {
*
* If this bit is set in &DRM_CAP_PRIME, the driver supports exporting PRIME
* buffers via the &DRM_IOCTL_PRIME_HANDLE_TO_FD ioctl.
+ *
+ * Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME.
*/
#define DRM_PRIME_CAP_EXPORT 0x2
/**
@@ -706,7 +713,8 @@ struct drm_gem_open {
/**
* DRM_CAP_ASYNC_PAGE_FLIP
*
- * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC.
+ * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for legacy
+ * page-flips.
*/
#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
/**
@@ -756,17 +764,23 @@ struct drm_gem_open {
/**
* DRM_CAP_SYNCOBJ
*
- * If set to 1, the driver supports sync objects. See
- * Documentation/gpu/drm-mm.rst, section "DRM Sync Objects".
+ * If set to 1, the driver supports sync objects. See :ref:`drm_sync_objects`.
*/
#define DRM_CAP_SYNCOBJ 0x13
/**
* DRM_CAP_SYNCOBJ_TIMELINE
*
* If set to 1, the driver supports timeline operations on sync objects. See
- * Documentation/gpu/drm-mm.rst, section "DRM Sync Objects".
+ * :ref:`drm_sync_objects`.
*/
#define DRM_CAP_SYNCOBJ_TIMELINE 0x14
+/**
+ * DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP
+ *
+ * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for atomic
+ * commits.
+ */
+#define DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP 0x15
/* DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
@@ -836,6 +850,31 @@ struct drm_get_cap {
*/
#define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS 5
+/**
+ * DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT
+ *
+ * Drivers for para-virtualized hardware (e.g. vmwgfx, qxl, virtio and
+ * virtualbox) have additional restrictions for cursor planes (thus
+ * making cursor planes on those drivers not truly universal), e.g.
+ * they need cursor planes to act like one would expect from a mouse
+ * cursor and have correctly set hotspot properties.
+ * If this client cap is not set the DRM core will hide the cursor plane
+ * on those virtualized drivers because not setting it implies that the
+ * client is not capable of dealing with those extra restrictions.
+ * Clients which do set cursor hotspot and treat the cursor plane
+ * like a mouse cursor should set this property.
+ * The client must enable &DRM_CLIENT_CAP_ATOMIC first.
+ *
+ * Setting this property on drivers which do not special case
+ * cursor planes (i.e. non-virtualized drivers) will return
+ * EOPNOTSUPP, which can be used by userspace to gauge
+ * requirements of the hardware/drivers they're running on.
+ *
+ * This capability is always supported for atomic-capable virtualized
+ * drivers starting from kernel version 6.6.
+ */
+#define DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT 6
+
/* DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
struct drm_set_client_cap {
__u64 capability;
@@ -887,6 +926,7 @@ struct drm_syncobj_transfer {
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1)
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (1 << 2) /* wait for time point to become available */
+#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE (1 << 3) /* set fence deadline to deadline_nsec */
struct drm_syncobj_wait {
__u64 handles;
/* absolute timeout */
@@ -895,6 +935,14 @@ struct drm_syncobj_wait {
__u32 flags;
__u32 first_signaled; /* only valid when not waiting all */
__u32 pad;
+ /**
+ * @deadline_nsec: fence deadline hint
+ *
+ * Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing
+ * fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is
+ * set.
+ */
+ __u64 deadline_nsec;
};
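
Filling the new field (present in both the binary and the timeline wait structs) is straightforward since the deadline is absolute CLOCK_MONOTONIC: a sketch that requests a roughly one-frame deadline hint; handle setup and the DRM_IOCTL_SYNCOBJ_WAIT call itself are omitted:

#include <stdint.h>
#include <time.h>

/* Sketch: ask the kernel to propagate a ~16ms deadline hint to the
 * backing fences while waiting. */
static void set_wait_deadline(struct drm_syncobj_wait *wait)
{
        struct timespec now;

        clock_gettime(CLOCK_MONOTONIC, &now);
        wait->flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE;
        wait->deadline_nsec = (uint64_t)now.tv_sec * 1000000000ull +
                              now.tv_nsec + 16000000ull; /* now + 16ms */
}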
struct drm_syncobj_timeline_wait {
@@ -907,6 +955,35 @@ struct drm_syncobj_timeline_wait {
__u32 flags;
__u32 first_signaled; /* only valid when not waiting all */
__u32 pad;
+ /**
+ * @deadline_nsec: fence deadline hint
+ *
+ * Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing
+ * fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is
+ * set.
+ */
+ __u64 deadline_nsec;
+};
+
+/**
+ * struct drm_syncobj_eventfd
+ * @handle: syncobj handle.
+ * @flags: Zero to wait for the point to be signalled, or
+ * &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE to wait for a fence to be
+ * available for the point.
+ * @point: syncobj timeline point (set to zero for binary syncobjs).
+ * @fd: Existing eventfd to send events to.
+ * @pad: Must be zero.
+ *
+ * Register an eventfd to be signalled by a syncobj. The eventfd counter will
+ * be incremented by one.
+ */
+struct drm_syncobj_eventfd {
+ __u32 handle;
+ __u32 flags;
+ __u64 point;
+ __s32 fd;
+ __u32 pad;
};
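
A hedged usage sketch for the new ioctl (defined as DRM_IOCTL_SYNCOBJ_EVENTFD further down): create an eventfd, point the struct at it, and read() later to learn that the chosen timeline point signalled. Error handling omitted:

#include <stdint.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>

/* Sketch: get an eventfd notification when timeline point 42 of a
 * syncobj signals (handle comes from DRM_IOCTL_SYNCOBJ_CREATE). */
static int watch_syncobj_point(int drm_fd, uint32_t handle)
{
        struct drm_syncobj_eventfd ev;
        int efd = eventfd(0, 0);

        memset(&ev, 0, sizeof(ev));
        ev.handle = handle;
        ev.point = 42;
        ev.fd = efd;
        ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_EVENTFD, &ev);
        return efd; /* read() on efd blocks until the point signals */
}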
@@ -1107,6 +1184,26 @@ extern "C" {
#define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
#define DRM_IOCTL_MODE_DIRTYFB DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)
+/**
+ * DRM_IOCTL_MODE_CREATE_DUMB - Create a new dumb buffer object.
+ *
+ * KMS dumb buffers provide a very primitive way to allocate a buffer object
+ * suitable for scanout and map it for software rendering. KMS dumb buffers are
+ * not suitable for hardware-accelerated rendering nor video decoding. KMS dumb
+ * buffers are not suitable for display on any device other than the KMS
+ * device they were allocated from. Also see
+ * :ref:`kms_dumb_buffer_objects`.
+ *
+ * The IOCTL argument is a struct drm_mode_create_dumb.
+ *
+ * User-space is expected to create a KMS dumb buffer via this IOCTL, then add
+ * it as a KMS framebuffer via &DRM_IOCTL_MODE_ADDFB and map it via
+ * &DRM_IOCTL_MODE_MAP_DUMB.
+ *
+ * &DRM_CAP_DUMB_BUFFER indicates whether this IOCTL is supported.
+ * &DRM_CAP_DUMB_PREFERRED_DEPTH and &DRM_CAP_DUMB_PREFER_SHADOW indicate
+ * driver preferences for dumb buffers.
+ */
#define DRM_IOCTL_MODE_CREATE_DUMB DRM_IOWR(0xB2, struct drm_mode_create_dumb)
#define DRM_IOCTL_MODE_MAP_DUMB DRM_IOWR(0xB3, struct drm_mode_map_dumb)
#define DRM_IOCTL_MODE_DESTROY_DUMB DRM_IOWR(0xB4, struct drm_mode_destroy_dumb)
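
Condensing the flow the comment above describes, a sketch of allocating and CPU-mapping a dumb buffer (the ADDFB step and error handling are omitted):

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

/* Sketch of the create -> map flow described above. */
static void *map_new_dumb_buffer(int drm_fd, uint32_t w, uint32_t h)
{
        struct drm_mode_create_dumb create = {
                .width = w, .height = h, .bpp = 32,
        };
        struct drm_mode_map_dumb map = { 0 };

        ioctl(drm_fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
        map.handle = create.handle;
        ioctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &map);

        /* create.pitch and create.size were filled in by the kernel */
        return mmap(NULL, create.size, PROT_READ | PROT_WRITE,
                    MAP_SHARED, drm_fd, map.offset);
}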
@@ -1169,6 +1266,28 @@ extern "C" {
*/
#define DRM_IOCTL_MODE_GETFB2 DRM_IOWR(0xCE, struct drm_mode_fb_cmd2)
+#define DRM_IOCTL_SYNCOBJ_EVENTFD DRM_IOWR(0xCF, struct drm_syncobj_eventfd)
+
+/**
+ * DRM_IOCTL_MODE_CLOSEFB - Close a framebuffer.
+ *
+ * This closes a framebuffer previously added via ADDFB/ADDFB2. The IOCTL
+ * argument is a framebuffer object ID.
+ *
+ * This IOCTL is similar to &DRM_IOCTL_MODE_RMFB, except it doesn't disable
+ * planes and CRTCs. As long as the framebuffer is used by a plane, it's kept
+ * alive. When the plane no longer uses the framebuffer (because the
+ * framebuffer is replaced with another one, or the plane is disabled), the
+ * framebuffer is cleaned up.
+ *
+ * This is useful to implement flicker-free transitions between two processes.
+ *
+ * Depending on the threat model, user-space may want to ensure that the
+ * framebuffer doesn't expose any sensitive user information: closed
+ * framebuffers attached to a plane can be read back by the next DRM master.
+ */
+#define DRM_IOCTL_MODE_CLOSEFB DRM_IOWR(0xD0, struct drm_mode_closefb)
+
/*
* Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x9f.
@@ -1180,25 +1299,50 @@ extern "C" {
#define DRM_COMMAND_BASE 0x40
#define DRM_COMMAND_END 0xA0
-/*
- * Header for events written back to userspace on the drm fd. The
- * type defines the type of event, the length specifies the total
- * length of the event (including the header), and user_data is
- * typically a 64 bit value passed with the ioctl that triggered the
- * event. A read on the drm fd will always only return complete
- * events, that is, if for example the read buffer is 100 bytes, and
- * there are two 64 byte events pending, only one will be returned.
+/**
+ * struct drm_event - Header for DRM events
+ * @type: event type.
+ * @length: total number of payload bytes (including header).
*
- * Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and
- * up are chipset specific.
+ * This struct is a header for events written back to user-space on the DRM FD.
+ * A read on the DRM FD will always only return complete events: e.g. if the
+ * read buffer is 100 bytes large and there are two 64 byte events pending,
+ * only one will be returned.
+ *
+ * Event types 0 - 0x7fffffff are generic DRM events, 0x80000000 and
+ * up are chipset specific. Generic DRM events include &DRM_EVENT_VBLANK,
+ * &DRM_EVENT_FLIP_COMPLETE and &DRM_EVENT_CRTC_SEQUENCE.
*/
struct drm_event {
__u32 type;
__u32 length;
};
+/**
+ * DRM_EVENT_VBLANK - vertical blanking event
+ *
+ * This event is sent in response to &DRM_IOCTL_WAIT_VBLANK with the
+ * &_DRM_VBLANK_EVENT flag set.
+ *
+ * The event payload is a struct drm_event_vblank.
+ */
#define DRM_EVENT_VBLANK 0x01
+/**
+ * DRM_EVENT_FLIP_COMPLETE - page-flip completion event
+ *
+ * This event is sent in response to an atomic commit or legacy page-flip with
+ * the &DRM_MODE_PAGE_FLIP_EVENT flag set.
+ *
+ * The event payload is a struct drm_event_vblank.
+ */
#define DRM_EVENT_FLIP_COMPLETE 0x02
+/**
+ * DRM_EVENT_CRTC_SEQUENCE - CRTC sequence event
+ *
+ * This event is sent in response to &DRM_IOCTL_CRTC_QUEUE_SEQUENCE.
+ *
+ * The event payload is a struct drm_event_crtc_sequence.
+ */
#define DRM_EVENT_CRTC_SEQUENCE 0x03
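
Putting the header comment into practice, a sketch of the standard event-draining loop: read() yields only whole events, and each record is walked by its self-described length. Error handling omitted:

#include <string.h>
#include <unistd.h>

/* Sketch of the event loop implied by the struct drm_event comment. */
static void drain_drm_events(int drm_fd)
{
        char buf[1024];
        ssize_t len = read(drm_fd, buf, sizeof(buf));
        ssize_t off = 0;

        while (off + (ssize_t)sizeof(struct drm_event) <= len) {
                struct drm_event ev;

                memcpy(&ev, buf + off, sizeof(ev));
                if (ev.type == DRM_EVENT_FLIP_COMPLETE)
                        ; /* a struct drm_event_vblank payload follows */
                off += ev.length; /* length includes the header */
        }
}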
struct drm_event_vblank {
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index de703c6be969..84d502e42961 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -54,7 +54,7 @@ extern "C" {
* Format modifiers may change any property of the buffer, including the number
* of planes and/or the required allocation size. Format modifiers are
* vendor-namespaced, and as such the relationship between a fourcc code and a
- * modifier is specific to the modifer being used. For example, some modifiers
+ * modifier is specific to the modifier being used. For example, some modifiers
* may preserve meaning - such as number of planes - from the fourcc code,
* whereas others may not.
*
@@ -79,7 +79,7 @@ extern "C" {
* format.
* - Higher-level programs interfacing with KMS/GBM/EGL/Vulkan/etc: these users
* see modifiers as opaque tokens they can check for equality and intersect.
- * These users musn't need to know to reason about the modifier value
+ * These users mustn't need to know to reason about the modifier value
* (i.e. they are not expected to extract information out of the modifier).
*
* Vendors should document their modifier usage in as much detail as
@@ -323,6 +323,8 @@ extern "C" {
* index 1 = Cr:Cb plane, [39:0] Cr1:Cb1:Cr0:Cb0 little endian
*/
#define DRM_FORMAT_NV15 fourcc_code('N', 'V', '1', '5') /* 2x2 subsampled Cr:Cb plane */
+#define DRM_FORMAT_NV20 fourcc_code('N', 'V', '2', '0') /* 2x1 subsampled Cr:Cb plane */
+#define DRM_FORMAT_NV30 fourcc_code('N', 'V', '3', '0') /* non-subsampled Cr:Cb plane */
/*
* 2 plane YCbCr MSB aligned
@@ -538,7 +540,7 @@ extern "C" {
* This is a tiled layout using 4Kb tiles in row-major layout.
* Within the tile pixels are laid out in 16 256 byte units / sub-tiles which
* are arranged in four groups (two wide, two high) with column-major layout.
- * Each group therefore consits out of four 256 byte units, which are also laid
+ * Each group therefore consists out of four 256 byte units, which are also laid
* out as 2x2 column-major.
* 256 byte units are made out of four 64 byte blocks of pixels, producing
* either a square block or a 2:1 unit.
@@ -658,6 +660,49 @@ extern "C" {
#define I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC fourcc_mod_code(INTEL, 12)
/*
+ * Intel Color Control Surfaces (CCS) for display ver. 14 render compression.
+ *
+ * The main surface is tile4 and at plane index 0, the CCS is linear and
+ * at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in
+ * main surface. In other words, 4 bits in CCS map to a main surface cache
+ * line pair. The main surface pitch is required to be a multiple of four
+ * tile4 widths.
+ */
+#define I915_FORMAT_MOD_4_TILED_MTL_RC_CCS fourcc_mod_code(INTEL, 13)
+
+/*
+ * Intel Color Control Surfaces (CCS) for display ver. 14 media compression
+ *
+ * The main surface is tile4 and at plane index 0, the CCS is linear and
+ * at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in
+ * main surface. In other words, 4 bits in CCS map to a main surface cache
+ * line pair. The main surface pitch is required to be a multiple of four
+ * tile4 widths. For semi-planar formats like NV12, CCS planes follow the
+ * Y and UV planes i.e., planes 0 and 1 are used for Y and UV surfaces,
+ * planes 2 and 3 for the respective CCS.
+ */
+#define I915_FORMAT_MOD_4_TILED_MTL_MC_CCS fourcc_mod_code(INTEL, 14)
+
+/*
+ * Intel Color Control Surface with Clear Color (CCS) for display ver. 14 render
+ * compression.
+ *
+ * The main surface is tile4 and is at plane index 0 whereas CCS is linear
+ * and at index 1. The clear color is stored at index 2, and the pitch should
+ * be ignored. The clear color structure is 256 bits. The first 128 bits
+ * represents Raw Clear Color Red, Green, Blue and Alpha color each represented
+ * by 32 bits. The raw clear color is consumed by the 3d engine and generates
+ * the converted clear color of size 64 bits. The first 32 bits store the Lower
+ * Converted Clear Color value and the next 32 bits store the Higher Converted
+ * Clear Color value when applicable. The Converted Clear Color values are
+ * consumed by the DE. The last 64 bits are used to store Color Discard Enable
+ * and Depth Clear Value Valid which are ignored by the DE. A CCS cache line
+ * corresponds to an area of 4x1 tiles in the main surface. The main surface
+ * pitch is required to be a multiple of 4 tile widths.
+ */
+#define I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC fourcc_mod_code(INTEL, 15)
+
+/*
* Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks
*
* Macroblocks are laid in a Z-shape, and each pixel data is following the
@@ -1058,7 +1103,7 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
*/
/*
- * The top 4 bits (out of the 56 bits alloted for specifying vendor specific
+ * The top 4 bits (out of the 56 bits allotted for specifying vendor specific
* modifiers) denote the category for modifiers. Currently we have three
* categories of modifiers ie AFBC, MISC and AFRC. We can have a maximum of
* sixteen different categories.
@@ -1374,7 +1419,7 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
* Amlogic FBC Memory Saving mode
*
* Indicates the storage is packed when pixel size is multiple of word
- * boudaries, i.e. 8bit should be stored in this mode to save allocation
+ * boundaries, i.e. 8bit should be stored in this mode to save allocation
* memory.
*
* This mode reduces body layout to 3072 bytes per 64x32 superblock with
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 46becedf5b2f..7040e7ea80c7 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -36,10 +36,10 @@ extern "C" {
/**
* DOC: overview
*
- * DRM exposes many UAPI and structure definition to have a consistent
- * and standardized interface with user.
+ * DRM exposes many UAPI and structure definitions to have a consistent
+ * and standardized interface with users.
* Userspace can refer to these structure definitions and UAPI formats
- * to communicate to driver
+ * to communicate to drivers.
*/
#define DRM_CONNECTOR_NAME_LEN 32
@@ -488,6 +488,9 @@ struct drm_mode_get_connector {
* This is not an object ID. This is a per-type connector number. Each
* (type, type_id) combination is unique across all connectors of a DRM
* device.
+ *
+ * The (type, type_id) combination is not a stable identifier: the
+ * type_id can change depending on the driver probe order.
*/
__u32 connector_type_id;
@@ -537,7 +540,7 @@ struct drm_mode_get_connector {
/* the PROP_ATOMIC flag is used to hide properties from userspace that
* is not aware of atomic properties. This is mostly to work around
* older userspace (DDX drivers) that read/write each prop they find,
- * witout being aware that this could be triggering a lengthy modeset.
+ * without being aware that this could be triggering a lengthy modeset.
*/
#define DRM_MODE_PROP_ATOMIC 0x80000000
@@ -661,7 +664,7 @@ struct drm_mode_fb_cmd {
};
#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */
-#define DRM_MODE_FB_MODIFIERS (1<<1) /* enables ->modifer[] */
+#define DRM_MODE_FB_MODIFIERS (1<<1) /* enables ->modifier[] */
/**
* struct drm_mode_fb_cmd2 - Frame-buffer metadata.
@@ -834,10 +837,23 @@ struct drm_color_ctm {
/*
* Conversion matrix in S31.32 sign-magnitude
* (not two's complement!) format.
+ *
+ * out matrix in
+ * |R| |0 1 2| |R|
+ * |G| = |3 4 5| x |G|
+ * |B| |6 7 8| |B|
*/
__u64 matrix[9];
};
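A sketch of the fixed-point encoding used by @matrix (the helper name is illustrative; note the sign lives in bit 63 rather than in a two's complement form):

#include <math.h>
#include <stdint.h>

/* Encode a double as S31.32 sign-magnitude for drm_color_ctm.matrix. */
static uint64_t ctm_s31_32(double v)
{
        uint64_t mag = (uint64_t)llround(fabs(v) * 4294967296.0); /* * 2^32 */

        return (v < 0.0 ? 1ULL << 63 : 0) | (mag & ~(1ULL << 63));
}

/* Identity matrix: diagonal entries 0, 4 and 8 set to 1.0, e.g.: */
/* ctm.matrix[0] = ctm.matrix[4] = ctm.matrix[8] = ctm_s31_32(1.0); */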
+struct drm_color_ctm_3x4 {
+ /*
+ * Conversion matrix with 3x4 dimensions in S31.32 sign-magnitude
+ * (not two's complement!) format.
+ */
+ __u64 matrix[12];
+};
+
struct drm_color_lut {
/*
* Values are mapped linearly to 0.0 - 1.0 range, with 0x0 == 0.0 and
@@ -873,23 +889,23 @@ struct hdr_metadata_infoframe {
* These are coded as unsigned 16-bit values in units of
* 0.00002, where 0x0000 represents zero and 0xC350
* represents 1.0000.
- * @display_primaries.x: X cordinate of color primary.
- * @display_primaries.y: Y cordinate of color primary.
+ * @display_primaries.x: X coordinate of color primary.
+ * @display_primaries.y: Y coordinate of color primary.
*/
struct {
__u16 x, y;
- } display_primaries[3];
+ } display_primaries[3];
/**
* @white_point: White Point of Colorspace Data.
* These are coded as unsigned 16-bit values in units of
* 0.00002, where 0x0000 represents zero and 0xC350
* represents 1.0000.
- * @white_point.x: X cordinate of whitepoint of color primary.
- * @white_point.y: Y cordinate of whitepoint of color primary.
+ * @white_point.x: X coordinate of whitepoint of color primary.
+ * @white_point.y: Y coordinate of whitepoint of color primary.
*/
struct {
__u16 x, y;
- } white_point;
+ } white_point;
/**
* @max_display_mastering_luminance: Max Mastering Display Luminance.
* This value is coded as an unsigned 16-bit value in units of 1 cd/m2,
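A sketch of the 0.00002-per-unit chromaticity encoding above; e.g. the BT.709 white point x = 0.3127 encodes to 15635:

/* Encode a CIE chromaticity coordinate; 0xC350 (50000) == 1.0. */
static __u16 hdr_coord(double v)
{
        return (__u16)llround(v / 0.00002);     /* 0.3127 -> 15635 */
}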
@@ -949,6 +965,15 @@ struct hdr_output_metadata {
* Request that the page-flip is performed as soon as possible, ie. with no
* delay due to waiting for vblank. This may cause tearing to be visible on
* the screen.
+ *
+ * When used with atomic uAPI, the driver will return an error if the hardware
+ * doesn't support performing an asynchronous page-flip for this update.
+ * User-space should handle this, e.g. by falling back to a regular page-flip.
+ *
+ * Note, some hardware might need to perform one last synchronous page-flip
+ * before being able to switch to asynchronous page-flips. As an exception,
+ * the driver will return success even though that first page-flip is not
+ * asynchronous.
*/
#define DRM_MODE_PAGE_FLIP_ASYNC 0x02
#define DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE 0x4
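A minimal user-space sketch of that fallback via the legacy page-flip ioctl (fd, crtc_id, fb_id and ctx are assumed to exist; the exact errno returned for an unsupported async flip is driver dependent):

struct drm_mode_crtc_page_flip flip = {
        .crtc_id   = crtc_id,
        .fb_id     = fb_id,
        .flags     = DRM_MODE_PAGE_FLIP_EVENT | DRM_MODE_PAGE_FLIP_ASYNC,
        .user_data = (__u64)(uintptr_t)ctx,
};

if (ioctl(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip) == -1) {
        /* async flip rejected: retry as a regular vblank-synced flip */
        flip.flags &= ~DRM_MODE_PAGE_FLIP_ASYNC;
        ioctl(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip);
}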
@@ -1024,13 +1049,25 @@ struct drm_mode_crtc_page_flip_target {
__u64 user_data;
};
-/* create a dumb scanout buffer */
+/**
+ * struct drm_mode_create_dumb - Create a KMS dumb buffer for scanout.
+ * @height: buffer height in pixels
+ * @width: buffer width in pixels
+ * @bpp: bits per pixel
+ * @flags: must be zero
+ * @handle: buffer object handle
+ * @pitch: number of bytes between two consecutive lines
+ * @size: size of the whole buffer in bytes
+ *
+ * User-space fills @height, @width, @bpp and @flags. If the IOCTL succeeds,
+ * the kernel fills @handle, @pitch and @size.
+ */
struct drm_mode_create_dumb {
__u32 height;
__u32 width;
__u32 bpp;
__u32 flags;
- /* handle, pitch, size will be returned */
+
__u32 handle;
__u32 pitch;
__u64 size;
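A sketch of the fill-in/fill-out contract described above (fd is an open DRM device node):

struct drm_mode_create_dumb create = {
        .width  = 1920,
        .height = 1080,
        .bpp    = 32,   /* @flags is left zero as required */
};

if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create) == 0) {
        /* the kernel has filled create.handle, create.pitch and create.size */
}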
@@ -1303,6 +1340,16 @@ struct drm_mode_rect {
__s32 y2;
};
+/**
+ * struct drm_mode_closefb
+ * @fb_id: Framebuffer ID.
+ * @pad: Must be zero.
+ */
+struct drm_mode_closefb {
+ __u32 fb_id;
+ __u32 pad;
+};
+
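A usage sketch, assuming the DRM_IOCTL_MODE_CLOSEFB request that accompanies this struct:

struct drm_mode_closefb closefb = { .fb_id = fb_id };   /* @pad stays zero */

ioctl(fd, DRM_IOCTL_MODE_CLOSEFB, &closefb);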
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/drm/habanalabs_accel.h b/include/uapi/drm/habanalabs_accel.h
index d9ef1b151d04..a512dc4cffd0 100644
--- a/include/uapi/drm/habanalabs_accel.h
+++ b/include/uapi/drm/habanalabs_accel.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
*
- * Copyright 2016-2022 HabanaLabs, Ltd.
+ * Copyright 2016-2023 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
@@ -8,8 +8,7 @@
#ifndef HABANALABS_H_
#define HABANALABS_H_
-#include <linux/types.h>
-#include <linux/ioctl.h>
+#include <drm/drm.h>
/*
* Defines that are asic-specific but constitutes as ABI between kernel driver
@@ -607,9 +606,9 @@ enum gaudi2_engine_id {
/*
* ASIC specific PLL index
*
- * Used to retrieve in frequency info of different IPs via
- * HL_INFO_PLL_FREQUENCY under HL_IOCTL_INFO IOCTL. The enums need to be
- * used as an index in struct hl_pll_frequency_info
+ * Used to retrieve frequency info of different IPs via HL_INFO_PLL_FREQUENCY under
+ * DRM_IOCTL_HL_INFO IOCTL.
+ * The enums need to be used as an index in struct hl_pll_frequency_info.
*/
enum hl_goya_pll_index {
@@ -787,18 +786,29 @@ enum hl_server_type {
* The address which accessing it caused the razwi.
* Razwi initiator.
* Razwi cause, was it a page fault or MMU access error.
+ * May return 0 even though no new data is available; in that case
+ * the timestamp will be 0.
* HL_INFO_DEV_MEM_ALLOC_PAGE_SIZES - Retrieve valid page sizes for device memory allocation
* HL_INFO_SECURED_ATTESTATION - Retrieve attestation report of the boot.
* HL_INFO_REGISTER_EVENTFD - Register eventfd for event notifications.
* HL_INFO_UNREGISTER_EVENTFD - Unregister eventfd
* HL_INFO_GET_EVENTS - Retrieve the last occurred events
* HL_INFO_UNDEFINED_OPCODE_EVENT - Retrieve last undefined opcode error information.
+ * May return 0 even though no new data is available; in that case
+ * the timestamp will be 0.
* HL_INFO_ENGINE_STATUS - Retrieve the status of all the h/w engines in the asic.
* HL_INFO_PAGE_FAULT_EVENT - Retrieve parameters of captured page fault.
+ * May return 0 even though no new data is available; in that case
+ * the timestamp will be 0.
* HL_INFO_USER_MAPPINGS - Retrieve user mappings, captured after page fault event.
* HL_INFO_FW_GENERIC_REQ - Send generic request to FW.
* HL_INFO_HW_ERR_EVENT - Retrieve information on the reported HW error.
+ * May return 0 even though no new data is available; in that case
+ * the timestamp will be 0.
* HL_INFO_FW_ERR_EVENT - Retrieve information on the reported FW error.
+ * May return 0 even though no new data is available; in that case
+ * the timestamp will be 0.
+ * HL_INFO_USER_ENGINE_ERR_EVENT - Retrieve the last engine id that reported an error.
*/
#define HL_INFO_HW_IP_INFO 0
#define HL_INFO_HW_EVENTS 1
@@ -835,6 +845,8 @@ enum hl_server_type {
#define HL_INFO_FW_GENERIC_REQ 35
#define HL_INFO_HW_ERR_EVENT 36
#define HL_INFO_FW_ERR_EVENT 37
+#define HL_INFO_USER_ENGINE_ERR_EVENT 38
+#define HL_INFO_DEV_SIGNED 40
#define HL_INFO_VERSION_MAX_LEN 128
#define HL_INFO_CARD_NAME_MAX_LEN 16
@@ -874,11 +886,11 @@ enum hl_server_type {
* @dram_enabled: Whether the DRAM is enabled.
* @security_enabled: Whether security is enabled on device.
* @mme_master_slave_mode: Indicate whether the MME is working in master/slave
- * configuration. Relevant for Greco and later.
+ * configuration. Relevant for Gaudi2 and later.
* @cpucp_version: The CPUCP f/w version.
* @card_name: The card name as passed by the f/w.
* @tpc_enabled_mask_ext: Bit-mask that represents which TPCs are enabled.
- * Relevant for Greco and later.
+ * Relevant for Gaudi2 and later.
* @dram_page_size: The DRAM physical page size.
* @edma_enabled_mask: Bit-mask that represents which EDMAs are enabled.
* Relevant for Gaudi2 and later.
@@ -980,6 +992,7 @@ struct hl_info_reset_count {
struct hl_info_time_sync {
__u64 device_time;
__u64 host_time;
+ __u64 tsc_time;
};
/**
@@ -1217,6 +1230,20 @@ struct hl_info_fw_err_event {
};
/**
+ * struct hl_info_engine_err_event - engine error info
+ * @timestamp: time-stamp of error occurrence
+ * @engine_id: engine id that reported the error.
+ * @error_count: Number of errors reported.
+ * @pad: size padding for u64 granularity.
+ */
+struct hl_info_engine_err_event {
+ __s64 timestamp;
+ __u16 engine_id;
+ __u16 error_count;
+ __u32 pad;
+};
+
+/**
* struct hl_info_dev_memalloc_page_sizes - valid page sizes in device mem alloc information.
* @page_order_bitmask: bitmap in which a set bit represents the order of the supported page size
* (e.g. 0x2100000 means that 1MB and 32MB pages are supported).
@@ -1230,6 +1257,7 @@ struct hl_info_dev_memalloc_page_sizes {
#define SEC_SIGNATURE_BUF_SZ 255 /* (256 - 1) 1 byte used for size */
#define SEC_PUB_DATA_BUF_SZ 510 /* (512 - 2) 2 bytes used for size */
#define SEC_CERTIFICATE_BUF_SZ 2046 /* (2048 - 2) 2 bytes used for size */
+#define SEC_DEV_INFO_BUF_SZ 5120
/*
* struct hl_info_sec_attest - attestation report of the boot
@@ -1264,6 +1292,32 @@ struct hl_info_sec_attest {
__u8 pad0[2];
};
+/*
+ * struct hl_info_signed - device information signed by a secured device.
+ * @nonce: number used only once. A random number provided by the host; this is also passed
+ *         to the quote command as qualifying data.
+ * @pub_data_len: length of the public data (bytes)
+ * @certificate_len: length of the certificate (bytes)
+ * @info_sig_len: length of the attestation signature (bytes)
+ * @public_data: public key info signed info data (outPublic + name + qualifiedName)
+ * @certificate: certificate for the signing key
+ * @info_sig: signature of the info + nonce data.
+ * @dev_info_len: length of device info (bytes)
+ * @dev_info: device info as byte array.
+ */
+struct hl_info_signed {
+ __u32 nonce;
+ __u16 pub_data_len;
+ __u16 certificate_len;
+ __u8 info_sig_len;
+ __u8 public_data[SEC_PUB_DATA_BUF_SZ];
+ __u8 certificate[SEC_CERTIFICATE_BUF_SZ];
+ __u8 info_sig[SEC_SIGNATURE_BUF_SZ];
+ __u16 dev_info_len;
+ __u8 dev_info[SEC_DEV_INFO_BUF_SZ];
+ __u8 pad[2];
+};
+
/**
* struct hl_page_fault_info - page fault information.
* @timestamp: timestamp of page fault.
@@ -1399,7 +1453,7 @@ union hl_cb_args {
*
* HL_CS_CHUNK_FLAGS_USER_ALLOC_CB:
* Indicates if the CB was allocated and mapped by userspace
- * (relevant to greco and above). User allocated CB is a command buffer,
+ * (relevant to Gaudi2 and later). User allocated CB is a command buffer,
* allocated by the user, via malloc (or similar). After allocating the
* CB, the user invokes - “memory ioctl” to map the user memory into a
* device virtual address. The user provides this address via the
@@ -1424,7 +1478,7 @@ struct hl_cs_chunk {
* a DRAM address of the internal CB. In Gaudi, this might also
* represent a mapped host address of the CB.
*
- * Greco onwards:
+ * Gaudi2 onwards:
* For H/W queue, this represents either a Handle of CB on the
* Host, or an SRAM, a DRAM, or a mapped host address of the CB.
*
@@ -2137,6 +2191,13 @@ struct hl_debug_args {
__u32 ctx_id;
};
+#define HL_IOCTL_INFO 0x00
+#define HL_IOCTL_CB 0x01
+#define HL_IOCTL_CS 0x02
+#define HL_IOCTL_WAIT_CS 0x03
+#define HL_IOCTL_MEMORY 0x04
+#define HL_IOCTL_DEBUG 0x05
+
/*
* Various information operations such as:
* - H/W IP information
@@ -2151,8 +2212,7 @@ struct hl_debug_args {
* definitions of structures in kernel and userspace, e.g. in case of old
* userspace and new kernel driver
*/
-#define HL_IOCTL_INFO \
- _IOWR('H', 0x01, struct hl_info_args)
+#define DRM_IOCTL_HL_INFO DRM_IOWR(DRM_COMMAND_BASE + HL_IOCTL_INFO, struct hl_info_args)
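A usage sketch, assuming the hl_info_args layout from elsewhere in this header (an @op selector plus @return_pointer/@return_size describing the output buffer):

struct hl_info_hw_ip_info hw_ip = {0};
struct hl_info_args args = {
        .op             = HL_INFO_HW_IP_INFO,
        .return_pointer = (__u64)(uintptr_t)&hw_ip,
        .return_size    = sizeof(hw_ip),
};

int err = ioctl(fd, DRM_IOCTL_HL_INFO, &args);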
/*
* Command Buffer
@@ -2173,8 +2233,7 @@ struct hl_debug_args {
* and won't be returned to user.
*
*/
-#define HL_IOCTL_CB \
- _IOWR('H', 0x02, union hl_cb_args)
+#define DRM_IOCTL_HL_CB DRM_IOWR(DRM_COMMAND_BASE + HL_IOCTL_CB, union hl_cb_args)
/*
* Command Submission
@@ -2196,7 +2255,7 @@ struct hl_debug_args {
* internal. The driver will get completion notifications from the device only
* on JOBS which are enqueued in the external queues.
*
- * Greco onwards:
+ * Gaudi2 onwards:
* There is a single type of queue for all types of engines, either DMA engines
* for transfers from/to the host or inside the device, or compute engines.
* The driver will get completion notifications from the device for all queues.
@@ -2226,8 +2285,7 @@ struct hl_debug_args {
* and only if CS N and CS N-1 are exactly the same (same CBs for the same
* queues).
*/
-#define HL_IOCTL_CS \
- _IOWR('H', 0x03, union hl_cs_args)
+#define DRM_IOCTL_HL_CS DRM_IOWR(DRM_COMMAND_BASE + HL_IOCTL_CS, union hl_cs_args)
/*
* Wait for Command Submission
@@ -2259,9 +2317,7 @@ struct hl_debug_args {
* HL_WAIT_CS_STATUS_ABORTED - The CS was aborted, usually because the
* device was reset (EIO)
*/
-
-#define HL_IOCTL_WAIT_CS \
- _IOWR('H', 0x04, union hl_wait_cs_args)
+#define DRM_IOCTL_HL_WAIT_CS DRM_IOWR(DRM_COMMAND_BASE + HL_IOCTL_WAIT_CS, union hl_wait_cs_args)
/*
* Memory
@@ -2278,8 +2334,7 @@ struct hl_debug_args {
* There is an option for the user to specify the requested virtual address.
*
*/
-#define HL_IOCTL_MEMORY \
- _IOWR('H', 0x05, union hl_mem_args)
+#define DRM_IOCTL_HL_MEMORY DRM_IOWR(DRM_COMMAND_BASE + HL_IOCTL_MEMORY, union hl_mem_args)
/*
* Debug
@@ -2305,10 +2360,9 @@ struct hl_debug_args {
* The driver can decide to "kick out" the user if he abuses this interface.
*
*/
-#define HL_IOCTL_DEBUG \
- _IOWR('H', 0x06, struct hl_debug_args)
+#define DRM_IOCTL_HL_DEBUG DRM_IOWR(DRM_COMMAND_BASE + HL_IOCTL_DEBUG, struct hl_debug_args)
-#define HL_COMMAND_START 0x01
-#define HL_COMMAND_END 0x07
+#define HL_COMMAND_START (DRM_COMMAND_BASE + HL_IOCTL_INFO)
+#define HL_COMMAND_END (DRM_COMMAND_BASE + HL_IOCTL_DEBUG + 1)
#endif /* HABANALABS_H_ */
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index dba7c5a5b25e..fd4f9574d177 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -38,13 +38,13 @@ extern "C" {
*/
/**
- * DOC: uevents generated by i915 on it's device node
+ * DOC: uevents generated by i915 on its device node
*
* I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
- * event from the gpu l3 cache. Additional information supplied is ROW,
+ * event from the GPU L3 cache. Additional information supplied is ROW,
* BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
- * track of these events and if a specific cache-line seems to have a
- * persistent error remap it with the l3 remapping tool supplied in
+ * track of these events, and if a specific cache-line seems to have a
+ * persistent error, remap it with the L3 remapping tool supplied in
* intel-gpu-tools. The value supplied with the event is always 1.
*
* I915_ERROR_UEVENT - Generated upon error detection, currently only via
@@ -280,7 +280,16 @@ enum drm_i915_pmu_engine_sample {
#define I915_PMU_ENGINE_SEMA(class, instance) \
__I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)
-#define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x))
+/*
+ * Top 4 bits of every non-engine counter are GT id.
+ */
+#define __I915_PMU_GT_SHIFT (60)
+
+#define ___I915_PMU_OTHER(gt, x) \
+ (((__u64)__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x)) | \
+ ((__u64)(gt) << __I915_PMU_GT_SHIFT))
+
+#define __I915_PMU_OTHER(x) ___I915_PMU_OTHER(0, x)
#define I915_PMU_ACTUAL_FREQUENCY __I915_PMU_OTHER(0)
#define I915_PMU_REQUESTED_FREQUENCY __I915_PMU_OTHER(1)
@@ -290,6 +299,12 @@ enum drm_i915_pmu_engine_sample {
#define I915_PMU_LAST /* Deprecated - do not use */ I915_PMU_RC6_RESIDENCY
+#define __I915_PMU_ACTUAL_FREQUENCY(gt) ___I915_PMU_OTHER(gt, 0)
+#define __I915_PMU_REQUESTED_FREQUENCY(gt) ___I915_PMU_OTHER(gt, 1)
+#define __I915_PMU_INTERRUPTS(gt) ___I915_PMU_OTHER(gt, 2)
+#define __I915_PMU_RC6_RESIDENCY(gt) ___I915_PMU_OTHER(gt, 3)
+#define __I915_PMU_SOFTWARE_GT_AWAKE_TIME(gt) ___I915_PMU_OTHER(gt, 4)
+
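For example, GT 1's actual-frequency counter is the engine-counter base plus index 0 with the GT id in bits 63:60; a perf sketch (i915_pmu_type is assumed to be read from /sys/bus/event_source/devices/i915/type):

#include <linux/perf_event.h>

struct perf_event_attr attr = {
        .type   = i915_pmu_type,
        .size   = sizeof(attr),
        .config = __I915_PMU_ACTUAL_FREQUENCY(1),       /* GT id 1 in bits 63:60 */
};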
/* Each region is a minimum of 16k, and there are at most 255 of them.
*/
#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use
@@ -659,7 +674,8 @@ typedef struct drm_i915_irq_wait {
* If the IOCTL is successful, the returned parameter will be set to one of the
* following values:
* * 0 if HuC firmware load is not complete,
- * * 1 if HuC firmware is authenticated and running.
+ * * 1 if HuC firmware is loaded and fully authenticated,
+ * * 2 if HuC firmware is loaded and authenticated for clear media only
*/
#define I915_PARAM_HUC_STATUS 42
@@ -677,7 +693,7 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_EXEC_FENCE 44
/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture
- * user specified bufffers for post-mortem debugging of GPU hangs. See
+ * user-specified buffers for post-mortem debugging of GPU hangs. See
* EXEC_OBJECT_CAPTURE.
*/
#define I915_PARAM_HAS_EXEC_CAPTURE 45
@@ -771,6 +787,25 @@ typedef struct drm_i915_irq_wait {
*/
#define I915_PARAM_OA_TIMESTAMP_FREQUENCY 57
+/*
+ * Query the status of PXP support in i915.
+ *
+ * The query can fail in the following scenarios with the listed error codes:
+ * -ENODEV = PXP support is not available on the GPU device or in the
+ * kernel due to missing component drivers or kernel configs.
+ *
+ * If the IOCTL is successful, the returned parameter will be set to one of
+ * the following values:
+ * 1 = PXP feature is supported and is ready for use.
+ * 2 = PXP feature is supported but should be ready soon (pending
+ * initialization of non-i915 system dependencies).
+ *
+ * NOTE: When param is supported (positive return values), user space should
+ * still refer to the GEM PXP context-creation UAPI header specs to be
+ * aware of possible failure due to the system state machine at the time.
+ */
+#define I915_PARAM_PXP_STATUS 58
+
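A query sketch (fd assumed); a value of 1 means protected contexts can be created immediately:

int pxp = 0;
struct drm_i915_getparam gp = {
        .param = I915_PARAM_PXP_STATUS,
        .value = &pxp,
};

if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0 && pxp == 1) {
        /* PXP ready for use */
}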
/* Must be kept compact -- no holes and well documented */
/**
@@ -1571,7 +1606,7 @@ struct drm_i915_gem_busy {
* is accurate.
*
* The returned dword is split into two fields to indicate both
- * the engine classess on which the object is being read, and the
+ * the engine classes on which the object is being read, and the
* engine class on which it is currently being written (if any).
*
* The low word (bits 0:15) indicate if the object is being written
@@ -1780,7 +1815,7 @@ struct drm_i915_gem_madvise {
__u32 handle;
/* Advice: either the buffer will be needed again in the near future,
- * or wont be and could be discarded under memory pressure.
+ * or won't be and could be discarded under memory pressure.
*/
__u32 madv;
@@ -2096,6 +2131,21 @@ struct drm_i915_gem_context_param {
*
* -ENODEV: feature not available
* -EPERM: trying to mark a recoverable or not bannable context as protected
+ * -ENXIO: A dependency such as a component driver or firmware is not yet
+ * loaded so user space may need to attempt again. Depending on the
+ * device, this error may be reported if protected context creation is
+ * attempted very early after kernel start because the internal timeout
+ * waiting for such dependencies is not guaranteed to be larger than
+ * required (numbers differ depending on system and kernel config):
+ * - ADL/RPL: dependencies may take up to 3 seconds from kernel start
+ * while context creation internal timeout is 250 milliseconds
+ * - MTL: dependencies may take up to 8 seconds from kernel start
+ * while context creation internal timeout is 250 milliseconds
+ * NOTE: such dependencies happen once, so a subsequent call to create a
+ * protected context after a prior successful call will not experience
+ * such timeouts and will not return -ENXIO (unless the driver is reloaded,
+ * or, depending on the device, resumes from a suspended state).
+ * -EIO: The firmware did not succeed in creating the protected context.
*/
#define I915_CONTEXT_PARAM_PROTECTED_CONTENT 0xd
/* Must be kept compact -- no holes and well documented */
@@ -3196,7 +3246,7 @@ struct drm_i915_query_topology_info {
* // enough to hold our array of engines. The kernel will fill out the
* // item.length for us, which is the number of bytes we need.
* //
- * // Alternatively a large buffer can be allocated straight away enabling
+ * // Alternatively a large buffer can be allocated straightaway enabling
* // querying in one pass, in which case item.length should contain the
* // length of the provided buffer.
* err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
@@ -3206,7 +3256,7 @@ struct drm_i915_query_topology_info {
* // Now that we allocated the required number of bytes, we call the ioctl
* // again, this time with the data_ptr pointing to our newly allocated
* // blob, which the kernel can then populate with info on all engines.
- * item.data_ptr = (uintptr_t)&info,
+ * item.data_ptr = (uintptr_t)&info;
*
* err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
* if (err) ...
@@ -3236,7 +3286,7 @@ struct drm_i915_query_topology_info {
/**
* struct drm_i915_engine_info
*
- * Describes one engine and it's capabilities as known to the driver.
+ * Describes one engine and its capabilities as known to the driver.
*/
struct drm_i915_engine_info {
/** @engine: Engine class and instance. */
@@ -3630,9 +3680,13 @@ struct drm_i915_gem_create_ext {
*
* For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
* struct drm_i915_gem_create_ext_protected_content.
+ *
+ * For I915_GEM_CREATE_EXT_SET_PAT usage see
+ * struct drm_i915_gem_create_ext_set_pat.
*/
#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
+#define I915_GEM_CREATE_EXT_SET_PAT 2
__u64 extensions;
};
@@ -3747,6 +3801,43 @@ struct drm_i915_gem_create_ext_protected_content {
__u32 flags;
};
+/**
+ * struct drm_i915_gem_create_ext_set_pat - The
+ * I915_GEM_CREATE_EXT_SET_PAT extension.
+ *
+ * If this extension is provided, the specified caching policy (PAT index) is
+ * applied to the buffer object.
+ *
+ * Below is an example on how to create an object with specific caching policy:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_i915_gem_create_ext_set_pat set_pat_ext = {
+ * .base = { .name = I915_GEM_CREATE_EXT_SET_PAT },
+ * .pat_index = 0,
+ * };
+ * struct drm_i915_gem_create_ext create_ext = {
+ * .size = PAGE_SIZE,
+ * .extensions = (uintptr_t)&set_pat_ext,
+ * };
+ *
+ * int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
+ * if (err) ...
+ */
+struct drm_i915_gem_create_ext_set_pat {
+ /** @base: Extension link. See struct i915_user_extension. */
+ struct i915_user_extension base;
+ /**
+ * @pat_index: PAT index to be set.
+ * PAT index is a bit field in the Page Table Entry to control caching
+ * behaviors for GPU accesses. The definition of PAT index is
+ * platform dependent and can be found in hardware specifications.
+ */
+ __u32 pat_index;
+ /** @rsvd: reserved for future use */
+ __u32 rsvd;
+};
+
/* ID of the protected content session managed by i915 when PXP is active */
#define I915_PROTECTED_CONTENT_DEFAULT_SESSION 0xf
diff --git a/include/uapi/drm/ivpu_accel.h b/include/uapi/drm/ivpu_accel.h
index 839820aed87e..19a13468eca5 100644
--- a/include/uapi/drm/ivpu_accel.h
+++ b/include/uapi/drm/ivpu_accel.h
@@ -53,21 +53,44 @@ extern "C" {
#define DRM_IVPU_PARAM_CORE_CLOCK_RATE 3
#define DRM_IVPU_PARAM_NUM_CONTEXTS 4
#define DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS 5
-#define DRM_IVPU_PARAM_CONTEXT_PRIORITY 6
+#define DRM_IVPU_PARAM_CONTEXT_PRIORITY 6 /* Deprecated */
#define DRM_IVPU_PARAM_CONTEXT_ID 7
#define DRM_IVPU_PARAM_FW_API_VERSION 8
#define DRM_IVPU_PARAM_ENGINE_HEARTBEAT 9
#define DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID 10
#define DRM_IVPU_PARAM_TILE_CONFIG 11
#define DRM_IVPU_PARAM_SKU 12
+#define DRM_IVPU_PARAM_CAPABILITIES 13
#define DRM_IVPU_PLATFORM_TYPE_SILICON 0
+/* Deprecated, use DRM_IVPU_JOB_PRIORITY */
#define DRM_IVPU_CONTEXT_PRIORITY_IDLE 0
#define DRM_IVPU_CONTEXT_PRIORITY_NORMAL 1
#define DRM_IVPU_CONTEXT_PRIORITY_FOCUS 2
#define DRM_IVPU_CONTEXT_PRIORITY_REALTIME 3
+#define DRM_IVPU_JOB_PRIORITY_DEFAULT 0
+#define DRM_IVPU_JOB_PRIORITY_IDLE 1
+#define DRM_IVPU_JOB_PRIORITY_NORMAL 2
+#define DRM_IVPU_JOB_PRIORITY_FOCUS 3
+#define DRM_IVPU_JOB_PRIORITY_REALTIME 4
+
+/**
+ * DRM_IVPU_CAP_METRIC_STREAMER
+ *
+ * Metric streamer support. Provides sampling of various hardware performance
+ * metrics like DMA bandwidth and cache misses/hits. Can be used for profiling.
+ */
+#define DRM_IVPU_CAP_METRIC_STREAMER 1
+/**
+ * DRM_IVPU_CAP_DMA_MEMORY_RANGE
+ *
+ * The driver is capable of allocating a separate memory range
+ * accessible by hardware DMA.
+ */
+#define DRM_IVPU_CAP_DMA_MEMORY_RANGE 2
+
/**
* struct drm_ivpu_param - Get/Set VPU parameters
*/
@@ -96,10 +119,6 @@ struct drm_ivpu_param {
* %DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
* Lowest VPU virtual address available in the current context (read-only)
*
- * %DRM_IVPU_PARAM_CONTEXT_PRIORITY:
- * Value of current context scheduling priority (read-write).
- * See DRM_IVPU_CONTEXT_PRIORITY_* for possible values.
- *
* %DRM_IVPU_PARAM_CONTEXT_ID:
* Current context ID, always greater than 0 (read-only)
*
@@ -119,6 +138,8 @@ struct drm_ivpu_param {
* %DRM_IVPU_PARAM_SKU:
* VPU SKU ID (read-only)
*
+ * %DRM_IVPU_PARAM_CAPABILITIES:
+ * Supported capabilities (read-only)
*/
__u32 param;
@@ -129,8 +150,10 @@ struct drm_ivpu_param {
__u64 value;
};
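A capability-probe sketch, assuming the DRM_IOCTL_IVPU_GET_PARAM request and an @index field selecting the capability to query:

struct drm_ivpu_param args = {
        .param = DRM_IVPU_PARAM_CAPABILITIES,
        .index = DRM_IVPU_CAP_METRIC_STREAMER,
};

if (ioctl(fd, DRM_IOCTL_IVPU_GET_PARAM, &args) == 0 && args.value) {
        /* metric streamer is available */
}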
-#define DRM_IVPU_BO_HIGH_MEM 0x00000001
+#define DRM_IVPU_BO_SHAVE_MEM 0x00000001
+#define DRM_IVPU_BO_HIGH_MEM DRM_IVPU_BO_SHAVE_MEM
#define DRM_IVPU_BO_MAPPABLE 0x00000002
+#define DRM_IVPU_BO_DMA_MEM 0x00000004
#define DRM_IVPU_BO_CACHED 0x00000000
#define DRM_IVPU_BO_UNCACHED 0x00010000
@@ -140,6 +163,7 @@ struct drm_ivpu_param {
#define DRM_IVPU_BO_FLAGS \
(DRM_IVPU_BO_HIGH_MEM | \
DRM_IVPU_BO_MAPPABLE | \
+ DRM_IVPU_BO_DMA_MEM | \
DRM_IVPU_BO_CACHE_MASK)
/**
@@ -175,7 +199,7 @@ struct drm_ivpu_bo_create {
*
* %DRM_IVPU_BO_UNCACHED:
*
- * Allocated BO will not be cached on host side nor snooped on the VPU side.
+ * Not supported. Use DRM_IVPU_BO_WC instead.
*
* %DRM_IVPU_BO_WC:
*
@@ -265,10 +289,23 @@ struct drm_ivpu_submit {
* to be executed. The offset has to be 8-byte aligned.
*/
__u32 commands_offset;
+
+ /**
+ * @priority:
+ *
+ * Priority to be set for the related job command queue; can be one of the following:
+ * %DRM_IVPU_JOB_PRIORITY_DEFAULT
+ * %DRM_IVPU_JOB_PRIORITY_IDLE
+ * %DRM_IVPU_JOB_PRIORITY_NORMAL
+ * %DRM_IVPU_JOB_PRIORITY_FOCUS
+ * %DRM_IVPU_JOB_PRIORITY_REALTIME
+ */
+ __u32 priority;
};
/* drm_ivpu_bo_wait job status codes */
#define DRM_IVPU_JOB_STATUS_SUCCESS 0
+#define DRM_IVPU_JOB_STATUS_ABORTED 256
/**
* struct drm_ivpu_bo_wait - Wait for BO to become inactive
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 6c34272a13fd..d8a6b3472760 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -86,6 +86,7 @@ struct drm_msm_timespec {
#define MSM_PARAM_CMDLINE 0x0d /* WO: override for task cmdline */
#define MSM_PARAM_VA_START 0x0e /* RO: start of valid GPU iova range */
#define MSM_PARAM_VA_SIZE 0x0f /* RO: size of valid GPU iova range (bytes) */
+#define MSM_PARAM_HIGHEST_BANK_BIT 0x10 /* RO */
/* For backwards compat. The original support for preemption was based on
* a single ring per priority level so # of priority levels equals the #
@@ -139,6 +140,8 @@ struct drm_msm_gem_new {
#define MSM_INFO_GET_NAME 0x03 /* get debug name, returned by pointer */
#define MSM_INFO_SET_IOVA 0x04 /* set the iova, passed by value */
#define MSM_INFO_GET_FLAGS 0x05 /* get the MSM_BO_x flags */
+#define MSM_INFO_SET_METADATA 0x06 /* set userspace metadata */
+#define MSM_INFO_GET_METADATA 0x07 /* get userspace metadata */
struct drm_msm_gem_info {
__u32 handle; /* in */
diff --git a/include/uapi/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h
index 853a327433d3..77d7ff0d5b11 100644
--- a/include/uapi/drm/nouveau_drm.h
+++ b/include/uapi/drm/nouveau_drm.h
@@ -33,11 +33,75 @@
extern "C" {
#endif
+#define NOUVEAU_GETPARAM_PCI_VENDOR 3
+#define NOUVEAU_GETPARAM_PCI_DEVICE 4
+#define NOUVEAU_GETPARAM_BUS_TYPE 5
+#define NOUVEAU_GETPARAM_FB_SIZE 8
+#define NOUVEAU_GETPARAM_AGP_SIZE 9
+#define NOUVEAU_GETPARAM_CHIPSET_ID 11
+#define NOUVEAU_GETPARAM_VM_VRAM_BASE 12
+#define NOUVEAU_GETPARAM_GRAPH_UNITS 13
+#define NOUVEAU_GETPARAM_PTIMER_TIME 14
+#define NOUVEAU_GETPARAM_HAS_BO_USAGE 15
+#define NOUVEAU_GETPARAM_HAS_PAGEFLIP 16
+
+/*
+ * NOUVEAU_GETPARAM_EXEC_PUSH_MAX - query max pushes through getparam
+ *
+ * Query the maximum number of IBs that can be pushed through a single
+ * &drm_nouveau_exec structure and hence a single &DRM_IOCTL_NOUVEAU_EXEC
+ * ioctl().
+ */
+#define NOUVEAU_GETPARAM_EXEC_PUSH_MAX 17
+
+/*
+ * NOUVEAU_GETPARAM_VRAM_BAR_SIZE - query bar size
+ *
+ * Query the VRAM BAR size.
+ */
+#define NOUVEAU_GETPARAM_VRAM_BAR_SIZE 18
+
+/*
+ * NOUVEAU_GETPARAM_VRAM_USED
+ *
+ * Get remaining VRAM size.
+ */
+#define NOUVEAU_GETPARAM_VRAM_USED 19
+
+struct drm_nouveau_getparam {
+ __u64 param;
+ __u64 value;
+};
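A query sketch using the DRM_IOCTL_NOUVEAU_GETPARAM request defined further down:

struct drm_nouveau_getparam getparam = {
        .param = NOUVEAU_GETPARAM_EXEC_PUSH_MAX,
};

if (ioctl(fd, DRM_IOCTL_NOUVEAU_GETPARAM, &getparam) == 0) {
        /* getparam.value now holds the per-EXEC push limit */
}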
+
+struct drm_nouveau_channel_alloc {
+ __u32 fb_ctxdma_handle;
+ __u32 tt_ctxdma_handle;
+
+ __s32 channel;
+ __u32 pushbuf_domains;
+
+ /* Notifier memory */
+ __u32 notifier_handle;
+
+ /* DRM-enforced subchannel assignments */
+ struct {
+ __u32 handle;
+ __u32 grclass;
+ } subchan[8];
+ __u32 nr_subchan;
+};
+
+struct drm_nouveau_channel_free {
+ __s32 channel;
+};
+
#define NOUVEAU_GEM_DOMAIN_CPU (1 << 0)
#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
#define NOUVEAU_GEM_DOMAIN_MAPPABLE (1 << 3)
#define NOUVEAU_GEM_DOMAIN_COHERENT (1 << 4)
+/* The BO will never be shared via import or export. */
+#define NOUVEAU_GEM_DOMAIN_NO_SHARE (1 << 5)
#define NOUVEAU_GEM_TILE_COMP 0x00030000 /* nv50-only */
#define NOUVEAU_GEM_TILE_LAYOUT_MASK 0x0000ff00
@@ -98,6 +162,7 @@ struct drm_nouveau_gem_pushbuf_push {
__u32 pad;
__u64 offset;
__u64 length;
+#define NOUVEAU_GEM_PUSHBUF_NO_PREFETCH (1 << 23)
};
struct drm_nouveau_gem_pushbuf {
@@ -126,16 +191,233 @@ struct drm_nouveau_gem_cpu_fini {
__u32 handle;
};
-#define DRM_NOUVEAU_GETPARAM 0x00 /* deprecated */
+/**
+ * struct drm_nouveau_sync - sync object
+ *
+ * This structure serves as synchronization mechanism for (potentially)
+ * asynchronous operations such as EXEC or VM_BIND.
+ */
+struct drm_nouveau_sync {
+ /**
+ * @flags: the flags for a sync object
+ *
+ * The first 8 bits are used to determine the type of the sync object.
+ */
+ __u32 flags;
+#define DRM_NOUVEAU_SYNC_SYNCOBJ 0x0
+#define DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ 0x1
+#define DRM_NOUVEAU_SYNC_TYPE_MASK 0xf
+ /**
+ * @handle: the handle of the sync object
+ */
+ __u32 handle;
+ /**
+ * @timeline_value:
+ *
+ * The timeline point of the sync object in case the syncobj is of
+ * type DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ.
+ */
+ __u64 timeline_value;
+};
+
+/**
+ * struct drm_nouveau_vm_init - GPU VA space init structure
+ *
+ * Used to initialize the GPU's VA space for a user client, telling the kernel
+ * which portion of the VA space is managed by the UMD and kernel respectively.
+ *
+ * For the UMD to use the VM_BIND uAPI, this must be called before any BOs or
+ * channels are created; if called afterwards DRM_IOCTL_NOUVEAU_VM_INIT fails
+ * with -ENOSYS.
+ */
+struct drm_nouveau_vm_init {
+ /**
+ * @kernel_managed_addr: start address of the kernel managed VA space
+ * region
+ */
+ __u64 kernel_managed_addr;
+ /**
+ * @kernel_managed_size: size of the kernel managed VA space region in
+ * bytes
+ */
+ __u64 kernel_managed_size;
+};
+
+/**
+ * struct drm_nouveau_vm_bind_op - VM_BIND operation
+ *
+ * This structure represents a single VM_BIND operation. UMDs should pass
+ * an array of this structure via struct drm_nouveau_vm_bind's &op_ptr field.
+ */
+struct drm_nouveau_vm_bind_op {
+ /**
+ * @op: the operation type
+ */
+ __u32 op;
+/**
+ * @DRM_NOUVEAU_VM_BIND_OP_MAP:
+ *
+ * Map a GEM object to the GPU's VA space. Optionally, the
+ * &DRM_NOUVEAU_VM_BIND_SPARSE flag can be passed to instruct the kernel to
+ * create sparse mappings for the given range.
+ */
+#define DRM_NOUVEAU_VM_BIND_OP_MAP 0x0
+/**
+ * @DRM_NOUVEAU_VM_BIND_OP_UNMAP:
+ *
+ * Unmap an existing mapping in the GPU's VA space. If the region the mapping
+ * is located in is a sparse region, new sparse mappings are created where the
+ * unmapped (memory backed) mapping was mapped previously. To remove a sparse
+ * region the &DRM_NOUVEAU_VM_BIND_SPARSE must be set.
+ */
+#define DRM_NOUVEAU_VM_BIND_OP_UNMAP 0x1
+ /**
+ * @flags: the flags for a &drm_nouveau_vm_bind_op
+ */
+ __u32 flags;
+/**
+ * @DRM_NOUVEAU_VM_BIND_SPARSE:
+ *
+ * Indicates that an allocated VA space region should be sparse.
+ */
+#define DRM_NOUVEAU_VM_BIND_SPARSE (1 << 8)
+ /**
+ * @handle: the handle of the DRM GEM object to map
+ */
+ __u32 handle;
+ /**
+ * @pad: 32 bit padding, should be 0
+ */
+ __u32 pad;
+ /**
+ * @addr:
+ *
+ * the address the VA space region or (memory backed) mapping should be mapped to
+ */
+ __u64 addr;
+ /**
+ * @bo_offset: the offset within the BO backing the mapping
+ */
+ __u64 bo_offset;
+ /**
+ * @range: the size of the requested mapping in bytes
+ */
+ __u64 range;
+};
+
+/**
+ * struct drm_nouveau_vm_bind - structure for DRM_IOCTL_NOUVEAU_VM_BIND
+ */
+struct drm_nouveau_vm_bind {
+ /**
+ * @op_count: the number of &drm_nouveau_vm_bind_op
+ */
+ __u32 op_count;
+ /**
+ * @flags: the flags for a &drm_nouveau_vm_bind ioctl
+ */
+ __u32 flags;
+/**
+ * @DRM_NOUVEAU_VM_BIND_RUN_ASYNC:
+ *
+ * Indicates that the given VM_BIND operation should be executed asynchronously
+ * by the kernel.
+ *
+ * If this flag is not supplied the kernel executes the associated operations
+ * synchronously and doesn't accept any &drm_nouveau_sync objects.
+ */
+#define DRM_NOUVEAU_VM_BIND_RUN_ASYNC 0x1
+ /**
+ * @wait_count: the number of wait &drm_nouveau_syncs
+ */
+ __u32 wait_count;
+ /**
+ * @sig_count: the number of &drm_nouveau_syncs to signal when finished
+ */
+ __u32 sig_count;
+ /**
+ * @wait_ptr: pointer to &drm_nouveau_syncs to wait for
+ */
+ __u64 wait_ptr;
+ /**
+ * @sig_ptr: pointer to &drm_nouveau_syncs to signal when finished
+ */
+ __u64 sig_ptr;
+ /**
+ * @op_ptr: pointer to the &drm_nouveau_vm_bind_ops to execute
+ */
+ __u64 op_ptr;
+};
+
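A synchronous VM_BIND sketch mapping a single BO (bo_handle, bo_size and map_addr are assumed); without DRM_NOUVEAU_VM_BIND_RUN_ASYNC no sync objects are involved:

struct drm_nouveau_vm_bind_op op = {
        .op        = DRM_NOUVEAU_VM_BIND_OP_MAP,
        .handle    = bo_handle,
        .addr      = map_addr,  /* inside the UMD-managed VA region */
        .bo_offset = 0,
        .range     = bo_size,
};
struct drm_nouveau_vm_bind bind = {
        .op_count = 1,
        .op_ptr   = (__u64)(uintptr_t)&op,
};

int err = ioctl(fd, DRM_IOCTL_NOUVEAU_VM_BIND, &bind);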
+/**
+ * struct drm_nouveau_exec_push - EXEC push operation
+ *
+ * This structure represents a single EXEC push operation. UMDs should pass an
+ * array of this structure via struct drm_nouveau_exec's &push_ptr field.
+ */
+struct drm_nouveau_exec_push {
+ /**
+ * @va: the virtual address of the push buffer mapping
+ */
+ __u64 va;
+ /**
+ * @va_len: the length of the push buffer mapping
+ */
+ __u32 va_len;
+ /**
+ * @flags: the flags for this push buffer mapping
+ */
+ __u32 flags;
+#define DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH 0x1
+};
+
+/**
+ * struct drm_nouveau_exec - structure for DRM_IOCTL_NOUVEAU_EXEC
+ */
+struct drm_nouveau_exec {
+ /**
+ * @channel: the channel to execute the push buffer in
+ */
+ __u32 channel;
+ /**
+ * @push_count: the number of &drm_nouveau_exec_push ops
+ */
+ __u32 push_count;
+ /**
+ * @wait_count: the number of wait &drm_nouveau_syncs
+ */
+ __u32 wait_count;
+ /**
+ * @sig_count: the number of &drm_nouveau_syncs to signal when finished
+ */
+ __u32 sig_count;
+ /**
+ * @wait_ptr: pointer to &drm_nouveau_syncs to wait for
+ */
+ __u64 wait_ptr;
+ /**
+ * @sig_ptr: pointer to &drm_nouveau_syncs to signal when finished
+ */
+ __u64 sig_ptr;
+ /**
+ * @push_ptr: pointer to &drm_nouveau_exec_push ops
+ */
+ __u64 push_ptr;
+};
+
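An EXEC sketch submitting a single push range (channel, pushbuf_va and pushbuf_len assumed; the VA must have been mapped via VM_BIND beforehand):

struct drm_nouveau_exec_push push = {
        .va     = pushbuf_va,
        .va_len = pushbuf_len,
};
struct drm_nouveau_exec exec = {
        .channel    = channel,
        .push_count = 1,
        .push_ptr   = (__u64)(uintptr_t)&push,
};

int err = ioctl(fd, DRM_IOCTL_NOUVEAU_EXEC, &exec);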
+#define DRM_NOUVEAU_GETPARAM 0x00
#define DRM_NOUVEAU_SETPARAM 0x01 /* deprecated */
-#define DRM_NOUVEAU_CHANNEL_ALLOC 0x02 /* deprecated */
-#define DRM_NOUVEAU_CHANNEL_FREE 0x03 /* deprecated */
+#define DRM_NOUVEAU_CHANNEL_ALLOC 0x02
+#define DRM_NOUVEAU_CHANNEL_FREE 0x03
#define DRM_NOUVEAU_GROBJ_ALLOC 0x04 /* deprecated */
#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x05 /* deprecated */
#define DRM_NOUVEAU_GPUOBJ_FREE 0x06 /* deprecated */
#define DRM_NOUVEAU_NVIF 0x07
#define DRM_NOUVEAU_SVM_INIT 0x08
#define DRM_NOUVEAU_SVM_BIND 0x09
+#define DRM_NOUVEAU_VM_INIT 0x10
+#define DRM_NOUVEAU_VM_BIND 0x11
+#define DRM_NOUVEAU_EXEC 0x12
#define DRM_NOUVEAU_GEM_NEW 0x40
#define DRM_NOUVEAU_GEM_PUSHBUF 0x41
#define DRM_NOUVEAU_GEM_CPU_PREP 0x42
@@ -188,6 +470,10 @@ struct drm_nouveau_svm_bind {
#define NOUVEAU_SVM_BIND_TARGET__GPU_VRAM (1UL << 31)
+#define DRM_IOCTL_NOUVEAU_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GETPARAM, struct drm_nouveau_getparam)
+#define DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_ALLOC, struct drm_nouveau_channel_alloc)
+#define DRM_IOCTL_NOUVEAU_CHANNEL_FREE DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_FREE, struct drm_nouveau_channel_free)
+
#define DRM_IOCTL_NOUVEAU_SVM_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SVM_INIT, struct drm_nouveau_svm_init)
#define DRM_IOCTL_NOUVEAU_SVM_BIND DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SVM_BIND, struct drm_nouveau_svm_bind)
@@ -197,6 +483,9 @@ struct drm_nouveau_svm_bind {
#define DRM_IOCTL_NOUVEAU_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_FINI, struct drm_nouveau_gem_cpu_fini)
#define DRM_IOCTL_NOUVEAU_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_INFO, struct drm_nouveau_gem_info)
+#define DRM_IOCTL_NOUVEAU_VM_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_VM_INIT, struct drm_nouveau_vm_init)
+#define DRM_IOCTL_NOUVEAU_VM_BIND DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_VM_BIND, struct drm_nouveau_vm_bind)
+#define DRM_IOCTL_NOUVEAU_EXEC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_EXEC, struct drm_nouveau_exec)
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/drm/pvr_drm.h b/include/uapi/drm/pvr_drm.h
new file mode 100644
index 000000000000..ccf6c2112468
--- /dev/null
+++ b/include/uapi/drm/pvr_drm.h
@@ -0,0 +1,1295 @@
+/* SPDX-License-Identifier: (GPL-2.0-only WITH Linux-syscall-note) OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_DRM_UAPI_H
+#define PVR_DRM_UAPI_H
+
+#include "drm.h"
+
+#include <linux/const.h>
+#include <linux/types.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/**
+ * DOC: PowerVR UAPI
+ *
+ * The PowerVR IOCTL argument structs have a few limitations in place, in
+ * addition to the standard kernel restrictions:
+ *
+ * - All members must be type-aligned.
+ * - The overall struct must be padded to 64-bit alignment.
+ * - Explicit padding is almost always required. This takes the form of
+ * ``_padding_[x]`` members of sufficient size to pad to the next power-of-two
+ * alignment, where [x] is the offset into the struct in hexadecimal. Arrays
+ * are never used for alignment. Padding fields must be zeroed; this is
+ * always checked.
+ * - Unions may only appear as the last member of a struct.
+ * - Individual union members may grow in the future. The space between the
+ * end of a union member and the end of its containing union is considered
+ * "implicit padding" and must be zeroed. This is always checked.
+ *
+ * In addition to the IOCTL argument structs, the PowerVR UAPI makes use of
+ * DEV_QUERY argument structs. These are used to fetch information about the
+ * device and runtime. These structs are subject to the same rules set out
+ * above.
+ */
+
+/**
+ * struct drm_pvr_obj_array - Container used to pass arrays of objects
+ *
+ * It is not unusual to have to extend objects to pass new parameters, and the DRM
+ * ioctl infrastructure supports that by padding ioctl arguments with zeros
+ * when the data passed by userspace is smaller than the struct defined in the
+ * drm_ioctl_desc, thus keeping things backward compatible. This type is just
+ * applying the same concepts to indirect objects passed through arrays referenced
+ * from the main ioctl arguments structure: the stride basically defines the size
+ * of the object passed by userspace, which allows the kernel driver to pad with
+ * zeros when it's smaller than the size of the object it expects.
+ *
+ * Use ``DRM_PVR_OBJ_ARRAY()`` to fill object array fields, unless you
+ * have a very good reason not to.
+ */
+struct drm_pvr_obj_array {
+ /** @stride: Stride of object struct. Used for versioning. */
+ __u32 stride;
+
+ /** @count: Number of objects in the array. */
+ __u32 count;
+
+ /** @array: User pointer to an array of objects. */
+ __u64 array;
+};
+
+/**
+ * DRM_PVR_OBJ_ARRAY() - Helper macro for filling &struct drm_pvr_obj_array.
+ * @cnt: Number of elements pointed to by @ptr.
+ * @ptr: Pointer to start of a C array.
+ *
+ * Return: Literal of type &struct drm_pvr_obj_array.
+ */
+#define DRM_PVR_OBJ_ARRAY(cnt, ptr) \
+ { .stride = sizeof((ptr)[0]), .count = (cnt), .array = (__u64)(uintptr_t)(ptr) }
+
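A fill-in sketch using the heap-info query defined later in this header:

struct drm_pvr_heap heaps[DRM_PVR_HEAP_COUNT];
struct drm_pvr_dev_query_heap_info heap_info = {
        .heaps = DRM_PVR_OBJ_ARRAY(DRM_PVR_HEAP_COUNT, heaps),
};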
+/**
+ * DOC: PowerVR IOCTL interface
+ */
+
+/**
+ * PVR_IOCTL() - Build a PowerVR IOCTL number
+ * @_ioctl: An incrementing id for this IOCTL. Added to %DRM_COMMAND_BASE.
+ * @_mode: Must be one of %DRM_IOR, %DRM_IOW or %DRM_IOWR.
+ * @_data: The type of the args struct passed by this IOCTL.
+ *
+ * The struct referred to by @_data must have a ``drm_pvr_ioctl_`` prefix and an
+ * ``_args`` suffix. They are therefore omitted from @_data.
+ *
+ * This should only be used to build the constants described below; it should
+ * never be used to call an IOCTL directly.
+ *
+ * Return: An IOCTL number to be passed to ioctl() from userspace.
+ */
+#define PVR_IOCTL(_ioctl, _mode, _data) \
+ _mode(DRM_COMMAND_BASE + (_ioctl), struct drm_pvr_ioctl_##_data##_args)
+
+#define DRM_IOCTL_PVR_DEV_QUERY PVR_IOCTL(0x00, DRM_IOWR, dev_query)
+#define DRM_IOCTL_PVR_CREATE_BO PVR_IOCTL(0x01, DRM_IOWR, create_bo)
+#define DRM_IOCTL_PVR_GET_BO_MMAP_OFFSET PVR_IOCTL(0x02, DRM_IOWR, get_bo_mmap_offset)
+#define DRM_IOCTL_PVR_CREATE_VM_CONTEXT PVR_IOCTL(0x03, DRM_IOWR, create_vm_context)
+#define DRM_IOCTL_PVR_DESTROY_VM_CONTEXT PVR_IOCTL(0x04, DRM_IOW, destroy_vm_context)
+#define DRM_IOCTL_PVR_VM_MAP PVR_IOCTL(0x05, DRM_IOW, vm_map)
+#define DRM_IOCTL_PVR_VM_UNMAP PVR_IOCTL(0x06, DRM_IOW, vm_unmap)
+#define DRM_IOCTL_PVR_CREATE_CONTEXT PVR_IOCTL(0x07, DRM_IOWR, create_context)
+#define DRM_IOCTL_PVR_DESTROY_CONTEXT PVR_IOCTL(0x08, DRM_IOW, destroy_context)
+#define DRM_IOCTL_PVR_CREATE_FREE_LIST PVR_IOCTL(0x09, DRM_IOWR, create_free_list)
+#define DRM_IOCTL_PVR_DESTROY_FREE_LIST PVR_IOCTL(0x0a, DRM_IOW, destroy_free_list)
+#define DRM_IOCTL_PVR_CREATE_HWRT_DATASET PVR_IOCTL(0x0b, DRM_IOWR, create_hwrt_dataset)
+#define DRM_IOCTL_PVR_DESTROY_HWRT_DATASET PVR_IOCTL(0x0c, DRM_IOW, destroy_hwrt_dataset)
+#define DRM_IOCTL_PVR_SUBMIT_JOBS PVR_IOCTL(0x0d, DRM_IOW, submit_jobs)
+
+/**
+ * DOC: PowerVR IOCTL DEV_QUERY interface
+ */
+
+/**
+ * struct drm_pvr_dev_query_gpu_info - Container used to fetch information about
+ * the graphics processor.
+ *
+ * When fetching this type &struct drm_pvr_ioctl_dev_query_args.type must be set
+ * to %DRM_PVR_DEV_QUERY_GPU_INFO_GET.
+ */
+struct drm_pvr_dev_query_gpu_info {
+ /**
+ * @gpu_id: GPU identifier.
+ *
+ * For all currently supported GPUs this is the BVNC encoded as a 64-bit
+ * value as follows:
+ *
+ * +--------+--------+--------+-------+
+ * | 63..48 | 47..32 | 31..16 | 15..0 |
+ * +========+========+========+=======+
+ * | B | V | N | C |
+ * +--------+--------+--------+-------+
+ */
+ __u64 gpu_id;
+
+ /**
+ * @num_phantoms: Number of Phantoms present.
+ */
+ __u32 num_phantoms;
+
+ /** @_padding_c: Reserved. This field must be zeroed. */
+ __u32 _padding_c;
+};
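A decoding sketch for the table above (truncation to 16 bits extracts each field):

__u16 b = gpu_info.gpu_id >> 48;
__u16 v = gpu_info.gpu_id >> 32;
__u16 n = gpu_info.gpu_id >> 16;
__u16 c = gpu_info.gpu_id;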
+
+/**
+ * struct drm_pvr_dev_query_runtime_info - Container used to fetch information
+ * about the graphics runtime.
+ *
+ * When fetching this type &struct drm_pvr_ioctl_dev_query_args.type must be set
+ * to %DRM_PVR_DEV_QUERY_RUNTIME_INFO_GET.
+ */
+struct drm_pvr_dev_query_runtime_info {
+ /**
+ * @free_list_min_pages: Minimum allowed free list size,
+ * in PM physical pages.
+ */
+ __u64 free_list_min_pages;
+
+ /**
+ * @free_list_max_pages: Maximum allowed free list size,
+ * in PM physical pages.
+ */
+ __u64 free_list_max_pages;
+
+ /**
+ * @common_store_alloc_region_size: Size of the Allocation
+ * Region within the Common Store used for coefficient and shared
+ * registers, in dwords.
+ */
+ __u32 common_store_alloc_region_size;
+
+ /**
+ * @common_store_partition_space_size: Size of the
+ * Partition Space within the Common Store for output buffers, in
+ * dwords.
+ */
+ __u32 common_store_partition_space_size;
+
+ /**
+ * @max_coeffs: Maximum coefficients, in dwords.
+ */
+ __u32 max_coeffs;
+
+ /**
+ * @cdm_max_local_mem_size_regs: Maximum amount of local
+ * memory available to a compute kernel, in dwords.
+ */
+ __u32 cdm_max_local_mem_size_regs;
+};
+
+/**
+ * struct drm_pvr_dev_query_quirks - Container used to fetch information about
+ * hardware fixes for which the device may require support in the user mode
+ * driver.
+ *
+ * When fetching this type &struct drm_pvr_ioctl_dev_query_args.type must be set
+ * to %DRM_PVR_DEV_QUERY_QUIRKS_GET.
+ */
+struct drm_pvr_dev_query_quirks {
+ /**
+ * @quirks: A userspace address for the hardware quirks __u32 array.
+ *
+ * The first @musthave_count items in the list are quirks that the
+ * client must support for this device. If userspace does not support
+ * all these quirks then functionality is not guaranteed and client
+ * initialisation must fail.
+ * The remaining quirks in the list affect userspace and the kernel or
+ * firmware. They are disabled by default and require userspace to
+ * opt-in. The opt-in mechanism depends on the quirk.
+ */
+ __u64 quirks;
+
+ /** @count: Length of @quirks (number of __u32). */
+ __u16 count;
+
+ /**
+ * @musthave_count: The number of entries in @quirks that are
+ * mandatory, starting at index 0.
+ */
+ __u16 musthave_count;
+
+ /** @_padding_c: Reserved. This field must be zeroed. */
+ __u32 _padding_c;
+};
+
+/**
+ * struct drm_pvr_dev_query_enhancements - Container used to fetch information
+ * about optional enhancements supported by the device that require support in
+ * the user mode driver.
+ *
+ * When fetching this type &struct drm_pvr_ioctl_dev_query_args.type must be set
+ * to %DRM_PVR_DEV_ENHANCEMENTS_GET.
+ */
+struct drm_pvr_dev_query_enhancements {
+ /**
+ * @enhancements: A userspace address for the hardware enhancements
+ * __u32 array.
+ *
+ * These enhancements affect userspace and the kernel or firmware. They
+ * are disabled by default and require userspace to opt-in. The opt-in
+ * mechanism depends on the enhancement.
+ */
+ __u64 enhancements;
+
+ /** @count: Length of @enhancements (number of __u32). */
+ __u16 count;
+
+ /** @_padding_a: Reserved. This field must be zeroed. */
+ __u16 _padding_a;
+
+ /** @_padding_c: Reserved. This field must be zeroed. */
+ __u32 _padding_c;
+};
+
+/**
+ * enum drm_pvr_heap_id - Array index for heap info data returned by
+ * %DRM_PVR_DEV_QUERY_HEAP_INFO_GET.
+ *
+ * For compatibility reasons all indices will be present in the returned array,
+ * however some heaps may not be present. These are indicated where
+ * &struct drm_pvr_heap.size is set to zero.
+ */
+enum drm_pvr_heap_id {
+ /** @DRM_PVR_HEAP_GENERAL: General purpose heap. */
+ DRM_PVR_HEAP_GENERAL = 0,
+ /** @DRM_PVR_HEAP_PDS_CODE_DATA: PDS code and data heap. */
+ DRM_PVR_HEAP_PDS_CODE_DATA,
+ /** @DRM_PVR_HEAP_USC_CODE: USC code heap. */
+ DRM_PVR_HEAP_USC_CODE,
+ /** @DRM_PVR_HEAP_RGNHDR: Region header heap. Only used if GPU has BRN63142. */
+ DRM_PVR_HEAP_RGNHDR,
+ /** @DRM_PVR_HEAP_VIS_TEST: Visibility test heap. */
+ DRM_PVR_HEAP_VIS_TEST,
+ /** @DRM_PVR_HEAP_TRANSFER_FRAG: Transfer fragment heap. */
+ DRM_PVR_HEAP_TRANSFER_FRAG,
+
+ /**
+ * @DRM_PVR_HEAP_COUNT: The number of heaps returned by
+ * %DRM_PVR_DEV_QUERY_HEAP_INFO_GET.
+ *
+ * More heaps may be added, so this also serves as the copy limit when
+ * sent by the caller.
+ */
+ DRM_PVR_HEAP_COUNT
+ /* Please only add additional heaps above DRM_PVR_HEAP_COUNT! */
+};
+
+/**
+ * struct drm_pvr_heap - Container holding information about a single heap.
+ *
+ * This will always be fetched as an array.
+ */
+struct drm_pvr_heap {
+ /** @base: Base address of heap. */
+ __u64 base;
+
+ /** @size: Size of heap, in bytes. Will be 0 if the heap is not present. */
+ __u64 size;
+
+ /** @flags: Flags for this heap. Currently always 0. */
+ __u32 flags;
+
+ /** @page_size_log2: Log2 of page size. */
+ __u32 page_size_log2;
+};
+
+/**
+ * struct drm_pvr_dev_query_heap_info - Container used to fetch information
+ * about heaps supported by the device driver.
+ *
+ * Please note all driver-supported heaps will be returned up to &heaps.count.
+ * Some heaps will not be present in all devices, which will be indicated by
+ * &struct drm_pvr_heap.size being set to zero.
+ *
+ * When fetching this type &struct drm_pvr_ioctl_dev_query_args.type must be set
+ * to %DRM_PVR_DEV_QUERY_HEAP_INFO_GET.
+ */
+struct drm_pvr_dev_query_heap_info {
+ /**
+ * @heaps: Array of &struct drm_pvr_heap. If pointer is NULL, the count
+ * and stride will be updated with those known to the driver version, to
+ * facilitate allocation by the caller.
+ */
+ struct drm_pvr_obj_array heaps;
+};
+
+/**
+ * enum drm_pvr_static_data_area_usage - Array index for static data area info
+ * returned by %DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET.
+ *
+ * For compatibility reasons all indices will be present in the returned array,
+ * however some areas may not be present. These are indicated where
+ * &struct drm_pvr_static_data_area.size is set to zero.
+ */
+enum drm_pvr_static_data_area_usage {
+ /**
+ * @DRM_PVR_STATIC_DATA_AREA_EOT: End of Tile PDS program code segment.
+ *
+ * The End of Tile PDS task runs at completion of a tile during a fragment job, and is
+ * responsible for emitting the tile to the Pixel Back End.
+ */
+ DRM_PVR_STATIC_DATA_AREA_EOT = 0,
+
+ /**
+ * @DRM_PVR_STATIC_DATA_AREA_FENCE: MCU fence area, used during cache flush and
+ * invalidation.
+ *
+ * This must point to valid physical memory but the contents otherwise are not used.
+ */
+ DRM_PVR_STATIC_DATA_AREA_FENCE,
+
+ /**
+ * @DRM_PVR_STATIC_DATA_AREA_VDM_SYNC: VDM sync program.
+ *
+ * The VDM sync program is used to synchronise multiple areas of the GPU hardware.
+ */
+ DRM_PVR_STATIC_DATA_AREA_VDM_SYNC,
+
+ /**
+ * @DRM_PVR_STATIC_DATA_AREA_YUV_CSC: YUV coefficients.
+ *
+ * Area contains up to 16 slots with stride of 64 bytes. Each is a 3x4 matrix of u16 fixed
+ * point numbers, with 1 sign bit, 2 integer bits and 13 fractional bits.
+ *
+ * The slots are:
+ * 0 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY_KHR
+ * 1 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY_KHR (full range)
+ * 2 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY_KHR (conformant range)
+ * 3 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709_KHR (full range)
+ * 4 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709_KHR (conformant range)
+ * 5 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601_KHR (full range)
+ * 6 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601_KHR (conformant range)
+ * 7 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020_KHR (full range)
+ * 8 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020_KHR (conformant range)
+ * 9 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601_KHR (conformant range, 10 bit)
+ * 10 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709_KHR (conformant range, 10 bit)
+ * 11 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020_KHR (conformant range, 10 bit)
+ * 14 = Identity (biased)
+ * 15 = Identity
+ */
+ DRM_PVR_STATIC_DATA_AREA_YUV_CSC,
+};
+
+/**
+ * struct drm_pvr_static_data_area - Container holding information about a
+ * single static data area.
+ *
+ * This will always be fetched as an array.
+ */
+struct drm_pvr_static_data_area {
+ /**
+ * @area_usage: Usage of static data area.
+ * See &enum drm_pvr_static_data_area_usage.
+ */
+ __u16 area_usage;
+
+ /**
+ * @location_heap_id: Array index of heap where this static data
+ * area is located. This array is fetched using
+ * %DRM_PVR_DEV_QUERY_HEAP_INFO_GET.
+ */
+ __u16 location_heap_id;
+
+ /** @size: Size of static data area. Not present if set to zero. */
+ __u32 size;
+
+ /** @offset: Offset of static data area from start of heap. */
+ __u64 offset;
+};
+
+/**
+ * struct drm_pvr_dev_query_static_data_areas - Container used to fetch
+ * information about the static data areas in heaps supported by the device
+ * driver.
+ *
+ * Please note all driver-supported static data areas will be returned up to
+ * &static_data_areas.count. Some will not be present for all devices, which
+ * will be indicated by &struct drm_pvr_static_data_area.size being set to zero.
+ *
+ * Further, some heaps will not be present either. See &struct
+ * drm_pvr_dev_query_heap_info.
+ *
+ * When fetching this type &struct drm_pvr_ioctl_dev_query_args.type must be set
+ * to %DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET.
+ */
+struct drm_pvr_dev_query_static_data_areas {
+ /**
+ * @static_data_areas: Array of &struct drm_pvr_static_data_area. If
+ * pointer is NULL, the count and stride will be updated with those
+ * known to the driver version, to facilitate allocation by the caller.
+ */
+ struct drm_pvr_obj_array static_data_areas;
+};
+
+/**
+ * enum drm_pvr_dev_query - For use with &drm_pvr_ioctl_dev_query_args.type to
+ * indicate the type of the receiving container.
+ *
+ * Append only. Do not reorder.
+ */
+enum drm_pvr_dev_query {
+ /**
+ * @DRM_PVR_DEV_QUERY_GPU_INFO_GET: The dev query args contain a pointer
+ * to &struct drm_pvr_dev_query_gpu_info.
+ */
+ DRM_PVR_DEV_QUERY_GPU_INFO_GET = 0,
+
+ /**
+ * @DRM_PVR_DEV_QUERY_RUNTIME_INFO_GET: The dev query args contain a
+ * pointer to &struct drm_pvr_dev_query_runtime_info.
+ */
+ DRM_PVR_DEV_QUERY_RUNTIME_INFO_GET,
+
+ /**
+ * @DRM_PVR_DEV_QUERY_QUIRKS_GET: The dev query args contain a pointer
+ * to &struct drm_pvr_dev_query_quirks.
+ */
+ DRM_PVR_DEV_QUERY_QUIRKS_GET,
+
+ /**
+ * @DRM_PVR_DEV_QUERY_ENHANCEMENTS_GET: The dev query args contain a
+ * pointer to &struct drm_pvr_dev_query_enhancements.
+ */
+ DRM_PVR_DEV_QUERY_ENHANCEMENTS_GET,
+
+ /**
+ * @DRM_PVR_DEV_QUERY_HEAP_INFO_GET: The dev query args contain a
+ * pointer to &struct drm_pvr_dev_query_heap_info.
+ */
+ DRM_PVR_DEV_QUERY_HEAP_INFO_GET,
+
+ /**
+ * @DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET: The dev query args contain
+ * a pointer to &struct drm_pvr_dev_query_static_data_areas.
+ */
+ DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET,
+};
+
+/**
+ * struct drm_pvr_ioctl_dev_query_args - Arguments for %DRM_IOCTL_PVR_DEV_QUERY.
+ */
+struct drm_pvr_ioctl_dev_query_args {
+ /**
+ * @type: Type of query and output struct. See &enum drm_pvr_dev_query.
+ */
+ __u32 type;
+
+ /**
+ * @size: Size of the receiving struct, see @type.
+ *
+ * After a successful call this will be updated to the written byte
+ * length.
+ * Can also be used to get the minimum byte length (see @pointer).
+ * This allows additional fields to be appended to the structs in
+ * future.
+ */
+ __u32 size;
+
+ /**
+ * @pointer: Pointer to struct @type.
+ *
+ * Must be large enough to contain @size bytes.
+ * If pointer is NULL, the expected size will be returned in the @size
+ * field, but no other data will be written.
+ */
+ __u64 pointer;
+};
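A single-pass query sketch (passing a NULL @pointer first would instead just fetch the expected @size):

struct drm_pvr_dev_query_gpu_info gpu_info = {0};
struct drm_pvr_ioctl_dev_query_args args = {
        .type    = DRM_PVR_DEV_QUERY_GPU_INFO_GET,
        .size    = sizeof(gpu_info),
        .pointer = (__u64)(uintptr_t)&gpu_info,
};

int err = ioctl(fd, DRM_IOCTL_PVR_DEV_QUERY, &args);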
+
+/**
+ * DOC: PowerVR IOCTL CREATE_BO interface
+ */
+
+/**
+ * DOC: Flags for CREATE_BO
+ *
+ * We use "device" to refer to the GPU here because of the ambiguity between CPU and GPU in some
+ * fonts.
+ *
+ * Device mapping options
+ * :DRM_PVR_BO_BYPASS_DEVICE_CACHE: Specify that device accesses to this memory will bypass the
+ * cache. This is used for buffers that will either be regularly updated by the CPU (e.g. free
+ * lists) or will be accessed only once and therefore aren't worth caching (e.g. partial render
+ * buffers).
+ * By default, the device flushes its memory caches after every job, so this is not normally
+ * required for coherency.
+ * :DRM_PVR_BO_PM_FW_PROTECT: Specify that only the Parameter Manager (PM) and/or firmware
+ * processor should be allowed to access this memory when mapped to the device. It is not
+ * valid to specify this flag with DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS.
+ *
+ * CPU mapping options
+ * :DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS: Allow userspace to map and access the contents of this
+ * memory. It is not valid to specify this flag with DRM_PVR_BO_PM_FW_PROTECT.
+ */
+#define DRM_PVR_BO_BYPASS_DEVICE_CACHE _BITULL(0)
+#define DRM_PVR_BO_PM_FW_PROTECT _BITULL(1)
+#define DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS _BITULL(2)
+/* Bits 3..63 are reserved. */
+
+#define DRM_PVR_BO_FLAGS_MASK (DRM_PVR_BO_BYPASS_DEVICE_CACHE | DRM_PVR_BO_PM_FW_PROTECT | \
+ DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS)
+
+/**
+ * struct drm_pvr_ioctl_create_bo_args - Arguments for %DRM_IOCTL_PVR_CREATE_BO
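+ *
+ * A minimal creation sketch (assuming an open fd ``fd`` and a 4KiB page
+ * size; error handling omitted):
+ *
+ * .. code-block:: C
+ *
+ *     struct drm_pvr_ioctl_create_bo_args bo_args = {
+ *         .size = 4096, /* must be page size aligned */
+ *         .flags = DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS,
+ *     };
+ *
+ *     ioctl(fd, DRM_IOCTL_PVR_CREATE_BO, &bo_args);
+ *     /* bo_args.handle now holds the GEM handle */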
+ */
+struct drm_pvr_ioctl_create_bo_args {
+ /**
+ * @size: [IN] Size of buffer object to create. This must be page size
+ * aligned.
+ */
+ __u64 size;
+
+ /**
+ * @handle: [OUT] GEM handle of the new buffer object for use in
+ * userspace.
+ */
+ __u32 handle;
+
+ /** @_padding_c: Reserved. This field must be zeroed. */
+ __u32 _padding_c;
+
+ /**
+ * @flags: [IN] Options which will affect the behaviour of this
+ * creation operation and future mapping operations on the created
+ * object. This field must be a valid combination of ``DRM_PVR_BO_*``
+ * values, with all bits marked as reserved set to zero.
+ */
+ __u64 flags;
+};
+
+/**
+ * DOC: PowerVR IOCTL GET_BO_MMAP_OFFSET interface
+ */
+
+/**
+ * struct drm_pvr_ioctl_get_bo_mmap_offset_args - Arguments for
+ * %DRM_IOCTL_PVR_GET_BO_MMAP_OFFSET
+ *
+ * As with other DRM drivers, the "mmap" IOCTL doesn't actually map any memory.
+ * Instead, it allocates a fake offset which refers to the specified buffer
+ * object. This offset can be used with a real mmap call on the DRM device
+ * itself.
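+ *
+ * A usage sketch (assuming ``fd`` and a handle from a prior
+ * %DRM_IOCTL_PVR_CREATE_BO call; error handling omitted):
+ *
+ * .. code-block:: C
+ *
+ *     struct drm_pvr_ioctl_get_bo_mmap_offset_args off_args = {
+ *         .handle = bo_args.handle,
+ *     };
+ *
+ *     ioctl(fd, DRM_IOCTL_PVR_GET_BO_MMAP_OFFSET, &off_args);
+ *     void *map = mmap(NULL, bo_args.size, PROT_READ | PROT_WRITE,
+ *                      MAP_SHARED, fd, off_args.offset);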
+ */
+struct drm_pvr_ioctl_get_bo_mmap_offset_args {
+ /** @handle: [IN] GEM handle of the buffer object to be mapped. */
+ __u32 handle;
+
+ /** @_padding_4: Reserved. This field must be zeroed. */
+ __u32 _padding_4;
+
+ /** @offset: [OUT] Fake offset to use in the real mmap call. */
+ __u64 offset;
+};
+
+/**
+ * DOC: PowerVR IOCTL CREATE_VM_CONTEXT and DESTROY_VM_CONTEXT interfaces
+ */
+
+/**
+ * struct drm_pvr_ioctl_create_vm_context_args - Arguments for
+ * %DRM_IOCTL_PVR_CREATE_VM_CONTEXT
+ */
+struct drm_pvr_ioctl_create_vm_context_args {
+ /** @handle: [OUT] Handle for new VM context. */
+ __u32 handle;
+
+ /** @_padding_4: Reserved. This field must be zeroed. */
+ __u32 _padding_4;
+};
+
+/**
+ * struct drm_pvr_ioctl_destroy_vm_context_args - Arguments for
+ * %DRM_IOCTL_PVR_DESTROY_VM_CONTEXT
+ */
+struct drm_pvr_ioctl_destroy_vm_context_args {
+ /**
+ * @handle: [IN] Handle for VM context to be destroyed.
+ */
+ __u32 handle;
+
+ /** @_padding_4: Reserved. This field must be zeroed. */
+ __u32 _padding_4;
+};
+
+/**
+ * DOC: PowerVR IOCTL VM_MAP and VM_UNMAP interfaces
+ *
+ * The VM UAPI allows userspace to create buffer object mappings in GPU virtual address space.
+ *
+ * The client is responsible for managing GPU address space. It should allocate mappings within
+ * the heaps returned by %DRM_PVR_DEV_QUERY_HEAP_INFO_GET.
+ *
+ * %DRM_IOCTL_PVR_VM_MAP creates a new mapping. The client provides the target virtual address for
+ * the mapping. Size and offset within the mapped buffer object can be specified, so the client can
+ * partially map a buffer.
+ *
+ * %DRM_IOCTL_PVR_VM_UNMAP removes a mapping. The entire mapping will be removed from GPU address
+ * space only if the size of the mapping matches that known to the driver.
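+ *
+ * For illustration, mapping a whole buffer object at a device-virtual
+ * address inside a queried heap might look like this (``fd``, ``vm_ctx``,
+ * ``bo_args`` and ``dev_addr`` are assumed; error handling omitted):
+ *
+ * .. code-block:: C
+ *
+ *     struct drm_pvr_ioctl_vm_map_args map_args = {
+ *         .vm_context_handle = vm_ctx,
+ *         .device_addr = dev_addr, /* within a heap, suitably aligned */
+ *         .handle = bo_args.handle,
+ *         .offset = 0,
+ *         .size = bo_args.size,
+ *     };
+ *
+ *     ioctl(fd, DRM_IOCTL_PVR_VM_MAP, &map_args);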
+ */
+
+/**
+ * struct drm_pvr_ioctl_vm_map_args - Arguments for %DRM_IOCTL_PVR_VM_MAP.
+ */
+struct drm_pvr_ioctl_vm_map_args {
+ /**
+ * @vm_context_handle: [IN] Handle for VM context for this mapping to
+ * exist in.
+ */
+ __u32 vm_context_handle;
+
+ /** @flags: [IN] Flags which affect this mapping. Currently always 0. */
+ __u32 flags;
+
+ /**
+ * @device_addr: [IN] Requested device-virtual address for the mapping.
+ * This must be non-zero and aligned to the device page size for the
+ * heap containing the requested address. It is an error to specify an
+ * address which is not contained within one of the heaps returned by
+ * %DRM_PVR_DEV_QUERY_HEAP_INFO_GET.
+ */
+ __u64 device_addr;
+
+ /**
+ * @handle: [IN] Handle of the target buffer object. This must be a
+ * valid handle returned by %DRM_IOCTL_PVR_CREATE_BO.
+ */
+ __u32 handle;
+
+ /** @_padding_14: Reserved. This field must be zeroed. */
+ __u32 _padding_14;
+
+ /**
+ * @offset: [IN] Offset into the target bo from which to begin the
+ * mapping.
+ */
+ __u64 offset;
+
+ /**
+ * @size: [IN] Size of the requested mapping. Must be aligned to
+ * the device page size for the heap containing the requested address,
+ * as well as the host page size. When added to @device_addr, the
+ * result must not overflow the heap which contains @device_addr (i.e.
+ * the range specified by @device_addr and @size must be completely
+ * contained within a single heap specified by
+ * %DRM_PVR_DEV_QUERY_HEAP_INFO_GET).
+ */
+ __u64 size;
+};
+
+/**
+ * struct drm_pvr_ioctl_vm_unmap_args - Arguments for %DRM_IOCTL_PVR_VM_UNMAP.
+ */
+struct drm_pvr_ioctl_vm_unmap_args {
+ /**
+ * @vm_context_handle: [IN] Handle for VM context that this mapping
+ * exists in.
+ */
+ __u32 vm_context_handle;
+
+ /** @_padding_4: Reserved. This field must be zeroed. */
+ __u32 _padding_4;
+
+ /**
+ * @device_addr: [IN] Device-virtual address at the start of the target
+ * mapping. This must be non-zero.
+ */
+ __u64 device_addr;
+
+ /**
+ * @size: Size in bytes of the target mapping. This must be non-zero.
+ */
+ __u64 size;
+};
+
+/**
+ * DOC: PowerVR IOCTL CREATE_CONTEXT and DESTROY_CONTEXT interfaces
+ */
+
+/**
+ * enum drm_pvr_ctx_priority - Arguments for
+ * &drm_pvr_ioctl_create_context_args.priority
+ */
+enum drm_pvr_ctx_priority {
+ /** @DRM_PVR_CTX_PRIORITY_LOW: Priority below normal. */
+ DRM_PVR_CTX_PRIORITY_LOW = -512,
+
+ /** @DRM_PVR_CTX_PRIORITY_NORMAL: Normal priority. */
+ DRM_PVR_CTX_PRIORITY_NORMAL = 0,
+
+ /**
+ * @DRM_PVR_CTX_PRIORITY_HIGH: Priority above normal.
+ * Note this requires ``CAP_SYS_NICE`` or ``DRM_MASTER``.
+ */
+ DRM_PVR_CTX_PRIORITY_HIGH = 512,
+};
+
+/**
+ * enum drm_pvr_ctx_type - Arguments for
+ * &struct drm_pvr_ioctl_create_context_args.type
+ */
+enum drm_pvr_ctx_type {
+ /**
+ * @DRM_PVR_CTX_TYPE_RENDER: Render context.
+ */
+ DRM_PVR_CTX_TYPE_RENDER = 0,
+
+ /**
+ * @DRM_PVR_CTX_TYPE_COMPUTE: Compute context.
+ */
+ DRM_PVR_CTX_TYPE_COMPUTE,
+
+ /**
+ * @DRM_PVR_CTX_TYPE_TRANSFER_FRAG: Transfer context for fragment data
+ * master.
+ */
+ DRM_PVR_CTX_TYPE_TRANSFER_FRAG,
+};
+
+/**
+ * struct drm_pvr_ioctl_create_context_args - Arguments for
+ * %DRM_IOCTL_PVR_CREATE_CONTEXT
+ */
+struct drm_pvr_ioctl_create_context_args {
+ /**
+ * @type: [IN] Type of context to create.
+ *
+ * This must be one of the values defined by &enum drm_pvr_ctx_type.
+ */
+ __u32 type;
+
+ /** @flags: [IN] Flags for context. */
+ __u32 flags;
+
+ /**
+ * @priority: [IN] Priority of new context.
+ *
+ * This must be one of the values defined by &enum drm_pvr_ctx_priority.
+ */
+ __s32 priority;
+
+ /** @handle: [OUT] Handle for new context. */
+ __u32 handle;
+
+ /**
+ * @static_context_state: [IN] Pointer to static context state stream.
+ */
+ __u64 static_context_state;
+
+ /**
+ * @static_context_state_len: [IN] Length of static context state, in bytes.
+ */
+ __u32 static_context_state_len;
+
+ /**
+ * @vm_context_handle: [IN] Handle for VM context that this context is
+ * associated with.
+ */
+ __u32 vm_context_handle;
+
+ /**
+ * @callstack_addr: [IN] Address for initial call stack pointer. Only valid
+ * if @type is %DRM_PVR_CTX_TYPE_RENDER, otherwise must be 0.
+ */
+ __u64 callstack_addr;
+};
+
+/**
+ * struct drm_pvr_ioctl_destroy_context_args - Arguments for
+ * %DRM_IOCTL_PVR_DESTROY_CONTEXT
+ */
+struct drm_pvr_ioctl_destroy_context_args {
+ /**
+ * @handle: [IN] Handle for context to be destroyed.
+ */
+ __u32 handle;
+
+ /** @_padding_4: Reserved. This field must be zeroed. */
+ __u32 _padding_4;
+};
+
+/**
+ * DOC: PowerVR IOCTL CREATE_FREE_LIST and DESTROY_FREE_LIST interfaces
+ */
+
+/**
+ * struct drm_pvr_ioctl_create_free_list_args - Arguments for
+ * %DRM_IOCTL_PVR_CREATE_FREE_LIST
+ *
+ * Free list arguments have the following constraints:
+ *
+ * - @max_num_pages must be greater than zero.
+ * - @grow_threshold must be between 0 and 100.
+ * - @grow_num_pages must be less than or equal to @max_num_pages.
+ * - @initial_num_pages, @max_num_pages and @grow_num_pages must be multiples
+ * of 4.
+ * - When @grow_num_pages is 0, @initial_num_pages must be equal to
+ * @max_num_pages.
+ * - When @grow_num_pages is non-zero, @initial_num_pages must be less than
+ * @max_num_pages.
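+ *
+ * For example, the following values satisfy all of the above constraints
+ * (they are illustrative only, not recommendations; ``fl_dev_addr`` and
+ * ``vm_ctx`` are assumed to exist):
+ *
+ * .. code-block:: C
+ *
+ *     struct drm_pvr_ioctl_create_free_list_args fl_args = {
+ *         .free_list_gpu_addr = fl_dev_addr, /* existing GPU mapping */
+ *         .initial_num_pages = 256,
+ *         .max_num_pages = 1024,
+ *         .grow_num_pages = 256,  /* non-zero, so initial < max */
+ *         .grow_threshold = 75,
+ *         .vm_context_handle = vm_ctx,
+ *     };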
+ */
+struct drm_pvr_ioctl_create_free_list_args {
+ /**
+ * @free_list_gpu_addr: [IN] Address of GPU mapping of buffer object
+ * containing memory to be used by free list.
+ *
+ * The mapped region of the buffer object must be at least
+ * @max_num_pages * ``sizeof(__u32)``.
+ *
+ * The buffer object must have been created with
+ * %DRM_PVR_BO_PM_FW_PROTECT set and
+ * %DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS not set.
+ */
+ __u64 free_list_gpu_addr;
+
+ /** @initial_num_pages: [IN] Pages initially allocated to free list. */
+ __u32 initial_num_pages;
+
+ /** @max_num_pages: [IN] Maximum number of pages in free list. */
+ __u32 max_num_pages;
+
+ /** @grow_num_pages: [IN] Pages to grow free list by per request. */
+ __u32 grow_num_pages;
+
+ /**
+ * @grow_threshold: [IN] Percentage of FL memory used that should
+ * trigger a new grow request.
+ */
+ __u32 grow_threshold;
+
+ /**
+ * @vm_context_handle: [IN] Handle for VM context that the free list buffer
+ * object is mapped in.
+ */
+ __u32 vm_context_handle;
+
+ /**
+ * @handle: [OUT] Handle for created free list.
+ */
+ __u32 handle;
+};
+
+/**
+ * struct drm_pvr_ioctl_destroy_free_list_args - Arguments for
+ * %DRM_IOCTL_PVR_DESTROY_FREE_LIST
+ */
+struct drm_pvr_ioctl_destroy_free_list_args {
+ /**
+ * @handle: [IN] Handle for free list to be destroyed.
+ */
+ __u32 handle;
+
+ /** @_padding_4: Reserved. This field must be zeroed. */
+ __u32 _padding_4;
+};
+
+/**
+ * DOC: PowerVR IOCTL CREATE_HWRT_DATASET and DESTROY_HWRT_DATASET interfaces
+ */
+
+/**
+ * struct drm_pvr_create_hwrt_geom_data_args - Geometry data arguments used for
+ * &struct drm_pvr_ioctl_create_hwrt_dataset_args.geom_data_args.
+ */
+struct drm_pvr_create_hwrt_geom_data_args {
+ /** @tpc_dev_addr: [IN] Tail pointer cache GPU virtual address. */
+ __u64 tpc_dev_addr;
+
+ /** @tpc_size: [IN] Size of TPC, in bytes. */
+ __u32 tpc_size;
+
+ /** @tpc_stride: [IN] Stride between layers in TPC, in pages. */
+ __u32 tpc_stride;
+
+ /** @vheap_table_dev_addr: [IN] VHEAP table GPU virtual address. */
+ __u64 vheap_table_dev_addr;
+
+ /** @rtc_dev_addr: [IN] Render Target Cache virtual address. */
+ __u64 rtc_dev_addr;
+};
+
+/**
+ * struct drm_pvr_create_hwrt_rt_data_args - Render target arguments used for
+ * &struct drm_pvr_ioctl_create_hwrt_dataset_args.rt_data_args.
+ */
+struct drm_pvr_create_hwrt_rt_data_args {
+ /** @pm_mlist_dev_addr: [IN] PM MLIST GPU virtual address. */
+ __u64 pm_mlist_dev_addr;
+
+ /** @macrotile_array_dev_addr: [IN] Macrotile array GPU virtual address. */
+ __u64 macrotile_array_dev_addr;
+
+ /** @region_header_dev_addr: [IN] Region header array GPU virtual address. */
+ __u64 region_header_dev_addr;
+};
+
+#define PVR_DRM_HWRT_FREE_LIST_LOCAL 0
+#define PVR_DRM_HWRT_FREE_LIST_GLOBAL 1U
+
+/**
+ * struct drm_pvr_ioctl_create_hwrt_dataset_args - Arguments for
+ * %DRM_IOCTL_PVR_CREATE_HWRT_DATASET
+ */
+struct drm_pvr_ioctl_create_hwrt_dataset_args {
+ /** @geom_data_args: [IN] Geometry data arguments. */
+ struct drm_pvr_create_hwrt_geom_data_args geom_data_args;
+
+ /**
+ * @rt_data_args: [IN] Array of render target arguments.
+ *
+ * Each entry in this array represents a render target in a double buffered
+ * setup.
+ */
+ struct drm_pvr_create_hwrt_rt_data_args rt_data_args[2];
+
+ /**
+ * @free_list_handles: [IN] Array of free list handles.
+ *
+ * free_list_handles[PVR_DRM_HWRT_FREE_LIST_LOCAL] must have an initial
+ * size of at least that reported by
+ * &drm_pvr_dev_query_runtime_info.free_list_min_pages.
+ */
+ __u32 free_list_handles[2];
+
+ /** @width: [IN] Width in pixels. */
+ __u32 width;
+
+ /** @height: [IN] Height in pixels. */
+ __u32 height;
+
+ /** @samples: [IN] Number of samples. */
+ __u32 samples;
+
+ /** @layers: [IN] Number of layers. */
+ __u32 layers;
+
+ /** @isp_merge_lower_x: [IN] Lower X coefficient for triangle merging. */
+ __u32 isp_merge_lower_x;
+
+ /** @isp_merge_lower_y: [IN] Lower Y coefficient for triangle merging. */
+ __u32 isp_merge_lower_y;
+
+ /** @isp_merge_scale_x: [IN] Scale X coefficient for triangle merging. */
+ __u32 isp_merge_scale_x;
+
+ /** @isp_merge_scale_y: [IN] Scale Y coefficient for triangle merging. */
+ __u32 isp_merge_scale_y;
+
+ /** @isp_merge_upper_x: [IN] Upper X coefficient for triangle merging. */
+ __u32 isp_merge_upper_x;
+
+ /** @isp_merge_upper_y: [IN] Upper Y coefficient for triangle merging. */
+ __u32 isp_merge_upper_y;
+
+ /**
+ * @region_header_size: [IN] Size of region header array. This common field is used by
+ * both render targets in this data set.
+ *
+ * The units for this field differ depending on what version of the simple internal
+ * parameter format the device uses. If format 2 is in use then this is interpreted as the
+ * number of region headers. For other formats it is interpreted as the size in dwords.
+ */
+ __u32 region_header_size;
+
+ /**
+ * @handle: [OUT] Handle for created HWRT dataset.
+ */
+ __u32 handle;
+};
+
+/**
+ * struct drm_pvr_ioctl_destroy_hwrt_dataset_args - Arguments for
+ * %DRM_IOCTL_PVR_DESTROY_HWRT_DATASET
+ */
+struct drm_pvr_ioctl_destroy_hwrt_dataset_args {
+ /**
+ * @handle: [IN] Handle for HWRT dataset to be destroyed.
+ */
+ __u32 handle;
+
+ /** @_padding_4: Reserved. This field must be zeroed. */
+ __u32 _padding_4;
+};
+
+/**
+ * DOC: PowerVR IOCTL SUBMIT_JOBS interface
+ */
+
+/**
+ * DOC: Flags for the drm_pvr_sync_op object.
+ *
+ * .. c:macro:: DRM_PVR_SYNC_OP_HANDLE_TYPE_MASK
+ *
+ * Handle type mask for the drm_pvr_sync_op::flags field.
+ *
+ * .. c:macro:: DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ
+ *
+ * Indicates the handle passed in drm_pvr_sync_op::handle is a syncobj handle.
+ * This is the default type.
+ *
+ * .. c:macro:: DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_TIMELINE_SYNCOBJ
+ *
+ * Indicates the handle passed in drm_pvr_sync_op::handle is a timeline syncobj handle.
+ *
+ * .. c:macro:: DRM_PVR_SYNC_OP_FLAG_SIGNAL
+ *
+ * Signal operation requested. The out-fence bound to the job will be attached to
+ * the syncobj whose handle is passed in drm_pvr_sync_op::handle.
+ *
+ * .. c:macro:: DRM_PVR_SYNC_OP_FLAG_WAIT
+ *
+ * Wait operation requested. The job will wait for this particular syncobj or syncobj
+ * point to be signaled before being started.
+ * This is the default operation.
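+ *
+ * For example, a signal operation on point 1 of a timeline syncobj could be
+ * encoded as follows (a sketch; ``syncobj_handle`` is assumed to exist):
+ *
+ * .. code-block:: C
+ *
+ *     struct drm_pvr_sync_op op = {
+ *         .handle = syncobj_handle,
+ *         .flags = DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_TIMELINE_SYNCOBJ |
+ *                  DRM_PVR_SYNC_OP_FLAG_SIGNAL,
+ *         .value = 1, /* timeline point */
+ *     };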
+ */
+#define DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_MASK 0xf
+#define DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ 0
+#define DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_TIMELINE_SYNCOBJ 1
+#define DRM_PVR_SYNC_OP_FLAG_SIGNAL _BITULL(31)
+#define DRM_PVR_SYNC_OP_FLAG_WAIT 0
+
+#define DRM_PVR_SYNC_OP_FLAGS_MASK (DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_MASK | \
+ DRM_PVR_SYNC_OP_FLAG_SIGNAL)
+
+/**
+ * struct drm_pvr_sync_op - Object describing a sync operation
+ */
+struct drm_pvr_sync_op {
+ /** @handle: Handle of sync object. */
+ __u32 handle;
+
+ /** @flags: Combination of ``DRM_PVR_SYNC_OP_FLAG_`` flags. */
+ __u32 flags;
+
+ /** @value: Timeline value for this drm_syncobj. MBZ for a binary syncobj. */
+ __u64 value;
+};
+
+/**
+ * DOC: Flags for SUBMIT_JOB ioctl geometry command.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_GEOM_CMD_FIRST
+ *
+ * Indicates if this is the first command to be issued for a render.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_GEOM_CMD_LAST
+ *
+ * Indicates if this is the last command to be issued for a render.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_GEOM_CMD_SINGLE_CORE
+ *
+ * Forces the job to use a single core in a multi-core device.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_GEOM_CMD_FLAGS_MASK
+ *
+ * Logical OR of all the geometry cmd flags.
+ */
+#define DRM_PVR_SUBMIT_JOB_GEOM_CMD_FIRST _BITULL(0)
+#define DRM_PVR_SUBMIT_JOB_GEOM_CMD_LAST _BITULL(1)
+#define DRM_PVR_SUBMIT_JOB_GEOM_CMD_SINGLE_CORE _BITULL(2)
+#define DRM_PVR_SUBMIT_JOB_GEOM_CMD_FLAGS_MASK \
+ (DRM_PVR_SUBMIT_JOB_GEOM_CMD_FIRST | \
+ DRM_PVR_SUBMIT_JOB_GEOM_CMD_LAST | \
+ DRM_PVR_SUBMIT_JOB_GEOM_CMD_SINGLE_CORE)
+
+/**
+ * DOC: Flags for SUBMIT_JOB ioctl fragment command.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_SINGLE_CORE
+ *
+ * Use a single core in a multi-core setup.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_DEPTHBUFFER
+ *
+ * Indicates whether a depth buffer is present.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_STENCILBUFFER
+ *
+ * Indicates whether a stencil buffer is present.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_PREVENT_CDM_OVERLAP
+ *
+ * Disallow compute overlapped with this render.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_GET_VIS_RESULTS
+ *
+ * Indicates whether this render produces visibility results.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_SCRATCHBUFFER
+ *
+ * Indicates whether partial renders write to a scratch buffer instead of
+ * the final surface. It also forces the full-screen copy that is expected
+ * to be present on the last render after all partial renders have completed.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_DISABLE_PIXELMERGE
+ *
+ * Disable pixel merging for this render.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_FLAGS_MASK
+ *
+ * Logical OR of all the fragment cmd flags.
+ */
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_SINGLE_CORE _BITULL(0)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_DEPTHBUFFER _BITULL(1)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_STENCILBUFFER _BITULL(2)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_PREVENT_CDM_OVERLAP _BITULL(3)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_SCRATCHBUFFER _BITULL(4)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_GET_VIS_RESULTS _BITULL(5)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_PARTIAL_RENDER _BITULL(6)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_DISABLE_PIXELMERGE _BITULL(7)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_FLAGS_MASK \
+ (DRM_PVR_SUBMIT_JOB_FRAG_CMD_SINGLE_CORE | \
+ DRM_PVR_SUBMIT_JOB_FRAG_CMD_DEPTHBUFFER | \
+ DRM_PVR_SUBMIT_JOB_FRAG_CMD_STENCILBUFFER | \
+ DRM_PVR_SUBMIT_JOB_FRAG_CMD_PREVENT_CDM_OVERLAP | \
+ DRM_PVR_SUBMIT_JOB_FRAG_CMD_SCRATCHBUFFER | \
+ DRM_PVR_SUBMIT_JOB_FRAG_CMD_GET_VIS_RESULTS | \
+ DRM_PVR_SUBMIT_JOB_FRAG_CMD_PARTIAL_RENDER | \
+ DRM_PVR_SUBMIT_JOB_FRAG_CMD_DISABLE_PIXELMERGE)
+
+/**
+ * DOC: Flags for SUBMIT_JOB ioctl compute command.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_PREVENT_ALL_OVERLAP
+ *
+ * Disallow other jobs overlapped with this compute.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_SINGLE_CORE
+ *
+ * Forces the job to use a single core in a multi-core device.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_FLAGS_MASK
+ *
+ * Logical OR of all the compute cmd flags.
+ */
+#define DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_PREVENT_ALL_OVERLAP _BITULL(0)
+#define DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_SINGLE_CORE _BITULL(1)
+#define DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_FLAGS_MASK \
+ (DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_PREVENT_ALL_OVERLAP | \
+ DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_SINGLE_CORE)
+
+/**
+ * DOC: Flags for SUBMIT_JOB ioctl transfer command.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_SINGLE_CORE
+ *
+ * Forces the job to use a single core in a multi-core device.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_FLAGS_MASK
+ *
+ * Logical OR of all the transfer cmd flags.
+ */
+#define DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_SINGLE_CORE _BITULL(0)
+
+#define DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_FLAGS_MASK \
+ DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_SINGLE_CORE
+
+/**
+ * enum drm_pvr_job_type - Arguments for &struct drm_pvr_job.job_type
+ */
+enum drm_pvr_job_type {
+ /** @DRM_PVR_JOB_TYPE_GEOMETRY: Job type is geometry. */
+ DRM_PVR_JOB_TYPE_GEOMETRY = 0,
+
+ /** @DRM_PVR_JOB_TYPE_FRAGMENT: Job type is fragment. */
+ DRM_PVR_JOB_TYPE_FRAGMENT,
+
+ /** @DRM_PVR_JOB_TYPE_COMPUTE: Job type is compute. */
+ DRM_PVR_JOB_TYPE_COMPUTE,
+
+ /** @DRM_PVR_JOB_TYPE_TRANSFER_FRAG: Job type is a fragment transfer. */
+ DRM_PVR_JOB_TYPE_TRANSFER_FRAG,
+};
+
+/**
+ * struct drm_pvr_hwrt_data_ref - Reference HWRT data
+ */
+struct drm_pvr_hwrt_data_ref {
+ /** @set_handle: HWRT data set handle. */
+ __u32 set_handle;
+
+ /** @data_index: Index of the HWRT data inside the data set. */
+ __u32 data_index;
+};
+
+/**
+ * struct drm_pvr_job - Job arguments passed to the %DRM_IOCTL_PVR_SUBMIT_JOBS ioctl
+ */
+struct drm_pvr_job {
+ /**
+ * @type: [IN] Type of job being submitted
+ *
+ * This must be one of the values defined by &enum drm_pvr_job_type.
+ */
+ __u32 type;
+
+ /**
+ * @context_handle: [IN] Context handle.
+ *
+ * When @type is %DRM_PVR_JOB_TYPE_GEOMETRY, %DRM_PVR_JOB_TYPE_FRAGMENT,
+ * %DRM_PVR_JOB_TYPE_COMPUTE or %DRM_PVR_JOB_TYPE_TRANSFER_FRAG, this must
+ * be a valid handle returned by %DRM_IOCTL_PVR_CREATE_CONTEXT. The type of
+ * context must be compatible with the type of job being submitted.
+ */
+ __u32 context_handle;
+
+ /**
+ * @flags: [IN] Flags for command.
+ *
+ * Those are job-dependent. See all ``DRM_PVR_SUBMIT_JOB_*``.
+ */
+ __u32 flags;
+
+ /**
+ * @cmd_stream_len: [IN] Length of command stream, in bytes.
+ */
+ __u32 cmd_stream_len;
+
+ /**
+ * @cmd_stream: [IN] Pointer to command stream for command.
+ *
+ * The command stream must be u64-aligned.
+ */
+ __u64 cmd_stream;
+
+ /** @sync_ops: [IN] Sync operations for this job. */
+ struct drm_pvr_obj_array sync_ops;
+
+ /**
+ * @hwrt: [IN] HWRT data used by render jobs (geometry or fragment).
+ *
+ * Must be zero for non-render jobs.
+ */
+ struct drm_pvr_hwrt_data_ref hwrt;
+};
+
+/**
+ * struct drm_pvr_ioctl_submit_jobs_args - Arguments for %DRM_IOCTL_PVR_SUBMIT_JOBS
+ *
+ * If the syscall returns an error, it is important to check the value of
+ * @jobs.count. This indicates the index into @jobs.array where the
+ * error occurred.
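+ *
+ * A minimal single-job submission sketch (``fd`` and a populated ``job`` of
+ * type struct drm_pvr_job are assumed; error handling omitted):
+ *
+ * .. code-block:: C
+ *
+ *     struct drm_pvr_ioctl_submit_jobs_args submit_args = {
+ *         .jobs = {
+ *             .stride = sizeof(struct drm_pvr_job),
+ *             .count = 1,
+ *             .array = (__u64)(uintptr_t)&job,
+ *         },
+ *     };
+ *
+ *     ioctl(fd, DRM_IOCTL_PVR_SUBMIT_JOBS, &submit_args);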
+ */
+struct drm_pvr_ioctl_submit_jobs_args {
+ /** @jobs: [IN] Array of jobs to submit. */
+ struct drm_pvr_obj_array jobs;
+};
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* PVR_DRM_UAPI_H */
diff --git a/include/uapi/drm/qaic_accel.h b/include/uapi/drm/qaic_accel.h
index 2d348744a853..9dab32316aee 100644
--- a/include/uapi/drm/qaic_accel.h
+++ b/include/uapi/drm/qaic_accel.h
@@ -242,12 +242,12 @@ struct qaic_attach_slice_entry {
* @dbc_id: In. Associate the sliced BO with this DBC.
* @handle: In. GEM handle of the BO to slice.
* @dir: In. Direction of data flow. 1 = DMA_TO_DEVICE, 2 = DMA_FROM_DEVICE
- * @size: In. Total length of the BO.
- * If BO is imported (DMABUF/PRIME) then this size
- * should not exceed the size of DMABUF provided.
- * If BO is allocated using DRM_IOCTL_QAIC_CREATE_BO
- * then this size should be exactly same as the size
- * provided during DRM_IOCTL_QAIC_CREATE_BO.
+ * @size: In. Total length of the BO being used. This should not exceed the
+ * base size of the BO (struct drm_gem_object.base).
+ * For BOs allocated using DRM_IOCTL_QAIC_CREATE_BO, the requested size is
+ * PAGE_SIZE aligned before allocation, so the allocated BO may be larger
+ * than requested. This size should not exceed the PAGE_SIZE aligned BO
+ * size.
* @dev_addr: In. Device address this slice pushes to or pulls from.
* @db_addr: In. Address of the doorbell to ring.
* @db_data: In. Data to write to the doorbell.
@@ -287,8 +287,9 @@ struct qaic_execute_entry {
* struct qaic_partial_execute_entry - Defines a BO to resize and submit.
* @handle: In. GEM handle of the BO to commit to the device.
* @dir: In. Direction of data. 1 = to device, 2 = from device.
- * @resize: In. New size of the BO. Must be <= the original BO size. 0 is
- * short for no resize.
+ * @resize: In. New size of the BO. Must be <= the original BO size.
+ * A @resize of 0 is interpreted as no DMA transfer being
+ * involved.
*/
struct qaic_partial_execute_entry {
__u32 handle;
@@ -372,6 +373,16 @@ struct qaic_perf_stats_entry {
__u32 pad;
};
+/**
+ * struct qaic_detach_slice - Detaches slicing configuration from BO.
+ * @handle: In. GEM handle of the BO to detach slicing configuration.
+ * @pad: Structure padding. Must be 0.
+ */
+struct qaic_detach_slice {
+ __u32 handle;
+ __u32 pad;
+};
+
#define DRM_QAIC_MANAGE 0x00
#define DRM_QAIC_CREATE_BO 0x01
#define DRM_QAIC_MMAP_BO 0x02
@@ -380,6 +391,7 @@ struct qaic_perf_stats_entry {
#define DRM_QAIC_PARTIAL_EXECUTE_BO 0x05
#define DRM_QAIC_WAIT_BO 0x06
#define DRM_QAIC_PERF_STATS_BO 0x07
+#define DRM_QAIC_DETACH_SLICE_BO 0x08
#define DRM_IOCTL_QAIC_MANAGE DRM_IOWR(DRM_COMMAND_BASE + DRM_QAIC_MANAGE, struct qaic_manage_msg)
#define DRM_IOCTL_QAIC_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_QAIC_CREATE_BO, struct qaic_create_bo)
@@ -389,6 +401,7 @@ struct qaic_perf_stats_entry {
#define DRM_IOCTL_QAIC_PARTIAL_EXECUTE_BO DRM_IOW(DRM_COMMAND_BASE + DRM_QAIC_PARTIAL_EXECUTE_BO, struct qaic_execute)
#define DRM_IOCTL_QAIC_WAIT_BO DRM_IOW(DRM_COMMAND_BASE + DRM_QAIC_WAIT_BO, struct qaic_wait)
#define DRM_IOCTL_QAIC_PERF_STATS_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_QAIC_PERF_STATS_BO, struct qaic_perf_stats)
+#define DRM_IOCTL_QAIC_DETACH_SLICE_BO DRM_IOW(DRM_COMMAND_BASE + DRM_QAIC_DETACH_SLICE_BO, struct qaic_detach_slice)
#if defined(__cplusplus)
}
diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h
index 3dfc0af8756a..dce1835eced4 100644
--- a/include/uapi/drm/v3d_drm.h
+++ b/include/uapi/drm/v3d_drm.h
@@ -41,6 +41,7 @@ extern "C" {
#define DRM_V3D_PERFMON_CREATE 0x08
#define DRM_V3D_PERFMON_DESTROY 0x09
#define DRM_V3D_PERFMON_GET_VALUES 0x0a
+#define DRM_V3D_SUBMIT_CPU 0x0b
#define DRM_IOCTL_V3D_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CL, struct drm_v3d_submit_cl)
#define DRM_IOCTL_V3D_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_WAIT_BO, struct drm_v3d_wait_bo)
@@ -56,6 +57,7 @@ extern "C" {
struct drm_v3d_perfmon_destroy)
#define DRM_IOCTL_V3D_PERFMON_GET_VALUES DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_PERFMON_GET_VALUES, \
struct drm_v3d_perfmon_get_values)
+#define DRM_IOCTL_V3D_SUBMIT_CPU DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CPU, struct drm_v3d_submit_cpu)
#define DRM_V3D_SUBMIT_CL_FLUSH_CACHE 0x01
#define DRM_V3D_SUBMIT_EXTENSION 0x02
@@ -69,7 +71,13 @@ extern "C" {
struct drm_v3d_extension {
__u64 next;
__u32 id;
-#define DRM_V3D_EXT_ID_MULTI_SYNC 0x01
+#define DRM_V3D_EXT_ID_MULTI_SYNC 0x01
+#define DRM_V3D_EXT_ID_CPU_INDIRECT_CSD 0x02
+#define DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY 0x03
+#define DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY 0x04
+#define DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY 0x05
+#define DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY 0x06
+#define DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY 0x07
__u32 flags; /* mbz */
};
@@ -93,6 +101,7 @@ enum v3d_queue {
V3D_TFU,
V3D_CSD,
V3D_CACHE_CLEAN,
+ V3D_CPU,
};
/**
@@ -276,6 +285,7 @@ enum drm_v3d_param {
DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH,
DRM_V3D_PARAM_SUPPORTS_PERFMON,
DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT,
+ DRM_V3D_PARAM_SUPPORTS_CPU_QUEUE,
};
struct drm_v3d_get_param {
@@ -319,6 +329,11 @@ struct drm_v3d_submit_tfu {
/* Pointer to an array of ioctl extensions*/
__u64 extensions;
+
+ struct {
+ __u32 ioc;
+ __u32 pad;
+ } v71;
};
/* Submits a compute shader for dispatch. This job will block on any
@@ -356,6 +371,234 @@ struct drm_v3d_submit_csd {
__u32 pad;
};
+/**
+ * struct drm_v3d_indirect_csd - ioctl extension for the CPU job to create an
+ * indirect CSD
+ *
+ * When an extension with the DRM_V3D_EXT_ID_CPU_INDIRECT_CSD id is defined, it
+ * points to this extension to define an indirect CSD submission. It creates a
+ * CPU job linked to a CSD job. The CPU job waits for the indirect CSD
+ * dependencies and, once they are signaled, it updates the CSD job config
+ * before allowing the CSD job execution.
+ */
+struct drm_v3d_indirect_csd {
+ struct drm_v3d_extension base;
+
+ /* Indirect CSD */
+ struct drm_v3d_submit_csd submit;
+
+ /* Handle of the indirect BO, which should also be attached to the
+ * indirect CSD.
+ */
+ __u32 indirect;
+
+ /* Offset within the BO where the workgroup counts are stored */
+ __u32 offset;
+
+ /* Workgroups size */
+ __u32 wg_size;
+
+ /* Indices of the uniforms with the workgroup dispatch counts
+ * in the uniform stream. If the uniform rewrite is not needed,
+ * the offset must be 0xffffffff.
+ */
+ __u32 wg_uniform_offsets[3];
+};
+
+/**
+ * struct drm_v3d_timestamp_query - ioctl extension for the CPU job to calculate
+ * a timestamp query
+ *
+ * When an extension DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY is defined, it points to
+ * this extension to define a timestamp query submission. This CPU job will
+ * calculate the timestamp query and update the query value within the
+ * timestamp BO. Moreover, it will signal the timestamp syncobj to indicate
+ * query availability.
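+ *
+ * A submission sketch (assuming ``fd``, a timestamp BO handle ``bo`` and a
+ * syncobj handle ``sync``; the 32-bit element width of the offset array is
+ * an assumption here, and error handling is omitted):
+ *
+ * .. code-block:: C
+ *
+ *     __u32 offsets[] = { 0 };    /* query value offset within the BO */
+ *     __u32 syncs[] = { sync };
+ *     struct drm_v3d_timestamp_query ext = {
+ *         .base = { .id = DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY },
+ *         .offsets = (__u64)(uintptr_t)offsets,
+ *         .syncs = (__u64)(uintptr_t)syncs,
+ *         .count = 1,
+ *     };
+ *     struct drm_v3d_submit_cpu job = {
+ *         .bo_handles = (__u64)(uintptr_t)&bo,
+ *         .bo_handle_count = 1,
+ *         .extensions = (__u64)(uintptr_t)&ext,
+ *     };
+ *
+ *     ioctl(fd, DRM_IOCTL_V3D_SUBMIT_CPU, &job);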
+ */
+struct drm_v3d_timestamp_query {
+ struct drm_v3d_extension base;
+
+ /* Array of queries' offsets within the timestamp BO for their value */
+ __u64 offsets;
+
+ /* Array of syncobjs to indicate the availability of each query */
+ __u64 syncs;
+
+ /* Number of queries */
+ __u32 count;
+
+ /* mbz */
+ __u32 pad;
+};
+
+/**
+ * struct drm_v3d_reset_timestamp_query - ioctl extension for the CPU job to
+ * reset timestamp queries
+ *
+ * When an extension DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY is defined, it
+ * points to this extension to define a reset timestamp submission. This CPU
+ * job will reset the timestamp queries based on value offset of the first
+ * query. Moreover, it will reset the timestamp syncobj to reset query
+ * availability.
+ */
+struct drm_v3d_reset_timestamp_query {
+ struct drm_v3d_extension base;
+
+ /* Array of syncobjs to indicate the availability of each query */
+ __u64 syncs;
+
+ /* Offset of the first query within the timestamp BO for its value */
+ __u32 offset;
+
+ /* Number of queries */
+ __u32 count;
+};
+
+/**
+ * struct drm_v3d_copy_timestamp_query - ioctl extension for the CPU job to copy
+ * query results to a buffer
+ *
+ * When an extension DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY is defined, it
+ * points to this extension to define a copy timestamp query submission. This
+ * CPU job will copy the timestamp queries results to a BO with the offset
+ * and stride defined in the extension.
+ */
+struct drm_v3d_copy_timestamp_query {
+ struct drm_v3d_extension base;
+
+ /* Defines whether to write to the buffer using 64 or 32 bits */
+ __u8 do_64bit;
+
+ /* Defines whether it can write to the buffer even if the query is not available */
+ __u8 do_partial;
+
+ /* Defines whether the availability bit should be written to the buffer */
+ __u8 availability_bit;
+
+ /* mbz */
+ __u8 pad;
+
+ /* Offset of the buffer in the BO */
+ __u32 offset;
+
+ /* Stride of the buffer in the BO */
+ __u32 stride;
+
+ /* Number of queries */
+ __u32 count;
+
+ /* Array of queries' offsets within the timestamp BO for their value */
+ __u64 offsets;
+
+ /* Array of syncobjs to indicate the availability of each query */
+ __u64 syncs;
+};
+
+/**
+ * struct drm_v3d_reset_performance_query - ioctl extension for the CPU job to
+ * reset performance queries
+ *
+ * When an extension DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY is defined, it
+ * points to this extension to define a reset performance submission. This CPU
+ * job will reset the performance queries by resetting the values of the
+ * performance monitors. Moreover, it will reset the syncobj to reset query
+ * availability.
+ */
+struct drm_v3d_reset_performance_query {
+ struct drm_v3d_extension base;
+
+ /* Array of syncobjs to indicate the availability of each performance query */
+ __u64 syncs;
+
+ /* Number of queries */
+ __u32 count;
+
+ /* Number of performance monitors */
+ __u32 nperfmons;
+
+ /* Array of u64 user-pointers that point to an array of kperfmon_ids */
+ __u64 kperfmon_ids;
+};
+
+/**
+ * struct drm_v3d_copy_performance_query - ioctl extension for the CPU job to copy
+ * performance query results to a buffer
+ *
+ * When an extension DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY is defined, it
+ * points to this extension to define a copy performance query submission. This
+ * CPU job will copy the performance queries results to a BO with the offset
+ * and stride defined in the extension.
+ */
+struct drm_v3d_copy_performance_query {
+ struct drm_v3d_extension base;
+
+ /* Defines whether to write to the buffer using 64 or 32 bits */
+ __u8 do_64bit;
+
+ /* Defines whether it can write to the buffer even if the query is not available */
+ __u8 do_partial;
+
+ /* Defines whether the availability bit should be written to the buffer */
+ __u8 availability_bit;
+
+ /* mbz */
+ __u8 pad;
+
+ /* Offset of the buffer in the BO */
+ __u32 offset;
+
+ /* Stride of the buffer in the BO */
+ __u32 stride;
+
+ /* Number of performance monitors */
+ __u32 nperfmons;
+
+ /* Number of performance counters related to this query pool */
+ __u32 ncounters;
+
+ /* Number of queries */
+ __u32 count;
+
+ /* Array of syncobjs to indicate the availability of each performance query */
+ __u64 syncs;
+
+ /* Array of u64 user-pointers that point to an array of kperfmon_ids */
+ __u64 kperfmon_ids;
+};
+
+struct drm_v3d_submit_cpu {
+ /* Pointer to a u32 array of the BOs that are referenced by the job.
+ *
+ * For DRM_V3D_EXT_ID_CPU_INDIRECT_CSD, it must contain only one BO,
+ * that contains the workgroup counts.
+ *
+ * For DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY, it must contain only one BO,
+ * that will contain the timestamp.
+ *
+ * For DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY, it must contain only
+ * one BO, that contains the timestamp.
+ *
+ * For DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY, it must contain two
+ * BOs. The first is the BO where the timestamp queries will be written
+ * to. The second is the BO that contains the timestamp.
+ *
+ * For DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY, it must contain no
+ * BOs.
+ *
+ * For DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY, it must contain one
+ * BO, where the performance queries will be written.
+ */
+ __u64 bo_handles;
+
+ /* Number of BO handles passed in (size is that times 4). */
+ __u32 bo_handle_count;
+
+ __u32 flags;
+
+ /* Pointer to an array of ioctl extensions */
+ __u64 extensions;
+};
+
enum {
V3D_PERFCNT_FEP_VALID_PRIMTS_NO_PIXELS,
V3D_PERFCNT_FEP_VALID_PRIMS,
diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
index 7b158fcb02b4..c2ce71987e9b 100644
--- a/include/uapi/drm/virtgpu_drm.h
+++ b/include/uapi/drm/virtgpu_drm.h
@@ -64,6 +64,16 @@ struct drm_virtgpu_map {
__u32 pad;
};
+#define VIRTGPU_EXECBUF_SYNCOBJ_RESET 0x01
+#define VIRTGPU_EXECBUF_SYNCOBJ_FLAGS ( \
+ VIRTGPU_EXECBUF_SYNCOBJ_RESET | \
+ 0)
+struct drm_virtgpu_execbuffer_syncobj {
+ __u32 handle;
+ __u32 flags;
+ __u64 point;
+};
+
/* fence_fd is modified on success if VIRTGPU_EXECBUF_FENCE_FD_OUT flag is set. */
struct drm_virtgpu_execbuffer {
__u32 flags;
@@ -73,7 +83,11 @@ struct drm_virtgpu_execbuffer {
__u32 num_bo_handles;
__s32 fence_fd; /* in/out fence fd (see VIRTGPU_EXECBUF_FENCE_FD_IN/OUT) */
__u32 ring_idx; /* command ring index (see VIRTGPU_EXECBUF_RING_IDX) */
- __u32 pad;
+ __u32 syncobj_stride; /* size of @drm_virtgpu_execbuffer_syncobj */
+ __u32 num_in_syncobjs;
+ __u32 num_out_syncobjs;
+ __u64 in_syncobjs;
+ __u64 out_syncobjs;
};
#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
@@ -83,6 +97,7 @@ struct drm_virtgpu_execbuffer {
#define VIRTGPU_PARAM_CROSS_DEVICE 5 /* Cross virtio-device resource sharing */
#define VIRTGPU_PARAM_CONTEXT_INIT 6 /* DRM_VIRTGPU_CONTEXT_INIT */
#define VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs 7 /* Bitmask of supported capability set ids */
+#define VIRTGPU_PARAM_EXPLICIT_DEBUG_NAME 8 /* Ability to set debug name from userspace */
struct drm_virtgpu_getparam {
__u64 param;
@@ -184,6 +199,7 @@ struct drm_virtgpu_resource_create_blob {
#define VIRTGPU_CONTEXT_PARAM_CAPSET_ID 0x0001
#define VIRTGPU_CONTEXT_PARAM_NUM_RINGS 0x0002
#define VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK 0x0003
+#define VIRTGPU_CONTEXT_PARAM_DEBUG_NAME 0x0004
struct drm_virtgpu_context_set_param {
__u64 param;
__u64 value;
diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
new file mode 100644
index 000000000000..bb0c8a994116
--- /dev/null
+++ b/include/uapi/drm/xe_drm.h
@@ -0,0 +1,1327 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _UAPI_XE_DRM_H_
+#define _UAPI_XE_DRM_H_
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*
+ * Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints.
+ * Sections in this file are organized as follows:
+ * 1. IOCTL definition
+ * 2. Extension definition and helper structs
+ * 3. IOCTL query structs in the order of the query entries.
+ * 4. The rest of the IOCTL structs in the order of IOCTL declaration.
+ */
+
+/**
+ * DOC: Xe Device Block Diagram
+ *
+ * The diagram below represents a high-level simplification of a discrete
+ * GPU supported by the Xe driver. It shows some device components which
+ * are necessary to understand this API, as well as how they relate to
+ * each other. This diagram does not represent real hardware::
+ *
+ * ┌──────────────────────────────────────────────────────────────────┐
+ * │ ┌──────────────────────────────────────────────────┐ ┌─────────┐ │
+ * │ │ ┌───────────────────────┐ ┌─────┐ │ │ ┌─────┐ │ │
+ * │ │ │ VRAM0 ├───┤ ... │ │ │ │VRAM1│ │ │
+ * │ │ └───────────┬───────────┘ └─GT1─┘ │ │ └──┬──┘ │ │
+ * │ │ ┌──────────────────┴───────────────────────────┐ │ │ ┌──┴──┐ │ │
+ * │ │ │ ┌─────────────────────┐ ┌─────────────────┐ │ │ │ │ │ │ │
+ * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │
+ * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │ │ │RCS0 │ │BCS0 │ │ │ │ │ │ │ │ │
+ * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │
+ * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │
+ * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │ │ │VCS0 │ │VCS1 │ │ │ │ │ │ │ │ │
+ * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │
+ * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │
+ * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │ │ │VECS0│ │VECS1│ │ │ │ │ │ ... │ │ │
+ * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │
+ * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │
+ * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │ │ │CCS0 │ │CCS1 │ │ │ │ │ │ │ │ │
+ * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │
+ * │ │ │ └─────────DSS─────────┘ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │
+ * │ │ │ │ │CCS2 │ │CCS3 │ │ │ │ │ │ │ │ │
+ * │ │ │ ┌─────┐ ┌─────┐ ┌─────┐ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │
+ * │ │ │ │ ... │ │ ... │ │ ... │ │ │ │ │ │ │ │ │ │
+ * │ │ │ └─DSS─┘ └─DSS─┘ └─DSS─┘ └─────Engines─────┘ │ │ │ │ │ │ │
+ * │ │ └───────────────────────────GT0────────────────┘ │ │ └─GT2─┘ │ │
+ * │ └────────────────────────────Tile0─────────────────┘ └─ Tile1──┘ │
+ * └─────────────────────────────Device0───────┬──────────────────────┘
+ * │
+ * ───────────────────────┴────────── PCI bus
+ */
+
+/**
+ * DOC: Xe uAPI Overview
+ *
+ * This section aims to describe the Xe's IOCTL entries, its structs, and other
+ * Xe related uAPI such as uevents and PMU (Platform Monitoring Unit) related
+ * entries and usage.
+ *
+ * List of supported IOCTLs:
+ * - &DRM_IOCTL_XE_DEVICE_QUERY
+ * - &DRM_IOCTL_XE_GEM_CREATE
+ * - &DRM_IOCTL_XE_GEM_MMAP_OFFSET
+ * - &DRM_IOCTL_XE_VM_CREATE
+ * - &DRM_IOCTL_XE_VM_DESTROY
+ * - &DRM_IOCTL_XE_VM_BIND
+ * - &DRM_IOCTL_XE_EXEC_QUEUE_CREATE
+ * - &DRM_IOCTL_XE_EXEC_QUEUE_DESTROY
+ * - &DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY
+ * - &DRM_IOCTL_XE_EXEC
+ * - &DRM_IOCTL_XE_WAIT_USER_FENCE
+ */
+
+/*
+ * xe specific ioctls.
+ *
+ * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END),
+ * i.e. [0x40, 0xa0) (0xa0 is excluded). The numbers below are defined as
+ * offsets against DRM_COMMAND_BASE and should be between [0x0, 0x60).
+ */
+#define DRM_XE_DEVICE_QUERY 0x00
+#define DRM_XE_GEM_CREATE 0x01
+#define DRM_XE_GEM_MMAP_OFFSET 0x02
+#define DRM_XE_VM_CREATE 0x03
+#define DRM_XE_VM_DESTROY 0x04
+#define DRM_XE_VM_BIND 0x05
+#define DRM_XE_EXEC_QUEUE_CREATE 0x06
+#define DRM_XE_EXEC_QUEUE_DESTROY 0x07
+#define DRM_XE_EXEC_QUEUE_GET_PROPERTY 0x08
+#define DRM_XE_EXEC 0x09
+#define DRM_XE_WAIT_USER_FENCE 0x0a
+/* Must be kept compact -- no holes */
+
+#define DRM_IOCTL_XE_DEVICE_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_DEVICE_QUERY, struct drm_xe_device_query)
+#define DRM_IOCTL_XE_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_CREATE, struct drm_xe_gem_create)
+#define DRM_IOCTL_XE_GEM_MMAP_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_MMAP_OFFSET, struct drm_xe_gem_mmap_offset)
+#define DRM_IOCTL_XE_VM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_CREATE, struct drm_xe_vm_create)
+#define DRM_IOCTL_XE_VM_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_DESTROY, struct drm_xe_vm_destroy)
+#define DRM_IOCTL_XE_VM_BIND DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_BIND, struct drm_xe_vm_bind)
+#define DRM_IOCTL_XE_EXEC_QUEUE_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_CREATE, struct drm_xe_exec_queue_create)
+#define DRM_IOCTL_XE_EXEC_QUEUE_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_DESTROY, struct drm_xe_exec_queue_destroy)
+#define DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_GET_PROPERTY, struct drm_xe_exec_queue_get_property)
+#define DRM_IOCTL_XE_EXEC DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
+#define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
+
+/**
+ * DOC: Xe IOCTL Extensions
+ *
+ * Before detailing the IOCTLs and its structs, it is important to highlight
+ * that every IOCTL in Xe is extensible.
+ *
+ * Many interfaces need to grow over time. In most cases we can simply
+ * extend the struct and have userspace pass in more data. Another option,
+ * as demonstrated by Vulkan's approach to providing extensions for forward
+ * and backward compatibility, is to use a list of optional structs to
+ * provide those extra details.
+ *
+ * The key advantage to using an extension chain is that it allows us to
+ * redefine the interface more easily than an ever growing struct of
+ * increasing complexity, and for large parts of that interface to be
+ * entirely optional. The downside is more pointer chasing; chasing across
+ * the __user boundary with pointers encapsulated inside u64.
+ *
+ * Example chaining:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_user_extension ext3 {
+ * .next_extension = 0, // end
+ * .name = ...,
+ * };
+ * struct drm_xe_user_extension ext2 {
+ * .next_extension = (uintptr_t)&ext3,
+ * .name = ...,
+ * };
+ * struct drm_xe_user_extension ext1 {
+ * .next_extension = (uintptr_t)&ext2,
+ * .name = ...,
+ * };
+ *
+ * Typically the struct drm_xe_user_extension would be embedded in some uAPI
+ * struct, and in this case we would feed it the head of the chain (i.e. ext1),
+ * which would then apply all of the above extensions.
+ */
+
+/**
+ * struct drm_xe_user_extension - Base class for defining a chain of extensions
+ */
+struct drm_xe_user_extension {
+ /**
+ * @next_extension:
+ *
+ * Pointer to the next struct drm_xe_user_extension, or zero if the end.
+ */
+ __u64 next_extension;
+
+ /**
+ * @name: Name of the extension.
+ *
+ * Note that the name here is just some integer.
+ *
+ * Also note that the name space for this is not global for the whole
+ * driver, but rather its scope/meaning is limited to the specific piece
+ * of uAPI which has embedded the struct drm_xe_user_extension.
+ */
+ __u32 name;
+
+ /**
+ * @pad: MBZ
+ *
+ * All undefined bits must be zero.
+ */
+ __u32 pad;
+};
+
+/**
+ * struct drm_xe_ext_set_property - Generic set property extension
+ *
+ * A generic struct that allows any of Xe's IOCTLs to be extended
+ * with a set_property operation.
+ */
+struct drm_xe_ext_set_property {
+ /** @base: base user extension */
+ struct drm_xe_user_extension base;
+
+ /** @property: property to set */
+ __u32 property;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+ /** @value: property value */
+ __u64 value;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_engine_class_instance - instance of an engine class
+ *
+ * It is returned as part of the @drm_xe_engine, but it is also used as
+ * the input for engine selection in both @drm_xe_exec_queue_create and
+ * @drm_xe_query_engine_cycles.
+ *
+ * The @engine_class can be:
+ * - %DRM_XE_ENGINE_CLASS_RENDER
+ * - %DRM_XE_ENGINE_CLASS_COPY
+ * - %DRM_XE_ENGINE_CLASS_VIDEO_DECODE
+ * - %DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE
+ * - %DRM_XE_ENGINE_CLASS_COMPUTE
+ * - %DRM_XE_ENGINE_CLASS_VM_BIND - Kernel only classes (not actual
+ * hardware engine class). Used for creating ordered queues of VM
+ * bind operations.
+ */
+struct drm_xe_engine_class_instance {
+#define DRM_XE_ENGINE_CLASS_RENDER 0
+#define DRM_XE_ENGINE_CLASS_COPY 1
+#define DRM_XE_ENGINE_CLASS_VIDEO_DECODE 2
+#define DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE 3
+#define DRM_XE_ENGINE_CLASS_COMPUTE 4
+#define DRM_XE_ENGINE_CLASS_VM_BIND 5
+ /** @engine_class: engine class id */
+ __u16 engine_class;
+ /** @engine_instance: engine instance id */
+ __u16 engine_instance;
+ /** @gt_id: Unique ID of this GT within the PCI Device */
+ __u16 gt_id;
+ /** @pad: MBZ */
+ __u16 pad;
+};
+
+/**
+ * struct drm_xe_engine - describe hardware engine
+ */
+struct drm_xe_engine {
+ /** @instance: The @drm_xe_engine_class_instance */
+ struct drm_xe_engine_class_instance instance;
+
+ /** @reserved: Reserved */
+ __u64 reserved[3];
+};
+
+/**
+ * struct drm_xe_query_engines - describe engines
+ *
+ * If a query is made with a struct @drm_xe_device_query where .query
+ * is equal to %DRM_XE_DEVICE_QUERY_ENGINES, then the reply uses an array of
+ * struct @drm_xe_query_engines in .data.
+ */
+struct drm_xe_query_engines {
+ /** @num_engines: number of engines returned in @engines */
+ __u32 num_engines;
+ /** @pad: MBZ */
+ __u32 pad;
+ /** @engines: The returned engines for this device */
+ struct drm_xe_engine engines[];
+};
+
+/**
+ * enum drm_xe_memory_class - Supported memory classes.
+ */
+enum drm_xe_memory_class {
+ /** @DRM_XE_MEM_REGION_CLASS_SYSMEM: Represents system memory. */
+ DRM_XE_MEM_REGION_CLASS_SYSMEM = 0,
+ /**
+ * @DRM_XE_MEM_REGION_CLASS_VRAM: On discrete platforms, this
+ * represents the memory that is local to the device, which we
+ * call VRAM. Not valid on integrated platforms.
+ */
+ DRM_XE_MEM_REGION_CLASS_VRAM
+};
+
+/**
+ * struct drm_xe_mem_region - Describes some region as known to
+ * the driver.
+ */
+struct drm_xe_mem_region {
+ /**
+ * @mem_class: The memory class describing this region.
+ *
+ * See enum drm_xe_memory_class for supported values.
+ */
+ __u16 mem_class;
+ /**
+ * @instance: The unique ID for this region, which serves as the
+ * index in the placement bitmask used as argument for
+ * &DRM_IOCTL_XE_GEM_CREATE
+ */
+ __u16 instance;
+ /**
+ * @min_page_size: Min page-size in bytes for this region.
+ *
+ * When the kernel allocates memory for this region, the
+ * underlying pages will be at least @min_page_size in size.
+ * Buffer objects with an allowable placement in this region must be
+ * created with a size aligned to this value.
+ * GPU virtual address mappings of (parts of) buffer objects that
+ * may be placed in this region must also have their GPU virtual
+ * address and range aligned to this value.
+ * Affected IOCTLS will return %-EINVAL if alignment restrictions are
+ * not met.
+ */
+ __u32 min_page_size;
+ /**
+ * @total_size: The usable size in bytes for this region.
+ */
+ __u64 total_size;
+ /**
+ * @used: Estimate of the memory used in bytes for this region.
+ *
+ * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
+ * accounting. Without this the value here will always equal
+ * zero.
+ */
+ __u64 used;
+ /**
+ * @cpu_visible_size: How much of this region can be CPU
+ * accessed, in bytes.
+ *
+ * This will always be <= @total_size, and the remainder (if
+ * any) will not be CPU accessible. If the CPU accessible part
+ * is smaller than @total_size then this is referred to as a
+ * small BAR system.
+ *
+ * On systems without small BAR (full BAR), @cpu_visible_size will
+ * always equal @total_size, since all of it will be CPU
+ * accessible.
+ *
+ * Note this is only tracked for DRM_XE_MEM_REGION_CLASS_VRAM
+ * regions (for other types the value here will always equal
+ * zero).
+ */
+ __u64 cpu_visible_size;
+ /**
+ * @cpu_visible_used: Estimate of CPU visible memory used, in
+ * bytes.
+ *
+ * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
+ * accounting. Without this the value here will always equal
+ * zero. Note this is only currently tracked for
+ * DRM_XE_MEM_REGION_CLASS_VRAM regions (for other types the value
+ * here will always be zero).
+ */
+ __u64 cpu_visible_used;
+ /** @reserved: Reserved */
+ __u64 reserved[6];
+};
+
+/**
+ * struct drm_xe_query_mem_regions - describe memory regions
+ *
+ * If a query is made with a struct drm_xe_device_query where .query
+ * is equal to DRM_XE_DEVICE_QUERY_MEM_REGIONS, then the reply uses
+ * struct drm_xe_query_mem_regions in .data.
+ */
+struct drm_xe_query_mem_regions {
+ /** @num_mem_regions: number of memory regions returned in @mem_regions */
+ __u32 num_mem_regions;
+ /** @pad: MBZ */
+ __u32 pad;
+ /** @mem_regions: The returned memory regions for this device */
+ struct drm_xe_mem_region mem_regions[];
+};
+
+/**
+ * struct drm_xe_query_config - describe the device configuration
+ *
+ * If a query is made with a struct drm_xe_device_query where .query
+ * is equal to DRM_XE_DEVICE_QUERY_CONFIG, then the reply uses
+ * struct drm_xe_query_config in .data.
+ *
+ * The index in @info can be:
+ * - %DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID - Device ID (lower 16 bits)
+ * and the device revision (next 8 bits)
+ * - %DRM_XE_QUERY_CONFIG_FLAGS - Flags describing the device
+ * configuration, see list below
+ *
+ * - %DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM - Flag is set if the device
+ * has usable VRAM
+ * - %DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT - Minimal memory alignment
+ * required by this device, typically SZ_4K or SZ_64K
+ * - %DRM_XE_QUERY_CONFIG_VA_BITS - Maximum bits of a virtual address
+ * - %DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY - Value of the highest
+ * available exec queue priority
+ */
+struct drm_xe_query_config {
+ /** @num_params: number of parameters returned in info */
+ __u32 num_params;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+#define DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID 0
+#define DRM_XE_QUERY_CONFIG_FLAGS 1
+ #define DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM (1 << 0)
+#define DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT 2
+#define DRM_XE_QUERY_CONFIG_VA_BITS 3
+#define DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY 4
+ /** @info: array of elements containing the config info */
+ __u64 info[];
+};
+
+/**
+ * struct drm_xe_gt - describe an individual GT.
+ *
+ * To be used with drm_xe_query_gt_list, which will return a list with the
+ * individual descriptions of all existing GTs.
+ * Graphics Technology (GT) is a subset of a GPU/tile that is responsible for
+ * implementing graphics and/or media operations.
+ *
+ * The index in @type can be:
+ * - %DRM_XE_QUERY_GT_TYPE_MAIN
+ * - %DRM_XE_QUERY_GT_TYPE_MEDIA
+ */
+struct drm_xe_gt {
+#define DRM_XE_QUERY_GT_TYPE_MAIN 0
+#define DRM_XE_QUERY_GT_TYPE_MEDIA 1
+ /** @type: GT type: Main or Media */
+ __u16 type;
+ /** @tile_id: Tile ID where this GT lives (Information only) */
+ __u16 tile_id;
+ /** @gt_id: Unique ID of this GT within the PCI Device */
+ __u16 gt_id;
+ /** @pad: MBZ */
+ __u16 pad[3];
+ /** @reference_clock: A clock frequency for timestamp */
+ __u32 reference_clock;
+ /**
+ * @near_mem_regions: Bit mask of instances from
+ * drm_xe_query_mem_regions that are nearest to the current engines
+ * of this GT.
+ * Each index in this mask refers directly to the struct
+ * drm_xe_query_mem_regions' instance, no assumptions should
+ * be made about order. The type of each region is described
+ * by struct drm_xe_query_mem_regions' mem_class.
+ */
+ __u64 near_mem_regions;
+ /**
+ * @far_mem_regions: Bit mask of instances from
+ * drm_xe_query_mem_regions that are far from the engines of this GT.
+ * In general, they have extra indirections when compared to the
+ * @near_mem_regions. For a discrete device this could mean system
+ * memory and memory living in a different tile.
+ * Each index in this mask refers directly to the struct
+ * drm_xe_query_mem_regions' instance, no assumptions should
+ * be made about order. The type of each region is described
+ * by struct drm_xe_query_mem_regions' mem_class.
+ */
+ __u64 far_mem_regions;
+ /** @reserved: Reserved */
+ __u64 reserved[8];
+};
+
+/**
+ * struct drm_xe_query_gt_list - A list with GT description items.
+ *
+ * If a query is made with a struct drm_xe_device_query where .query
+ * is equal to DRM_XE_DEVICE_QUERY_GT_LIST, then the reply uses struct
+ * drm_xe_query_gt_list in .data.
+ */
+struct drm_xe_query_gt_list {
+ /** @num_gt: number of GT items returned in gt_list */
+ __u32 num_gt;
+ /** @pad: MBZ */
+ __u32 pad;
+ /** @gt_list: The GT list returned for this device */
+ struct drm_xe_gt gt_list[];
+};
+
+/**
+ * struct drm_xe_query_topology_mask - describe the topology mask of a GT
+ *
+ * This is the hardware topology which reflects the internal physical
+ * structure of the GPU.
+ *
+ * If a query is made with a struct drm_xe_device_query where .query
+ * is equal to DRM_XE_DEVICE_QUERY_GT_TOPOLOGY, then the reply uses
+ * struct drm_xe_query_topology_mask in .data.
+ *
+ * The @type can be:
+ * - %DRM_XE_TOPO_DSS_GEOMETRY - To query the mask of Dual Sub Slices
+ * (DSS) available for geometry operations. For example a query response
+ * containing the following in mask:
+ * ``DSS_GEOMETRY ff ff ff ff 00 00 00 00``
+ * means 32 DSS are available for geometry.
+ * - %DRM_XE_TOPO_DSS_COMPUTE - To query the mask of Dual Sub Slices
+ * (DSS) available for compute operations. For example a query response
+ * containing the following in mask:
+ * ``DSS_COMPUTE ff ff ff ff 00 00 00 00``
+ * means 32 DSS are available for compute.
+ * - %DRM_XE_TOPO_EU_PER_DSS - To query the mask of Execution Units (EU)
+ * available per Dual Sub Slices (DSS). For example a query response
+ * containing the following in mask:
+ * ``EU_PER_DSS ff ff 00 00 00 00 00 00``
+ * means each DSS has 16 EU.
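+ *
+ * For example, the number of DSS available for geometry can be counted from
+ * a returned mask like this (a sketch; ``topo`` is assumed to point at a
+ * returned struct drm_xe_query_topology_mask of type
+ * %DRM_XE_TOPO_DSS_GEOMETRY):
+ *
+ * .. code-block:: C
+ *
+ *     int dss = 0;
+ *     for (__u32 i = 0; i < topo->num_bytes; i++)
+ *         dss += __builtin_popcount(topo->mask[i]);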
+ */
+struct drm_xe_query_topology_mask {
+ /** @gt_id: GT ID the mask is associated with */
+ __u16 gt_id;
+
+#define DRM_XE_TOPO_DSS_GEOMETRY (1 << 0)
+#define DRM_XE_TOPO_DSS_COMPUTE (1 << 1)
+#define DRM_XE_TOPO_EU_PER_DSS (1 << 2)
+ /** @type: type of mask */
+ __u16 type;
+
+ /** @num_bytes: number of bytes in requested mask */
+ __u32 num_bytes;
+
+ /** @mask: little-endian mask of @num_bytes */
+ __u8 mask[];
+};
+
+/**
+ * struct drm_xe_query_engine_cycles - correlate CPU and GPU timestamps
+ *
+ * If a query is made with a struct drm_xe_device_query where .query is equal to
+ * DRM_XE_DEVICE_QUERY_ENGINE_CYCLES, then the reply uses struct drm_xe_query_engine_cycles
+ * in .data. struct drm_xe_query_engine_cycles is allocated by the user and
+ * .data points to this allocated structure.
+ *
+ * The query returns the engine cycles, which, along with the GT's
+ * @reference_clock, can be used to calculate the engine timestamp. In
+ * addition, the query returns a set of CPU timestamps that indicate when
+ * the command streamer cycle count was captured.
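+ *
+ * For instance, converting the sampled cycle counter to nanoseconds might
+ * look like this (a sketch that ignores counter wrap-around at @width bits
+ * and intermediate overflow; ``cycles`` is a filled struct
+ * drm_xe_query_engine_cycles and ``gt`` the matching struct drm_xe_gt):
+ *
+ * .. code-block:: C
+ *
+ *     __u64 engine_ns = cycles.engine_cycles * 1000000000ull /
+ *                       gt->reference_clock;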
+ */
+struct drm_xe_query_engine_cycles {
+ /**
+ * @eci: This is input by the user and is the engine for which command
+ * streamer cycles are queried.
+ */
+ struct drm_xe_engine_class_instance eci;
+
+ /**
+ * @clockid: This is input by the user and is the reference clock id for
+ * CPU timestamp. For definition, see clock_gettime(2) and
+ * perf_event_open(2). Supported clock ids are CLOCK_MONOTONIC,
+ * CLOCK_MONOTONIC_RAW, CLOCK_REALTIME, CLOCK_BOOTTIME, CLOCK_TAI.
+ */
+ __s32 clockid;
+
+ /** @width: Width of the engine cycle counter in bits. */
+ __u32 width;
+
+ /**
+ * @engine_cycles: Engine cycles as read from its register
+ * at 0x358 offset.
+ */
+ __u64 engine_cycles;
+
+ /**
+ * @cpu_timestamp: CPU timestamp in ns. The timestamp is captured before
+ * reading the engine_cycles register using the reference clockid set by the
+ * user.
+ */
+ __u64 cpu_timestamp;
+
+ /**
+ * @cpu_delta: Time delta in ns captured around reading the lower dword
+ * of the engine_cycles register.
+ */
+ __u64 cpu_delta;
+};
+
+/**
+ * struct drm_xe_device_query - Input of &DRM_IOCTL_XE_DEVICE_QUERY - main
+ * structure to query device information
+ *
+ * The user selects the type of data to query among DRM_XE_DEVICE_QUERY_*
+ * and sets the value in the query member. This determines the type of
+ * the structure provided by the driver in data, among struct drm_xe_query_*.
+ *
+ * The @query can be:
+ * - %DRM_XE_DEVICE_QUERY_ENGINES
+ * - %DRM_XE_DEVICE_QUERY_MEM_REGIONS
+ * - %DRM_XE_DEVICE_QUERY_CONFIG
+ * - %DRM_XE_DEVICE_QUERY_GT_LIST
+ * - %DRM_XE_DEVICE_QUERY_HWCONFIG - Query type to retrieve the hardware
+ * configuration of the device such as information on slices, memory,
+ * caches, and so on. It is provided as a table of key / value
+ * attributes.
+ * - %DRM_XE_DEVICE_QUERY_GT_TOPOLOGY
+ * - %DRM_XE_DEVICE_QUERY_ENGINE_CYCLES
+ *
+ * If size is set to 0, the driver fills it with the required size for
+ * the requested type of data to query. If size is equal to the required
+ * size, the queried information is copied into data. If size is set to
+ * a value different from 0 and different from the required size, the
+ * IOCTL call returns -EINVAL.
+ *
+ * For example the following code snippet allows retrieving and printing
+ * information about the device engines with DRM_XE_DEVICE_QUERY_ENGINES:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_query_engines *engines;
+ * struct drm_xe_device_query query = {
+ * .extensions = 0,
+ * .query = DRM_XE_DEVICE_QUERY_ENGINES,
+ * .size = 0,
+ * .data = 0,
+ * };
+ * ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
+ * engines = malloc(query.size);
+ * query.data = (uintptr_t)engines;
+ * ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
+ * for (int i = 0; i < engines->num_engines; i++) {
+ * printf("Engine %d: %s\n", i,
+ * engines->engines[i].instance.engine_class ==
+ * DRM_XE_ENGINE_CLASS_RENDER ? "RENDER":
+ * engines->engines[i].instance.engine_class ==
+ * DRM_XE_ENGINE_CLASS_COPY ? "COPY":
+ * engines->engines[i].instance.engine_class ==
+ * DRM_XE_ENGINE_CLASS_VIDEO_DECODE ? "VIDEO_DECODE":
+ * engines->engines[i].instance.engine_class ==
+ * DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE ? "VIDEO_ENHANCE":
+ * engines->engines[i].instance.engine_class ==
+ * DRM_XE_ENGINE_CLASS_COMPUTE ? "COMPUTE":
+ * "UNKNOWN");
+ * }
+ * free(engines);
+ */
+struct drm_xe_device_query {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+#define DRM_XE_DEVICE_QUERY_ENGINES 0
+#define DRM_XE_DEVICE_QUERY_MEM_REGIONS 1
+#define DRM_XE_DEVICE_QUERY_CONFIG 2
+#define DRM_XE_DEVICE_QUERY_GT_LIST 3
+#define DRM_XE_DEVICE_QUERY_HWCONFIG 4
+#define DRM_XE_DEVICE_QUERY_GT_TOPOLOGY 5
+#define DRM_XE_DEVICE_QUERY_ENGINE_CYCLES 6
+ /** @query: The type of data to query */
+ __u32 query;
+
+ /** @size: Size of the queried data */
+ __u32 size;
+
+ /** @data: Queried data is placed here */
+ __u64 data;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_gem_create - Input of &DRM_IOCTL_XE_GEM_CREATE - A structure for
+ * gem creation
+ *
+ * The @flags can be:
+ * - %DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING
+ * - %DRM_XE_GEM_CREATE_FLAG_SCANOUT
+ * - %DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM - When using VRAM as a
+ * possible placement, ensure that the corresponding VRAM allocation
+ * will always use the CPU accessible part of VRAM. This is important
+ * for small-bar systems (on full-bar systems this gets turned into a
+ * noop).
+ * Note1: System memory can be used as an extra placement if the kernel
+ * should spill the allocation to system memory, if space can't be made
+ * available in the CPU accessible part of VRAM (giving the same
+ * behaviour as the i915 interface, see
+ * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS).
+ * Note2: For clear-color CCS surfaces the kernel needs to read the
+ * clear-color value stored in the buffer, and on discrete platforms we
+ * need to use VRAM for display surfaces, therefore the kernel requires
+ * setting this flag for such objects, otherwise an error is thrown on
+ * small-bar systems.
+ *
+ * @cpu_caching supports the following values:
+ * - %DRM_XE_GEM_CPU_CACHING_WB - Allocate the pages with write-back
+ * caching. On iGPU this can't be used for scanout surfaces. Currently
+ * not allowed for objects placed in VRAM.
+ * - %DRM_XE_GEM_CPU_CACHING_WC - Allocate the pages as write-combined. This
+ * is uncached. Scanout surfaces should likely use this. All objects
+ * that can be placed in VRAM must use this.
+ */
+struct drm_xe_gem_create {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /**
+ * @size: Size of the object to be created, must match region
+ * (system or vram) minimum alignment (&min_page_size).
+ */
+ __u64 size;
+
+ /**
+	 * @placement: A mask of memory instances where the BO can be placed.
+ * Each index in this mask refers directly to the struct
+ * drm_xe_query_mem_regions' instance, no assumptions should
+ * be made about order. The type of each region is described
+ * by struct drm_xe_query_mem_regions' mem_class.
+ */
+ __u32 placement;
+
+#define DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING (1 << 0)
+#define DRM_XE_GEM_CREATE_FLAG_SCANOUT (1 << 1)
+#define DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM (1 << 2)
+ /**
+	 * @flags: Flags, a combination of the DRM_XE_GEM_CREATE_FLAG_*
+	 * values above
+ */
+ __u32 flags;
+
+ /**
+ * @vm_id: Attached VM, if any
+ *
+ * If a VM is specified, this BO must:
+ *
+ * 1. Only ever be bound to that VM.
+	 * 2. Never be exported as a PRIME fd.
+ */
+ __u32 vm_id;
+
+ /**
+ * @handle: Returned handle for the object.
+ *
+ * Object handles are nonzero.
+ */
+ __u32 handle;
+
+#define DRM_XE_GEM_CPU_CACHING_WB 1
+#define DRM_XE_GEM_CPU_CACHING_WC 2
+ /**
+ * @cpu_caching: The CPU caching mode to select for this object. If
+	 * the object is mmapped, the mode selected here will also be used.
+ */
+ __u16 cpu_caching;
+ /** @pad: MBZ */
+ __u16 pad[3];
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
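(Usage sketch with hypothetical values: a 64 KiB scanout object placed in
VRAM that must stay CPU-visible on small-bar systems; `vram_instance` is
assumed to come from a prior DRM_XE_DEVICE_QUERY_MEM_REGIONS query.)

    struct drm_xe_gem_create create = {
            .size = 0x10000,
            .placement = 1 << vram_instance,
            .flags = DRM_XE_GEM_CREATE_FLAG_SCANOUT |
                     DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM,
            .cpu_caching = DRM_XE_GEM_CPU_CACHING_WC,
    };
    ioctl(fd, DRM_IOCTL_XE_GEM_CREATE, &create);
    /* create.handle now holds the (nonzero) object handle */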
+/**
+ * struct drm_xe_gem_mmap_offset - Input of &DRM_IOCTL_XE_GEM_MMAP_OFFSET
+ */
+struct drm_xe_gem_mmap_offset {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @handle: Handle for the object being mapped. */
+ __u32 handle;
+
+ /** @flags: Must be zero */
+ __u32 flags;
+
+ /** @offset: The fake offset to use for subsequent mmap call */
+ __u64 offset;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
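(Minimal usage sketch, assuming `handle` and `size` come from a prior GEM
creation: fetch the fake offset, then hand it to mmap(2).)

    struct drm_xe_gem_mmap_offset mmo = { .handle = handle };

    ioctl(fd, DRM_IOCTL_XE_GEM_MMAP_OFFSET, &mmo);
    void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
                     MAP_SHARED, fd, mmo.offset);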
+/**
+ * struct drm_xe_vm_create - Input of &DRM_IOCTL_XE_VM_CREATE
+ *
+ * The @flags can be:
+ * - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE
+ * - %DRM_XE_VM_CREATE_FLAG_LR_MODE - An LR, or Long Running VM accepts
+ * exec submissions to its exec_queues that don't have an upper time
+ * limit on the job execution time. But exec submissions to these
+ *    don't allow the sync types DRM_XE_SYNC_TYPE_SYNCOBJ and
+ *    DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ to be used as out-syncobjs,
+ *    that is, together with DRM_XE_SYNC_FLAG_SIGNAL.
+ * LR VMs can be created in recoverable page-fault mode using
+ * DRM_XE_VM_CREATE_FLAG_FAULT_MODE, if the device supports it.
+ *    If that flag is omitted, the UMD cannot rely on the slightly
+ * different per-VM overcommit semantics that are enabled by
+ * DRM_XE_VM_CREATE_FLAG_FAULT_MODE (see below), but KMD may
+ * still enable recoverable pagefaults if supported by the device.
+ * - %DRM_XE_VM_CREATE_FLAG_FAULT_MODE - Requires also
+ * DRM_XE_VM_CREATE_FLAG_LR_MODE. It allows memory to be allocated on
+ * demand when accessed, and also allows per-VM overcommit of memory.
+ * The xe driver internally uses recoverable pagefaults to implement
+ * this.
+ */
+struct drm_xe_vm_create {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+#define DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE (1 << 0)
+#define DRM_XE_VM_CREATE_FLAG_LR_MODE (1 << 1)
+#define DRM_XE_VM_CREATE_FLAG_FAULT_MODE (1 << 2)
+ /** @flags: Flags */
+ __u32 flags;
+
+ /** @vm_id: Returned VM ID */
+ __u32 vm_id;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
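(Sketch: creating a long-running VM in recoverable page-fault mode, valid
only where the device supports fault mode as described above.)

    struct drm_xe_vm_create vm_create = {
            .flags = DRM_XE_VM_CREATE_FLAG_LR_MODE |
                     DRM_XE_VM_CREATE_FLAG_FAULT_MODE,
    };
    ioctl(fd, DRM_IOCTL_XE_VM_CREATE, &vm_create);
    /* vm_create.vm_id identifies the new VM */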
+/**
+ * struct drm_xe_vm_destroy - Input of &DRM_IOCTL_XE_VM_DESTROY
+ */
+struct drm_xe_vm_destroy {
+ /** @vm_id: VM ID */
+ __u32 vm_id;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_vm_bind_op - run bind operations
+ *
+ * The @op can be:
+ * - %DRM_XE_VM_BIND_OP_MAP
+ * - %DRM_XE_VM_BIND_OP_UNMAP
+ * - %DRM_XE_VM_BIND_OP_MAP_USERPTR
+ * - %DRM_XE_VM_BIND_OP_UNMAP_ALL
+ * - %DRM_XE_VM_BIND_OP_PREFETCH
+ *
+ * and the @flags can be:
+ * - %DRM_XE_VM_BIND_FLAG_NULL - When the NULL flag is set, the page
+ *   tables are set up with a special bit which indicates writes are
+ *   dropped and all reads return zero. In the future, the NULL flag
+ * will only be valid for DRM_XE_VM_BIND_OP_MAP operations, the BO
+ * handle MBZ, and the BO offset MBZ. This flag is intended to
+ * implement VK sparse bindings.
+ */
+struct drm_xe_vm_bind_op {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /**
+ * @obj: GEM object to operate on, MBZ for MAP_USERPTR, MBZ for UNMAP
+ */
+ __u32 obj;
+
+ /**
+ * @pat_index: The platform defined @pat_index to use for this mapping.
+ * The index basically maps to some predefined memory attributes,
+ * including things like caching, coherency, compression etc. The exact
+ * meaning of the pat_index is platform specific and defined in the
+ * Bspec and PRMs. When the KMD sets up the binding the index here is
+ * encoded into the ppGTT PTE.
+ *
+	 * For coherency the @pat_index needs to be at least 1-way coherent when
+ * drm_xe_gem_create.cpu_caching is DRM_XE_GEM_CPU_CACHING_WB. The KMD
+ * will extract the coherency mode from the @pat_index and reject if
+ * there is a mismatch (see note below for pre-MTL platforms).
+ *
+ * Note: On pre-MTL platforms there is only a caching mode and no
+ * explicit coherency mode, but on such hardware there is always a
+	 * shared-LLC (or the device is a dGPU), so all GT memory accesses are coherent with
+ * CPU caches even with the caching mode set as uncached. It's only the
+ * display engine that is incoherent (on dgpu it must be in VRAM which
+ * is always mapped as WC on the CPU). However to keep the uapi somewhat
+ * consistent with newer platforms the KMD groups the different cache
+ * levels into the following coherency buckets on all pre-MTL platforms:
+ *
+ * ppGTT UC -> COH_NONE
+ * ppGTT WC -> COH_NONE
+ * ppGTT WT -> COH_NONE
+ * ppGTT WB -> COH_AT_LEAST_1WAY
+ *
+	 * In practice UC/WC/WT should only ever be used for scanout surfaces on
+ * such platforms (or perhaps in general for dma-buf if shared with
+ * another device) since it is only the display engine that is actually
+ * incoherent. Everything else should typically use WB given that we
+ * have a shared-LLC. On MTL+ this completely changes and the HW
+ * defines the coherency mode as part of the @pat_index, where
+ * incoherent GT access is possible.
+ *
+ * Note: For userptr and externally imported dma-buf the kernel expects
+ * either 1WAY or 2WAY for the @pat_index.
+ *
+ * For DRM_XE_VM_BIND_FLAG_NULL bindings there are no KMD restrictions
+ * on the @pat_index. For such mappings there is no actual memory being
+ * mapped (the address in the PTE is invalid), so the various PAT memory
+ * attributes likely do not apply. Simply leaving as zero is one
+ * option (still a valid pat_index).
+ */
+ __u16 pat_index;
+
+ /** @pad: MBZ */
+ __u16 pad;
+
+ union {
+ /**
+ * @obj_offset: Offset into the object, MBZ for CLEAR_RANGE,
+ * ignored for unbind
+ */
+ __u64 obj_offset;
+
+ /** @userptr: user pointer to bind on */
+ __u64 userptr;
+ };
+
+ /**
+ * @range: Number of bytes from the object to bind to addr, MBZ for UNMAP_ALL
+ */
+ __u64 range;
+
+ /** @addr: Address to operate on, MBZ for UNMAP_ALL */
+ __u64 addr;
+
+#define DRM_XE_VM_BIND_OP_MAP 0x0
+#define DRM_XE_VM_BIND_OP_UNMAP 0x1
+#define DRM_XE_VM_BIND_OP_MAP_USERPTR 0x2
+#define DRM_XE_VM_BIND_OP_UNMAP_ALL 0x3
+#define DRM_XE_VM_BIND_OP_PREFETCH 0x4
+ /** @op: Bind operation to perform */
+ __u32 op;
+
+#define DRM_XE_VM_BIND_FLAG_NULL (1 << 2)
+#define DRM_XE_VM_BIND_FLAG_DUMPABLE (1 << 3)
+ /** @flags: Bind flags */
+ __u32 flags;
+
+ /**
+ * @prefetch_mem_region_instance: Memory region to prefetch VMA to.
+ * It is a region instance, not a mask.
+ * To be used only with %DRM_XE_VM_BIND_OP_PREFETCH operation.
+ */
+ __u32 prefetch_mem_region_instance;
+
+ /** @pad2: MBZ */
+ __u32 pad2;
+
+ /** @reserved: Reserved */
+ __u64 reserved[3];
+};
+
+/**
+ * struct drm_xe_vm_bind - Input of &DRM_IOCTL_XE_VM_BIND
+ *
+ * Below is an example of a minimal use of @drm_xe_vm_bind to
+ * asynchronously bind the buffer `data` at address `BIND_ADDRESS` to
+ * illustrate `userptr`. It can be synchronized by using the example
+ * provided for @drm_xe_sync.
+ *
+ * .. code-block:: C
+ *
+ * data = aligned_alloc(ALIGNMENT, BO_SIZE);
+ * struct drm_xe_vm_bind bind = {
+ * .vm_id = vm,
+ * .num_binds = 1,
+ * .bind.obj = 0,
+ * .bind.obj_offset = to_user_pointer(data),
+ * .bind.range = BO_SIZE,
+ * .bind.addr = BIND_ADDRESS,
+ * .bind.op = DRM_XE_VM_BIND_OP_MAP_USERPTR,
+ * .bind.flags = 0,
+ * .num_syncs = 1,
+ * .syncs = &sync,
+ * .exec_queue_id = 0,
+ * };
+ * ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
+ *
+ */
+struct drm_xe_vm_bind {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @vm_id: The ID of the VM to bind to */
+ __u32 vm_id;
+
+ /**
+	 * @exec_queue_id: exec_queue_id, must be of class DRM_XE_ENGINE_CLASS_VM_BIND,
+	 * and the exec queue must have the same vm_id. If zero, the default VM bind engine
+ * is used.
+ */
+ __u32 exec_queue_id;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+ /** @num_binds: number of binds in this IOCTL */
+ __u32 num_binds;
+
+ union {
+ /** @bind: used if num_binds == 1 */
+ struct drm_xe_vm_bind_op bind;
+
+ /**
+ * @vector_of_binds: userptr to array of struct
+ * drm_xe_vm_bind_op if num_binds > 1
+ */
+ __u64 vector_of_binds;
+ };
+
+ /** @pad2: MBZ */
+ __u32 pad2;
+
+	/** @num_syncs: number of syncs to wait on */
+ __u32 num_syncs;
+
+ /** @syncs: pointer to struct drm_xe_sync array */
+ __u64 syncs;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_exec_queue_create - Input of &DRM_IOCTL_XE_EXEC_QUEUE_CREATE
+ *
+ * The example below shows how to use @drm_xe_exec_queue_create to create
+ * a simple exec_queue (no parallel submission) of class
+ * &DRM_XE_ENGINE_CLASS_RENDER.
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_engine_class_instance instance = {
+ * .engine_class = DRM_XE_ENGINE_CLASS_RENDER,
+ * };
+ * struct drm_xe_exec_queue_create exec_queue_create = {
+ * .extensions = 0,
+ * .vm_id = vm,
+ *          .width = 1,
+ *          .num_placements = 1,
+ * .instances = to_user_pointer(&instance),
+ * };
+ * ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &exec_queue_create);
+ *
+ */
+struct drm_xe_exec_queue_create {
+#define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY 0
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY 0
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE 1
+
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+	/** @width: submission width (number of batch buffers per exec) for this exec queue */
+ __u16 width;
+
+ /** @num_placements: number of valid placements for this exec queue */
+ __u16 num_placements;
+
+ /** @vm_id: VM to use for this exec queue */
+ __u32 vm_id;
+
+ /** @flags: MBZ */
+ __u32 flags;
+
+ /** @exec_queue_id: Returned exec queue ID */
+ __u32 exec_queue_id;
+
+ /**
+ * @instances: user pointer to a 2-d array of struct
+ * drm_xe_engine_class_instance
+ *
+ * length = width (i) * num_placements (j)
+ * index = j + i * width
+ */
+ __u64 instances;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_exec_queue_destroy - Input of &DRM_IOCTL_XE_EXEC_QUEUE_DESTROY
+ */
+struct drm_xe_exec_queue_destroy {
+ /** @exec_queue_id: Exec queue ID */
+ __u32 exec_queue_id;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_exec_queue_get_property - Input of &DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY
+ *
+ * The @property can be:
+ * - %DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN
+ */
+struct drm_xe_exec_queue_get_property {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @exec_queue_id: Exec queue ID */
+ __u32 exec_queue_id;
+
+#define DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN 0
+ /** @property: property to get */
+ __u32 property;
+
+ /** @value: property value */
+ __u64 value;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
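(Usage sketch; `exec_queue` is assumed to be a previously created queue, and
reading a nonzero @value as "banned" is an assumption, not spelled out here.)

    struct drm_xe_exec_queue_get_property prop = {
            .exec_queue_id = exec_queue,
            .property = DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN,
    };
    ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY, &prop);
    /* assumption: prop.value != 0 means the queue was banned */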
+/**
+ * struct drm_xe_sync - sync object
+ *
+ * The @type can be:
+ * - %DRM_XE_SYNC_TYPE_SYNCOBJ
+ * - %DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ
+ * - %DRM_XE_SYNC_TYPE_USER_FENCE
+ *
+ * and the @flags can be:
+ * - %DRM_XE_SYNC_FLAG_SIGNAL
+ *
+ * A minimal use of @drm_xe_sync looks like this:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_sync sync = {
+ * .flags = DRM_XE_SYNC_FLAG_SIGNAL,
+ * .type = DRM_XE_SYNC_TYPE_SYNCOBJ,
+ * };
+ * struct drm_syncobj_create syncobj_create = { 0 };
+ * ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &syncobj_create);
+ * sync.handle = syncobj_create.handle;
+ * ...
+ * use of &sync in drm_xe_exec or drm_xe_vm_bind
+ * ...
+ * struct drm_syncobj_wait wait = {
+ * .handles = &sync.handle,
+ * .timeout_nsec = INT64_MAX,
+ * .count_handles = 1,
+ * .flags = 0,
+ * .first_signaled = 0,
+ * .pad = 0,
+ * };
+ * ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
+ */
+struct drm_xe_sync {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+#define DRM_XE_SYNC_TYPE_SYNCOBJ 0x0
+#define DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ 0x1
+#define DRM_XE_SYNC_TYPE_USER_FENCE 0x2
+	/** @type: Type of this sync object */
+ __u32 type;
+
+#define DRM_XE_SYNC_FLAG_SIGNAL (1 << 0)
+ /** @flags: Sync Flags */
+ __u32 flags;
+
+ union {
+ /** @handle: Handle for the object */
+ __u32 handle;
+
+ /**
+		 * @addr: Address of user fence. When the sync is passed in via the exec
+		 * IOCTL this is a GPU address in the VM. When passed in via the
+		 * VM bind IOCTL this is a user pointer. In either case, it is
+		 * the user's responsibility that this address is present and
+		 * mapped when the user fence is signalled. Must be qword
+ * aligned.
+ */
+ __u64 addr;
+ };
+
+ /**
+	 * @timeline_value: Input for the timeline sync object. Needs to be
+	 * different from 0 when used with %DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ.
+ */
+ __u64 timeline_value;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_exec - Input of &DRM_IOCTL_XE_EXEC
+ *
+ * This is an example to use @drm_xe_exec for execution of the object
+ * at BIND_ADDRESS (see example in @drm_xe_vm_bind) by an exec_queue
+ * (see example in @drm_xe_exec_queue_create). It can be synchronized
+ * by using the example provided for @drm_xe_sync.
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_exec exec = {
+ * .exec_queue_id = exec_queue,
+ * .syncs = &sync,
+ * .num_syncs = 1,
+ * .address = BIND_ADDRESS,
+ * .num_batch_buffer = 1,
+ * };
+ * ioctl(fd, DRM_IOCTL_XE_EXEC, &exec);
+ *
+ */
+struct drm_xe_exec {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @exec_queue_id: Exec queue ID for the batch buffer */
+ __u32 exec_queue_id;
+
+	/** @num_syncs: Number of struct drm_xe_sync entries in the array. */
+ __u32 num_syncs;
+
+ /** @syncs: Pointer to struct drm_xe_sync array. */
+ __u64 syncs;
+
+ /**
+	 * @address: address of the batch buffer if num_batch_buffer == 1,
+	 * otherwise a pointer to an array of batch buffer addresses
+ */
+ __u64 address;
+
+ /**
+	 * @num_batch_buffer: number of batch buffers in this exec, must match
+ * the width of the engine
+ */
+ __u16 num_batch_buffer;
+
+ /** @pad: MBZ */
+ __u16 pad[3];
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_wait_user_fence - Input of &DRM_IOCTL_XE_WAIT_USER_FENCE
+ *
+ * Wait on user fence, XE will wake up on every HW engine interrupt in the
+ * instances list and check if user fence is complete::
+ *
+ * (*addr & MASK) OP (VALUE & MASK)
+ *
+ * Returns to user on user fence completion or timeout.
+ *
+ * The @op can be:
+ * - %DRM_XE_UFENCE_WAIT_OP_EQ
+ * - %DRM_XE_UFENCE_WAIT_OP_NEQ
+ * - %DRM_XE_UFENCE_WAIT_OP_GT
+ * - %DRM_XE_UFENCE_WAIT_OP_GTE
+ * - %DRM_XE_UFENCE_WAIT_OP_LT
+ * - %DRM_XE_UFENCE_WAIT_OP_LTE
+ *
+ * and the @flags can be:
+ * - %DRM_XE_UFENCE_WAIT_FLAG_ABSTIME
+ *
+ * The @mask values can be for example:
+ * - 0xffu for u8
+ * - 0xffffu for u16
+ * - 0xffffffffu for u32
+ * - 0xffffffffffffffffu for u64
+ */
+struct drm_xe_wait_user_fence {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /**
+	 * @addr: user pointer address to wait on, must be qword aligned
+ */
+ __u64 addr;
+
+#define DRM_XE_UFENCE_WAIT_OP_EQ 0x0
+#define DRM_XE_UFENCE_WAIT_OP_NEQ 0x1
+#define DRM_XE_UFENCE_WAIT_OP_GT 0x2
+#define DRM_XE_UFENCE_WAIT_OP_GTE 0x3
+#define DRM_XE_UFENCE_WAIT_OP_LT 0x4
+#define DRM_XE_UFENCE_WAIT_OP_LTE 0x5
+ /** @op: wait operation (type of comparison) */
+ __u16 op;
+
+#define DRM_XE_UFENCE_WAIT_FLAG_ABSTIME (1 << 0)
+ /** @flags: wait flags */
+ __u16 flags;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+ /** @value: compare value */
+ __u64 value;
+
+ /** @mask: comparison mask */
+ __u64 mask;
+
+ /**
+ * @timeout: how long to wait before bailing, value in nanoseconds.
+	 * Without the DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flag set (relative timeout)
+	 * it contains the timeout expressed in nanoseconds to wait (the fence will
+	 * expire at now() + timeout).
+	 * When the DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flag is set (absolute timeout) the
+	 * wait will end at timeout (uses the system MONOTONIC_CLOCK).
+	 * Passing a negative timeout leads to a never-ending wait.
+	 *
+	 * On relative timeout this value is updated with the timeout left
+	 * (for restarting the call in case of signal delivery).
+	 * On absolute timeout this value stays intact (a restarted call will
+	 * still expire at the same point in time).
+ */
+ __s64 timeout;
+
+ /** @exec_queue_id: exec_queue_id returned from xe_exec_queue_create_ioctl */
+ __u32 exec_queue_id;
+
+ /** @pad2: MBZ */
+ __u32 pad2;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
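(Sketch matching the comparison formula above: wait up to one second for a
qword fence at `fence_addr` to become equal to `EXPECTED`; both names are
hypothetical, with the value assumed to be written by the GPU on completion.)

    struct drm_xe_wait_user_fence wait = {
            .addr = (uintptr_t)fence_addr,  /* must be qword aligned */
            .op = DRM_XE_UFENCE_WAIT_OP_EQ,
            .value = EXPECTED,
            .mask = 0xffffffffffffffffull,  /* compare the full u64 */
            .timeout = 1000000000ll,        /* relative timeout, in ns */
    };
    ioctl(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait);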
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _UAPI_XE_DRM_H_ */
diff --git a/include/uapi/linux/affs_hardblocks.h b/include/uapi/linux/affs_hardblocks.h
index 5e2fb8481252..a5aff2eb5f70 100644
--- a/include/uapi/linux/affs_hardblocks.h
+++ b/include/uapi/linux/affs_hardblocks.h
@@ -7,42 +7,42 @@
/* Just the needed definitions for the RDB of an Amiga HD. */
struct RigidDiskBlock {
- __u32 rdb_ID;
+ __be32 rdb_ID;
__be32 rdb_SummedLongs;
- __s32 rdb_ChkSum;
- __u32 rdb_HostID;
+ __be32 rdb_ChkSum;
+ __be32 rdb_HostID;
__be32 rdb_BlockBytes;
- __u32 rdb_Flags;
- __u32 rdb_BadBlockList;
+ __be32 rdb_Flags;
+ __be32 rdb_BadBlockList;
__be32 rdb_PartitionList;
- __u32 rdb_FileSysHeaderList;
- __u32 rdb_DriveInit;
- __u32 rdb_Reserved1[6];
- __u32 rdb_Cylinders;
- __u32 rdb_Sectors;
- __u32 rdb_Heads;
- __u32 rdb_Interleave;
- __u32 rdb_Park;
- __u32 rdb_Reserved2[3];
- __u32 rdb_WritePreComp;
- __u32 rdb_ReducedWrite;
- __u32 rdb_StepRate;
- __u32 rdb_Reserved3[5];
- __u32 rdb_RDBBlocksLo;
- __u32 rdb_RDBBlocksHi;
- __u32 rdb_LoCylinder;
- __u32 rdb_HiCylinder;
- __u32 rdb_CylBlocks;
- __u32 rdb_AutoParkSeconds;
- __u32 rdb_HighRDSKBlock;
- __u32 rdb_Reserved4;
+ __be32 rdb_FileSysHeaderList;
+ __be32 rdb_DriveInit;
+ __be32 rdb_Reserved1[6];
+ __be32 rdb_Cylinders;
+ __be32 rdb_Sectors;
+ __be32 rdb_Heads;
+ __be32 rdb_Interleave;
+ __be32 rdb_Park;
+ __be32 rdb_Reserved2[3];
+ __be32 rdb_WritePreComp;
+ __be32 rdb_ReducedWrite;
+ __be32 rdb_StepRate;
+ __be32 rdb_Reserved3[5];
+ __be32 rdb_RDBBlocksLo;
+ __be32 rdb_RDBBlocksHi;
+ __be32 rdb_LoCylinder;
+ __be32 rdb_HiCylinder;
+ __be32 rdb_CylBlocks;
+ __be32 rdb_AutoParkSeconds;
+ __be32 rdb_HighRDSKBlock;
+ __be32 rdb_Reserved4;
char rdb_DiskVendor[8];
char rdb_DiskProduct[16];
char rdb_DiskRevision[4];
char rdb_ControllerVendor[8];
char rdb_ControllerProduct[16];
char rdb_ControllerRevision[4];
- __u32 rdb_Reserved5[10];
+ __be32 rdb_Reserved5[10];
};
#define IDNAME_RIGIDDISK 0x5244534B /* "RDSK" */
@@ -50,16 +50,16 @@ struct RigidDiskBlock {
struct PartitionBlock {
__be32 pb_ID;
__be32 pb_SummedLongs;
- __s32 pb_ChkSum;
- __u32 pb_HostID;
+ __be32 pb_ChkSum;
+ __be32 pb_HostID;
__be32 pb_Next;
- __u32 pb_Flags;
- __u32 pb_Reserved1[2];
- __u32 pb_DevFlags;
+ __be32 pb_Flags;
+ __be32 pb_Reserved1[2];
+ __be32 pb_DevFlags;
__u8 pb_DriveName[32];
- __u32 pb_Reserved2[15];
+ __be32 pb_Reserved2[15];
__be32 pb_Environment[17];
- __u32 pb_EReserved[15];
+ __be32 pb_EReserved[15];
};
#define IDNAME_PARTITION 0x50415254 /* "PART" */
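(The hunks above only re-annotate the on-disk fields as big-endian (__be32),
so endianness checkers such as sparse can flag missing byte swaps; consumers
still convert explicitly. A userspace sketch:)

    #include <endian.h>

    uint32_t cylinders = be32toh(rdb->rdb_Cylinders);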
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index 5f636b5afcd7..d44a8118b2ed 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -251,20 +251,22 @@ struct binder_extended_error {
__s32 param;
};
-#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
-#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64)
-#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32)
-#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, __s32)
-#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32)
-#define BINDER_THREAD_EXIT _IOW('b', 8, __s32)
-#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
-#define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info)
-#define BINDER_GET_NODE_INFO_FOR_REF _IOWR('b', 12, struct binder_node_info_for_ref)
-#define BINDER_SET_CONTEXT_MGR_EXT _IOW('b', 13, struct flat_binder_object)
-#define BINDER_FREEZE _IOW('b', 14, struct binder_freeze_info)
-#define BINDER_GET_FROZEN_INFO _IOWR('b', 15, struct binder_frozen_status_info)
-#define BINDER_ENABLE_ONEWAY_SPAM_DETECTION _IOW('b', 16, __u32)
-#define BINDER_GET_EXTENDED_ERROR _IOWR('b', 17, struct binder_extended_error)
+enum {
+ BINDER_WRITE_READ = _IOWR('b', 1, struct binder_write_read),
+ BINDER_SET_IDLE_TIMEOUT = _IOW('b', 3, __s64),
+ BINDER_SET_MAX_THREADS = _IOW('b', 5, __u32),
+ BINDER_SET_IDLE_PRIORITY = _IOW('b', 6, __s32),
+ BINDER_SET_CONTEXT_MGR = _IOW('b', 7, __s32),
+ BINDER_THREAD_EXIT = _IOW('b', 8, __s32),
+ BINDER_VERSION = _IOWR('b', 9, struct binder_version),
+ BINDER_GET_NODE_DEBUG_INFO = _IOWR('b', 11, struct binder_node_debug_info),
+ BINDER_GET_NODE_INFO_FOR_REF = _IOWR('b', 12, struct binder_node_info_for_ref),
+ BINDER_SET_CONTEXT_MGR_EXT = _IOW('b', 13, struct flat_binder_object),
+ BINDER_FREEZE = _IOW('b', 14, struct binder_freeze_info),
+ BINDER_GET_FROZEN_INFO = _IOWR('b', 15, struct binder_frozen_status_info),
+ BINDER_ENABLE_ONEWAY_SPAM_DETECTION = _IOW('b', 16, __u32),
+ BINDER_GET_EXTENDED_ERROR = _IOWR('b', 17, struct binder_extended_error),
+};
/*
* NOTE: Two special error codes you should check for when calling
diff --git a/include/uapi/linux/auto_dev-ioctl.h b/include/uapi/linux/auto_dev-ioctl.h
index 62e625356dc8..08be539605fc 100644
--- a/include/uapi/linux/auto_dev-ioctl.h
+++ b/include/uapi/linux/auto_dev-ioctl.h
@@ -109,7 +109,7 @@ struct autofs_dev_ioctl {
struct args_ismountpoint ismountpoint;
};
- char path[0];
+ char path[];
};
static inline void init_autofs_dev_ioctl(struct autofs_dev_ioctl *in)
diff --git a/include/uapi/linux/batadv_packet.h b/include/uapi/linux/batadv_packet.h
index 9204e4494b25..6e25753015df 100644
--- a/include/uapi/linux/batadv_packet.h
+++ b/include/uapi/linux/batadv_packet.h
@@ -116,6 +116,9 @@ enum batadv_icmp_packettype {
* only need routable IPv4 multicast packets we signed up for explicitly
* @BATADV_MCAST_WANT_NO_RTR6: we have no IPv6 multicast router and therefore
* only need routable IPv6 multicast packets we signed up for explicitly
+ * @BATADV_MCAST_HAVE_MC_PTYPE_CAPA: we can parse, receive and forward
+ * batman-adv multicast packets with a multicast tracker TVLV. And all our
+ * hard interfaces have an MTU of at least 1280 bytes.
*/
enum batadv_mcast_flags {
BATADV_MCAST_WANT_ALL_UNSNOOPABLES = 1UL << 0,
@@ -123,6 +126,7 @@ enum batadv_mcast_flags {
BATADV_MCAST_WANT_ALL_IPV6 = 1UL << 2,
BATADV_MCAST_WANT_NO_RTR4 = 1UL << 3,
BATADV_MCAST_WANT_NO_RTR6 = 1UL << 4,
+ BATADV_MCAST_HAVE_MC_PTYPE_CAPA = 1UL << 5,
};
/* tt data subtypes */
@@ -174,14 +178,16 @@ enum batadv_bla_claimframe {
* @BATADV_TVLV_TT: translation table tvlv
* @BATADV_TVLV_ROAM: roaming advertisement tvlv
* @BATADV_TVLV_MCAST: multicast capability tvlv
+ * @BATADV_TVLV_MCAST_TRACKER: multicast tracker tvlv
*/
enum batadv_tvlv_type {
- BATADV_TVLV_GW = 0x01,
- BATADV_TVLV_DAT = 0x02,
- BATADV_TVLV_NC = 0x03,
- BATADV_TVLV_TT = 0x04,
- BATADV_TVLV_ROAM = 0x05,
- BATADV_TVLV_MCAST = 0x06,
+ BATADV_TVLV_GW = 0x01,
+ BATADV_TVLV_DAT = 0x02,
+ BATADV_TVLV_NC = 0x03,
+ BATADV_TVLV_TT = 0x04,
+ BATADV_TVLV_ROAM = 0x05,
+ BATADV_TVLV_MCAST = 0x06,
+ BATADV_TVLV_MCAST_TRACKER = 0x07,
};
#pragma pack(2)
@@ -488,6 +494,25 @@ struct batadv_bcast_packet {
};
/**
+ * struct batadv_mcast_packet - multicast packet for network payload
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @reserved: reserved byte for alignment
+ * @tvlv_len: length of the appended tvlv buffer (in bytes)
+ */
+struct batadv_mcast_packet {
+ __u8 packet_type;
+ __u8 version;
+ __u8 ttl;
+ __u8 reserved;
+ __be16 tvlv_len;
+ /* "4 bytes boundary + 2 bytes" long to make the payload after the
+ * following ethernet header again 4 bytes boundary aligned
+ */
+};
+
+/**
* struct batadv_coded_packet - network coded packet
* @packet_type: batman-adv packet type, part of the general header
* @version: batman-adv protocol version, part of the general header
@@ -628,6 +653,14 @@ struct batadv_tvlv_mcast_data {
__u8 reserved[3];
};
+/**
+ * struct batadv_tvlv_mcast_tracker - payload of a multicast tracker tvlv
+ * @num_dests: number of subsequent destination originator MAC addresses
+ */
+struct batadv_tvlv_mcast_tracker {
+ __be16 num_dests;
+};
+
#pragma pack()
#endif /* _UAPI_LINUX_BATADV_PACKET_H_ */
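(Since @num_dests counts the destination MAC addresses that directly follow
the struct, a receiver could derive the tracker TVLV's total length as in
this sketch, with ETH_ALEN being 6:)

    size_t tvlv_len = sizeof(struct batadv_tvlv_mcast_tracker) +
                      be16toh(tracker->num_dests) * ETH_ALEN;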
diff --git a/include/uapi/linux/blkzoned.h b/include/uapi/linux/blkzoned.h
index b80fcc9ea525..f85743ef6e7d 100644
--- a/include/uapi/linux/blkzoned.h
+++ b/include/uapi/linux/blkzoned.h
@@ -51,13 +51,13 @@ enum blk_zone_type {
*
* The Zone Condition state machine in the ZBC/ZAC standards maps the above
 * definitions as:
- * - ZC1: Empty | BLK_ZONE_EMPTY
+ * - ZC1: Empty | BLK_ZONE_COND_EMPTY
* - ZC2: Implicit Open | BLK_ZONE_COND_IMP_OPEN
* - ZC3: Explicit Open | BLK_ZONE_COND_EXP_OPEN
- * - ZC4: Closed | BLK_ZONE_CLOSED
- * - ZC5: Full | BLK_ZONE_FULL
- * - ZC6: Read Only | BLK_ZONE_READONLY
- * - ZC7: Offline | BLK_ZONE_OFFLINE
+ * - ZC4: Closed | BLK_ZONE_COND_CLOSED
+ * - ZC5: Full | BLK_ZONE_COND_FULL
+ * - ZC6: Read Only | BLK_ZONE_COND_READONLY
+ * - ZC7: Offline | BLK_ZONE_COND_OFFLINE
*
* Conditions 0x5 to 0xC are reserved by the current ZBC/ZAC spec and should
* be considered invalid.
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index c994ff5b157c..754e68ca8744 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -19,6 +19,7 @@
/* ld/ldx fields */
#define BPF_DW 0x18 /* double word (64-bit) */
+#define BPF_MEMSX 0x80 /* load with sign extension */
#define BPF_ATOMIC 0xc0 /* atomic memory ops - op type in immediate */
#define BPF_XADD 0xc0 /* exclusive add - legacy name */
@@ -931,7 +932,14 @@ enum bpf_map_type {
*/
BPF_MAP_TYPE_CGROUP_STORAGE = BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED,
BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
- BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
+ BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED,
+ /* BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE is available to bpf programs
+ * attaching to a cgroup. The new mechanism (BPF_MAP_TYPE_CGRP_STORAGE +
+ * local percpu kptr) supports all BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE
+	 * functionality and more. So mark BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE
+ * deprecated.
+ */
+ BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED,
BPF_MAP_TYPE_QUEUE,
BPF_MAP_TYPE_STACK,
BPF_MAP_TYPE_SK_STORAGE,
@@ -1036,6 +1044,16 @@ enum bpf_attach_type {
BPF_LSM_CGROUP,
BPF_STRUCT_OPS,
BPF_NETFILTER,
+ BPF_TCX_INGRESS,
+ BPF_TCX_EGRESS,
+ BPF_TRACE_UPROBE_MULTI,
+ BPF_CGROUP_UNIX_CONNECT,
+ BPF_CGROUP_UNIX_SENDMSG,
+ BPF_CGROUP_UNIX_RECVMSG,
+ BPF_CGROUP_UNIX_GETPEERNAME,
+ BPF_CGROUP_UNIX_GETSOCKNAME,
+ BPF_NETKIT_PRIMARY,
+ BPF_NETKIT_PEER,
__MAX_BPF_ATTACH_TYPE
};
@@ -1053,8 +1071,22 @@ enum bpf_link_type {
BPF_LINK_TYPE_KPROBE_MULTI = 8,
BPF_LINK_TYPE_STRUCT_OPS = 9,
BPF_LINK_TYPE_NETFILTER = 10,
+ BPF_LINK_TYPE_TCX = 11,
+ BPF_LINK_TYPE_UPROBE_MULTI = 12,
+ BPF_LINK_TYPE_NETKIT = 13,
+ __MAX_BPF_LINK_TYPE,
+};
- MAX_BPF_LINK_TYPE,
+#define MAX_BPF_LINK_TYPE __MAX_BPF_LINK_TYPE
+
+enum bpf_perf_event_type {
+ BPF_PERF_EVENT_UNSPEC = 0,
+ BPF_PERF_EVENT_UPROBE = 1,
+ BPF_PERF_EVENT_URETPROBE = 2,
+ BPF_PERF_EVENT_KPROBE = 3,
+ BPF_PERF_EVENT_KRETPROBE = 4,
+ BPF_PERF_EVENT_TRACEPOINT = 5,
+ BPF_PERF_EVENT_EVENT = 6,
};
/* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
@@ -1103,7 +1135,12 @@ enum bpf_link_type {
*/
#define BPF_F_ALLOW_OVERRIDE (1U << 0)
#define BPF_F_ALLOW_MULTI (1U << 1)
+/* Generic attachment flags. */
#define BPF_F_REPLACE (1U << 2)
+#define BPF_F_BEFORE (1U << 3)
+#define BPF_F_AFTER (1U << 4)
+#define BPF_F_ID (1U << 5)
+#define BPF_F_LINK BPF_F_LINK /* 1 << 13 */
/* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
* verifier will perform strict alignment checking as if the kernel
@@ -1165,10 +1202,27 @@ enum bpf_link_type {
*/
#define BPF_F_XDP_DEV_BOUND_ONLY (1U << 6)
+/* The verifier internal test flag. Behavior is undefined */
+#define BPF_F_TEST_REG_INVARIANTS (1U << 7)
+
/* link_create.kprobe_multi.flags used in LINK_CREATE command for
* BPF_TRACE_KPROBE_MULTI attach type to create return probe.
*/
-#define BPF_F_KPROBE_MULTI_RETURN (1U << 0)
+enum {
+ BPF_F_KPROBE_MULTI_RETURN = (1U << 0)
+};
+
+/* link_create.uprobe_multi.flags used in LINK_CREATE command for
+ * BPF_TRACE_UPROBE_MULTI attach type to create return probe.
+ */
+enum {
+ BPF_F_UPROBE_MULTI_RETURN = (1U << 0)
+};
+
+/* link_create.netfilter.flags used in LINK_CREATE command for
+ * BPF_PROG_TYPE_NETFILTER to enable IP packet defragmentation.
+ */
+#define BPF_F_NETFILTER_IP_DEFRAG (1U << 0)
/* When BPF ldimm64's insn[0].src_reg != 0 then this can have
* the following extensions:
@@ -1273,6 +1327,9 @@ enum {
 /* Create a map that will be registered/unregistered by the backing bpf_link */
BPF_F_LINK = (1U << 13),
+
+/* Get path from provided FD in BPF_OBJ_PIN/BPF_OBJ_GET commands */
+ BPF_F_PATH_FD = (1U << 14),
};
/* Flags for BPF_PROG_QUERY. */
@@ -1421,17 +1478,29 @@ union bpf_attr {
__aligned_u64 pathname;
__u32 bpf_fd;
__u32 file_flags;
+ /* Same as dirfd in openat() syscall; see openat(2)
+ * manpage for details of path FD and pathname semantics;
+	 * path_fd should be accompanied by the BPF_F_PATH_FD flag set in
+ * file_flags field, otherwise it should be set to zero;
+ * if BPF_F_PATH_FD flag is not set, AT_FDCWD is assumed.
+ */
+ __s32 path_fd;
};
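(Sketch: pinning a program relative to an already-open bpffs directory FD,
analogous to openat(2); `prog_fd` and `bpffs_dir_fd` are assumed to exist.)

    union bpf_attr attr;

    memset(&attr, 0, sizeof(attr));
    attr.pathname = (uintptr_t)"my_prog";  /* relative to path_fd */
    attr.bpf_fd = prog_fd;
    attr.file_flags = BPF_F_PATH_FD;
    attr.path_fd = bpffs_dir_fd;
    syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));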
struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
- __u32 target_fd; /* container object to attach to */
- __u32 attach_bpf_fd; /* eBPF program to attach */
+ union {
+ __u32 target_fd; /* target object to attach to or ... */
+ __u32 target_ifindex; /* target ifindex */
+ };
+ __u32 attach_bpf_fd;
__u32 attach_type;
__u32 attach_flags;
- __u32 replace_bpf_fd; /* previously attached eBPF
- * program to replace if
- * BPF_F_REPLACE is used
- */
+ __u32 replace_bpf_fd;
+ union {
+ __u32 relative_fd;
+ __u32 relative_id;
+ };
+ __u64 expected_revision;
};
struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
@@ -1477,16 +1546,26 @@ union bpf_attr {
} info;
struct { /* anonymous struct used by BPF_PROG_QUERY command */
- __u32 target_fd; /* container object to query */
+ union {
+ __u32 target_fd; /* target object to query or ... */
+ __u32 target_ifindex; /* target ifindex */
+ };
__u32 attach_type;
__u32 query_flags;
__u32 attach_flags;
__aligned_u64 prog_ids;
- __u32 prog_cnt;
+ union {
+ __u32 prog_cnt;
+ __u32 count;
+ };
+ __u32 :32;
/* output: per-program attach_flags.
* not allowed to be set during effective query.
*/
__aligned_u64 prog_attach_flags;
+ __aligned_u64 link_ids;
+ __aligned_u64 link_attach_flags;
+ __u64 revision;
} query;
struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */
@@ -1529,13 +1608,13 @@ union bpf_attr {
__u32 map_fd; /* struct_ops to attach */
};
union {
- __u32 target_fd; /* object to attach to */
- __u32 target_ifindex; /* target ifindex */
+ __u32 target_fd; /* target object to attach to or ... */
+ __u32 target_ifindex; /* target ifindex */
};
__u32 attach_type; /* attach type */
__u32 flags; /* extra flags */
union {
- __u32 target_btf_id; /* btf_id of target to attach to */
+ __u32 target_btf_id; /* btf_id of target to attach to */
struct {
__aligned_u64 iter_info; /* extra bpf_iter_link_info */
__u32 iter_info_len; /* iter_info length */
@@ -1569,6 +1648,29 @@ union bpf_attr {
__s32 priority;
__u32 flags;
} netfilter;
+ struct {
+ union {
+ __u32 relative_fd;
+ __u32 relative_id;
+ };
+ __u64 expected_revision;
+ } tcx;
+ struct {
+ __aligned_u64 path;
+ __aligned_u64 offsets;
+ __aligned_u64 ref_ctr_offsets;
+ __aligned_u64 cookies;
+ __u32 cnt;
+ __u32 flags;
+ __u32 pid;
+ } uprobe_multi;
+ struct {
+ union {
+ __u32 relative_fd;
+ __u32 relative_id;
+ };
+ __u64 expected_revision;
+ } netkit;
};
} link_create;
@@ -1887,7 +1989,9 @@ union bpf_attr {
* performed again, if the helper is used in combination with
* direct packet access.
* Return
- * 0 on success, or a negative error in case of failure.
+ * 0 on success, or a negative error in case of failure. Positive
+ * error indicates a potential drop or congestion in the target
+ * device. The particular positive error codes are not defined.
*
* u64 bpf_get_current_pid_tgid(void)
* Description
@@ -2620,8 +2724,8 @@ union bpf_attr {
* *bpf_socket* should be one of the following:
*
* * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**.
- * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**
- * and **BPF_CGROUP_INET6_CONNECT**.
+ * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**,
+ * **BPF_CGROUP_INET6_CONNECT** and **BPF_CGROUP_UNIX_CONNECT**.
*
* This helper actually implements a subset of **setsockopt()**.
* It supports the following *level*\ s:
@@ -2859,8 +2963,8 @@ union bpf_attr {
* *bpf_socket* should be one of the following:
*
* * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**.
- * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**
- * and **BPF_CGROUP_INET6_CONNECT**.
+ * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**,
+ * **BPF_CGROUP_INET6_CONNECT** and **BPF_CGROUP_UNIX_CONNECT**.
*
* This helper actually implements a subset of **getsockopt()**.
* It supports the same set of *optname*\ s that is supported by
@@ -3168,6 +3272,10 @@ union bpf_attr {
* **BPF_FIB_LOOKUP_DIRECT**
* Do a direct table lookup vs full lookup using FIB
* rules.
+ * **BPF_FIB_LOOKUP_TBID**
+ * Used with BPF_FIB_LOOKUP_DIRECT.
+ * Use the routing table ID present in *params*->tbid
+ * for the fib lookup.
* **BPF_FIB_LOOKUP_OUTPUT**
* Perform lookup from an egress perspective (default is
* ingress).
@@ -3176,6 +3284,11 @@ union bpf_attr {
* and *params*->smac will not be set as output. A common
* use case is to call **bpf_redirect_neigh**\ () after
* doing **bpf_fib_lookup**\ ().
+ * **BPF_FIB_LOOKUP_SRC**
+ * Derive and set source IP addr in *params*->ipv{4,6}_src
+ * for the nexthop. If the src addr cannot be derived,
+ * **BPF_FIB_LKUP_RET_NO_SRC_ADDR** is returned. In this
+ * case, *params*->dmac and *params*->smac are not set either.
*
* *ctx* is either **struct xdp_md** for XDP programs or
* **struct sk_buff** tc cls_act programs.
@@ -4145,9 +4258,6 @@ union bpf_attr {
* **-EOPNOTSUPP** if the operation is not supported, for example
* a call from outside of TC ingress.
*
- * **-ESOCKTNOSUPPORT** if the socket type is not supported
- * (reuseport).
- *
* long bpf_sk_assign(struct bpf_sk_lookup *ctx, struct bpf_sock *sk, u64 flags)
* Description
* Helper is overloaded depending on BPF program type. This
@@ -4412,6 +4522,8 @@ union bpf_attr {
* long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags)
* Description
* Return a user or a kernel stack in bpf program provided buffer.
+ * Note: the user stack will only be populated if the *task* is
+ * the current task; all other tasks will return -EOPNOTSUPP.
* To achieve this, the helper needs *task*, which is a valid
* pointer to **struct task_struct**. To store the stacktrace, the
* bpf program provides *buf* with a nonnegative *size*.
@@ -4423,6 +4535,7 @@ union bpf_attr {
*
* **BPF_F_USER_STACK**
* Collect a user space stack instead of a kernel stack.
+ * The *task* must be the current task.
* **BPF_F_USER_BUILD_ID**
* Collect buildid+offset instead of ips for user stack,
* only valid if **BPF_F_USER_STACK** is also specified.
@@ -5011,6 +5124,8 @@ union bpf_attr {
* **BPF_F_TIMER_ABS**
* Start the timer in absolute expire value instead of the
* default relative one.
+ * **BPF_F_TIMER_CPU_PIN**
+ * Timer will be pinned to the CPU of the caller.
*
* Return
* 0 on success.
@@ -5030,9 +5145,14 @@ union bpf_attr {
* u64 bpf_get_func_ip(void *ctx)
* Description
* Get address of the traced function (for tracing and kprobe programs).
+ *
+ * When called for kprobe program attached as uprobe it returns
+ * probe address for both entry and return uprobe.
+ *
* Return
- * Address of the traced function.
+ * Address of the traced function for kprobe.
* 0 for kprobes placed within the function (not at the entry).
+ * Address of the probe for uprobe and return uprobe.
*
* u64 bpf_get_attach_cookie(void *ctx)
* Description
@@ -6173,6 +6293,19 @@ struct bpf_sock_tuple {
};
};
+/* (Simplified) user return codes for tcx prog type.
+ * A valid tcx program must return one of these defined values. All other
+ * return codes are reserved for future use. Must remain compatible with
+ * their TC_ACT_* counter-parts. For compatibility in behavior, unknown
+ * return codes are mapped to TCX_NEXT.
+ */
+enum tcx_action_base {
+ TCX_NEXT = -1,
+ TCX_PASS = 0,
+ TCX_DROP = 2,
+ TCX_REDIRECT = 7,
+};
+
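(A minimal tcx program using these return codes might look like this sketch,
assuming libbpf's SEC() macro and the usual BPF program boilerplate.)

    SEC("tcx/ingress")
    int tcx_pass_all(struct __sk_buff *skb)
    {
            /* let every packet continue to the next program / the stack */
            return TCX_PASS;
    }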
struct bpf_xdp_sock {
__u32 queue_id;
};
@@ -6425,6 +6558,56 @@ struct bpf_link_info {
__s32 priority;
__u32 flags;
} netfilter;
+ struct {
+ __aligned_u64 addrs;
+ __u32 count; /* in/out: kprobe_multi function count */
+ __u32 flags;
+ __u64 missed;
+ } kprobe_multi;
+ struct {
+ __aligned_u64 path;
+ __aligned_u64 offsets;
+ __aligned_u64 ref_ctr_offsets;
+ __aligned_u64 cookies;
+ __u32 path_size; /* in/out: real path size on success, including zero byte */
+ __u32 count; /* in/out: uprobe_multi offsets/ref_ctr_offsets/cookies count */
+ __u32 flags;
+ __u32 pid;
+ } uprobe_multi;
+ struct {
+ __u32 type; /* enum bpf_perf_event_type */
+ __u32 :32;
+ union {
+ struct {
+ __aligned_u64 file_name; /* in/out */
+ __u32 name_len;
+ __u32 offset; /* offset from file_name */
+ } uprobe; /* BPF_PERF_EVENT_UPROBE, BPF_PERF_EVENT_URETPROBE */
+ struct {
+ __aligned_u64 func_name; /* in/out */
+ __u32 name_len;
+ __u32 offset; /* offset from func_name */
+ __u64 addr;
+ __u64 missed;
+ } kprobe; /* BPF_PERF_EVENT_KPROBE, BPF_PERF_EVENT_KRETPROBE */
+ struct {
+ __aligned_u64 tp_name; /* in/out */
+ __u32 name_len;
+ } tracepoint; /* BPF_PERF_EVENT_TRACEPOINT */
+ struct {
+ __u64 config;
+ __u32 type;
+ } event; /* BPF_PERF_EVENT_EVENT */
+ };
+ } perf_event;
+ struct {
+ __u32 ifindex;
+ __u32 attach_type;
+ } tcx;
+ struct {
+ __u32 ifindex;
+ __u32 attach_type;
+ } netkit;
};
} __attribute__((aligned(8)));
@@ -6721,6 +6904,7 @@ enum {
BPF_TCP_LISTEN,
BPF_TCP_CLOSING, /* Now a valid state */
BPF_TCP_NEW_SYN_RECV,
+ BPF_TCP_BOUND_INACTIVE,
BPF_TCP_MAX_STATES /* Leave at the end! */
};
@@ -6822,6 +7006,8 @@ enum {
BPF_FIB_LOOKUP_DIRECT = (1U << 0),
BPF_FIB_LOOKUP_OUTPUT = (1U << 1),
BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2),
+ BPF_FIB_LOOKUP_TBID = (1U << 3),
+ BPF_FIB_LOOKUP_SRC = (1U << 4),
};
enum {
@@ -6834,6 +7020,7 @@ enum {
BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */
BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */
BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */
+ BPF_FIB_LKUP_RET_NO_SRC_ADDR, /* failed to derive IP src addr */
};
struct bpf_fib_lookup {
@@ -6868,6 +7055,9 @@ struct bpf_fib_lookup {
__u32 rt_metric;
};
+ /* input: source address to consider for lookup
+ * output: source address result from lookup
+ */
union {
__be32 ipv4_src;
__u32 ipv6_src[4]; /* in6_addr; network order */
@@ -6882,9 +7072,19 @@ struct bpf_fib_lookup {
__u32 ipv6_dst[4]; /* in6_addr; network order */
};
- /* output */
- __be16 h_vlan_proto;
- __be16 h_vlan_TCI;
+ union {
+ struct {
+ /* output */
+ __be16 h_vlan_proto;
+ __be16 h_vlan_TCI;
+ };
+ /* input: when accompanied with the
+ * 'BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID` flags, a
+ * specific routing table to use for the fib lookup.
+ */
+ __u32 tbid;
+ };
+
__u8 smac[6]; /* ETH_ALEN */
__u8 dmac[6]; /* ETH_ALEN */
};
@@ -6970,38 +7170,31 @@ struct bpf_spin_lock {
};
struct bpf_timer {
- __u64 :64;
- __u64 :64;
+ __u64 __opaque[2];
} __attribute__((aligned(8)));
struct bpf_dynptr {
- __u64 :64;
- __u64 :64;
+ __u64 __opaque[2];
} __attribute__((aligned(8)));
struct bpf_list_head {
- __u64 :64;
- __u64 :64;
+ __u64 __opaque[2];
} __attribute__((aligned(8)));
struct bpf_list_node {
- __u64 :64;
- __u64 :64;
+ __u64 __opaque[3];
} __attribute__((aligned(8)));
struct bpf_rb_root {
- __u64 :64;
- __u64 :64;
+ __u64 __opaque[2];
} __attribute__((aligned(8)));
struct bpf_rb_node {
- __u64 :64;
- __u64 :64;
- __u64 :64;
+ __u64 __opaque[4];
} __attribute__((aligned(8)));
struct bpf_refcount {
- __u32 :32;
+ __u32 __opaque[1];
} __attribute__((aligned(4)));
struct bpf_sysctl {
@@ -7157,9 +7350,11 @@ struct bpf_core_relo {
* Flags to control bpf_timer_start() behaviour.
* - BPF_F_TIMER_ABS: Timeout passed is absolute time, by default it is
* relative to current time.
+ * - BPF_F_TIMER_CPU_PIN: Timer will be pinned to the CPU of the caller.
*/
enum {
BPF_F_TIMER_ABS = (1ULL << 0),
+ BPF_F_TIMER_CPU_PIN = (1ULL << 1),
};
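(Illustrative call, assuming `val->timer` lives in a map value and was set up
with bpf_timer_init() and bpf_timer_set_callback(): start it pinned to the
calling CPU, expiring 1 ms from now.)

    bpf_timer_start(&val->timer, 1000000 /* ns, relative */,
                    BPF_F_TIMER_CPU_PIN);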
/* BPF numbers iterator state */
diff --git a/include/uapi/linux/bpfilter.h b/include/uapi/linux/bpfilter.h
deleted file mode 100644
index cbc1f5813f50..000000000000
--- a/include/uapi/linux/bpfilter.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _UAPI_LINUX_BPFILTER_H
-#define _UAPI_LINUX_BPFILTER_H
-
-#include <linux/if.h>
-
-enum {
- BPFILTER_IPT_SO_SET_REPLACE = 64,
- BPFILTER_IPT_SO_SET_ADD_COUNTERS = 65,
- BPFILTER_IPT_SET_MAX,
-};
-
-enum {
- BPFILTER_IPT_SO_GET_INFO = 64,
- BPFILTER_IPT_SO_GET_ENTRIES = 65,
- BPFILTER_IPT_SO_GET_REVISION_MATCH = 66,
- BPFILTER_IPT_SO_GET_REVISION_TARGET = 67,
- BPFILTER_IPT_GET_MAX,
-};
-
-#endif /* _UAPI_LINUX_BPFILTER_H */
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index dbb8b96da50d..f8bc34a6bcfa 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -333,6 +333,8 @@ struct btrfs_ioctl_fs_info_args {
#define BTRFS_FEATURE_INCOMPAT_RAID1C34 (1ULL << 11)
#define BTRFS_FEATURE_INCOMPAT_ZONED (1ULL << 12)
#define BTRFS_FEATURE_INCOMPAT_EXTENT_TREE_V2 (1ULL << 13)
+#define BTRFS_FEATURE_INCOMPAT_RAID_STRIPE_TREE (1ULL << 14)
+#define BTRFS_FEATURE_INCOMPAT_SIMPLE_QUOTA (1ULL << 16)
struct btrfs_ioctl_feature_flags {
__u64 compat_flags;
@@ -612,6 +614,9 @@ struct btrfs_ioctl_clone_range_args {
*/
#define BTRFS_DEFRAG_RANGE_COMPRESS 1
#define BTRFS_DEFRAG_RANGE_START_IO 2
+#define BTRFS_DEFRAG_RANGE_FLAGS_SUPP (BTRFS_DEFRAG_RANGE_COMPRESS | \
+ BTRFS_DEFRAG_RANGE_START_IO)
+
struct btrfs_ioctl_defrag_range_args {
/* start of the defrag operation */
__u64 start;
@@ -753,6 +758,7 @@ struct btrfs_ioctl_get_dev_stats {
#define BTRFS_QUOTA_CTL_ENABLE 1
#define BTRFS_QUOTA_CTL_DISABLE 2
#define BTRFS_QUOTA_CTL_RESCAN__NOTUSED 3
+#define BTRFS_QUOTA_CTL_ENABLE_SIMPLE_QUOTA 4
struct btrfs_ioctl_quota_ctl_args {
__u64 cmd;
__u64 status;
diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h
index ab38d0f411fa..d24e8e121507 100644
--- a/include/uapi/linux/btrfs_tree.h
+++ b/include/uapi/linux/btrfs_tree.h
@@ -73,6 +73,9 @@
/* Holds the block group items for extent tree v2. */
#define BTRFS_BLOCK_GROUP_TREE_OBJECTID 11ULL
+/* Tracks RAID stripes in block groups. */
+#define BTRFS_RAID_STRIPE_TREE_OBJECTID 12ULL
+
/* device stats in the device tree */
#define BTRFS_DEV_STATS_OBJECTID 0ULL
@@ -216,11 +219,31 @@
*/
#define BTRFS_METADATA_ITEM_KEY 169
+/*
+ * Special inline ref key which stores the id of the subvolume which originally
+ * created the extent. This subvolume owns the extent permanently from the
+ * perspective of simple quotas. Needed to know which subvolume to free quota
+ * usage from when the extent is deleted.
+ *
+ * Stored as an inline ref to avoid wasting space on a separate item on
+ * top of the existing extent item. However, unlike the other inline refs,
+ * there is only one owner ref per extent.
+ *
+ * Because of this, it goes at the front of the list of inline refs, and thus
+ * must have a lower type value than any other inline ref type (to satisfy the
+ * disk format rule that inline refs have non-decreasing type).
+ */
+#define BTRFS_EXTENT_OWNER_REF_KEY 172
+
#define BTRFS_TREE_BLOCK_REF_KEY 176
#define BTRFS_EXTENT_DATA_REF_KEY 178
-#define BTRFS_EXTENT_REF_V0_KEY 180
+/*
+ * Obsolete key. Definition removed in 6.6, value may be reused in the future.
+ *
+ * #define BTRFS_EXTENT_REF_V0_KEY 180
+ */
#define BTRFS_SHARED_BLOCK_REF_KEY 182
@@ -257,6 +280,8 @@
#define BTRFS_DEV_ITEM_KEY 216
#define BTRFS_CHUNK_ITEM_KEY 228
+#define BTRFS_RAID_STRIPE_KEY 230
+
/*
* Records the overall state of the qgroups.
* There's only one instance of this key present,
@@ -715,6 +740,30 @@ struct btrfs_free_space_header {
__le64 num_bitmaps;
} __attribute__ ((__packed__));
+struct btrfs_raid_stride {
+ /* The id of device this raid extent lives on. */
+ __le64 devid;
+ /* The physical location on disk. */
+ __le64 physical;
+} __attribute__ ((__packed__));
+
+/* The stripe_extent::encoding, 1:1 mapping of enum btrfs_raid_types. */
+#define BTRFS_STRIPE_RAID0 1
+#define BTRFS_STRIPE_RAID1 2
+#define BTRFS_STRIPE_DUP 3
+#define BTRFS_STRIPE_RAID10 4
+#define BTRFS_STRIPE_RAID5 5
+#define BTRFS_STRIPE_RAID6 6
+#define BTRFS_STRIPE_RAID1C3 7
+#define BTRFS_STRIPE_RAID1C4 8
+
+struct btrfs_stripe_extent {
+ __u8 encoding;
+ __u8 reserved[7];
+ /* An array of raid strides this stripe is composed of. */
+ struct btrfs_raid_stride strides[];
+} __attribute__ ((__packed__));
+
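(The number of strides is not stored explicitly; it is implied by the item
size, so a reader could derive it as in this sketch, with `item_size` being
the length of the btrfs item:)

    size_t num_strides = (item_size - sizeof(struct btrfs_stripe_extent)) /
                         sizeof(struct btrfs_raid_stride);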
#define BTRFS_HEADER_FLAG_WRITTEN (1ULL << 0)
#define BTRFS_HEADER_FLAG_RELOC (1ULL << 1)
@@ -783,6 +832,10 @@ struct btrfs_shared_data_ref {
__le32 count;
} __attribute__ ((__packed__));
+struct btrfs_extent_owner_ref {
+ __le64 root_id;
+} __attribute__ ((__packed__));
+
struct btrfs_extent_inline_ref {
__u8 type;
__le64 offset;
@@ -1200,9 +1253,17 @@ static inline __u16 btrfs_qgroup_level(__u64 qgroupid)
*/
#define BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT (1ULL << 2)
+/*
+ * Whether or not this filesystem is using simple quotas. Not exactly the
+ * incompat bit, because we support using simple quotas, disabling it, then
+ * going back to full qgroup quotas.
+ */
+#define BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE (1ULL << 3)
+
#define BTRFS_QGROUP_STATUS_FLAGS_MASK (BTRFS_QGROUP_STATUS_FLAG_ON | \
BTRFS_QGROUP_STATUS_FLAG_RESCAN | \
- BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT)
+ BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT | \
+ BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE)
#define BTRFS_QGROUP_STATUS_VERSION 1
@@ -1224,6 +1285,15 @@ struct btrfs_qgroup_status_item {
* of the scan. It contains a logical address
*/
__le64 rescan;
+
+ /*
+ * The generation when quotas were last enabled. Used by simple quotas to
+	 * avoid decrementing when freeing an extent that was written before
+	 * quotas were enabled.
+ *
+ * Set only if flags contain BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE.
+ */
+ __le64 enable_gen;
} __attribute__ ((__packed__));
struct btrfs_qgroup_info_item {
diff --git a/include/uapi/linux/can.h b/include/uapi/linux/can.h
index dd645ea72306..939db2388208 100644
--- a/include/uapi/linux/can.h
+++ b/include/uapi/linux/can.h
@@ -285,6 +285,5 @@ struct can_filter {
};
#define CAN_INV_FILTER 0x20000000U /* to be set in can_filter.can_id */
-#define CAN_RAW_FILTER_MAX 512 /* maximum number of can_filter set via setsockopt() */
#endif /* !_UAPI_CAN_H */
diff --git a/include/uapi/linux/can/raw.h b/include/uapi/linux/can/raw.h
index ff12f525c37c..31622c9b7988 100644
--- a/include/uapi/linux/can/raw.h
+++ b/include/uapi/linux/can/raw.h
@@ -49,6 +49,8 @@
#include <linux/can.h>
#define SOL_CAN_RAW (SOL_CAN_BASE + CAN_RAW)
+#define CAN_RAW_FILTER_MAX 512 /* maximum number of can_filter set via setsockopt() */
+
enum {
SCM_CAN_RAW_ERRQUEUE = 1,
};
diff --git a/include/uapi/linux/capability.h b/include/uapi/linux/capability.h
index 3d61a0ae055d..5bb906098697 100644
--- a/include/uapi/linux/capability.h
+++ b/include/uapi/linux/capability.h
@@ -41,11 +41,12 @@ typedef struct __user_cap_header_struct {
int pid;
} __user *cap_user_header_t;
-typedef struct __user_cap_data_struct {
+struct __user_cap_data_struct {
__u32 effective;
__u32 permitted;
__u32 inheritable;
-} __user *cap_user_data_t;
+};
+typedef struct __user_cap_data_struct __user *cap_user_data_t;
#define VFS_CAP_REVISION_MASK 0xFF000000
diff --git a/include/uapi/linux/cgroupstats.h b/include/uapi/linux/cgroupstats.h
index aa306e4cd6c1..80b2c8594480 100644
--- a/include/uapi/linux/cgroupstats.h
+++ b/include/uapi/linux/cgroupstats.h
@@ -24,8 +24,6 @@
* basis. This data is shared using taskstats.
*
* Most of these states are derived by looking at the task->state value
- * For the nr_io_wait state, a flag in the delay accounting structure
- * indicates that the task is waiting on IO
*
* Each member is aligned to a 8 byte boundary.
*/
diff --git a/include/uapi/linux/cn_proc.h b/include/uapi/linux/cn_proc.h
index db210625cee8..f2afb7cc4926 100644
--- a/include/uapi/linux/cn_proc.h
+++ b/include/uapi/linux/cn_proc.h
@@ -30,6 +30,49 @@ enum proc_cn_mcast_op {
PROC_CN_MCAST_IGNORE = 2
};
+#define PROC_EVENT_ALL (PROC_EVENT_FORK | PROC_EVENT_EXEC | PROC_EVENT_UID | \
+ PROC_EVENT_GID | PROC_EVENT_SID | PROC_EVENT_PTRACE | \
+ PROC_EVENT_COMM | PROC_EVENT_NONZERO_EXIT | \
+ PROC_EVENT_COREDUMP | PROC_EVENT_EXIT)
+
+/*
+ * If you add an entry in proc_cn_event, make sure you add it in
+ * PROC_EVENT_ALL above as well.
+ */
+enum proc_cn_event {
+ /* Use successive bits so the enums can be used to record
+ * sets of events as well
+ */
+ PROC_EVENT_NONE = 0x00000000,
+ PROC_EVENT_FORK = 0x00000001,
+ PROC_EVENT_EXEC = 0x00000002,
+ PROC_EVENT_UID = 0x00000004,
+ PROC_EVENT_GID = 0x00000040,
+ PROC_EVENT_SID = 0x00000080,
+ PROC_EVENT_PTRACE = 0x00000100,
+ PROC_EVENT_COMM = 0x00000200,
+ /* "next" should be 0x00000400 */
+ /* "last" is the last process event: exit,
+ * while "next to last" is coredumping event
+ * before that is report only if process dies
+ * with non-zero exit status
+ */
+ PROC_EVENT_NONZERO_EXIT = 0x20000000,
+ PROC_EVENT_COREDUMP = 0x40000000,
+ PROC_EVENT_EXIT = 0x80000000
+};
+
+struct proc_input {
+ enum proc_cn_mcast_op mcast_op;
+ enum proc_cn_event event_type;
+};
+
+static inline enum proc_cn_event valid_event(enum proc_cn_event ev_type)
+{
+ ev_type &= PROC_EVENT_ALL;
+ return ev_type;
+}
+
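(With this change a listener subscribes by sending a struct proc_input
rather than a bare enum proc_cn_mcast_op. A sketch of the payload for exit
events only; the netlink connector transport is omitted:)

    struct proc_input input = {
            .mcast_op = PROC_CN_MCAST_LISTEN,
            .event_type = PROC_EVENT_EXIT,
    };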
/*
* From the user's point of view, the process
* ID is the thread group ID and thread ID is the internal
@@ -44,24 +87,7 @@ enum proc_cn_mcast_op {
*/
struct proc_event {
- enum what {
- /* Use successive bits so the enums can be used to record
- * sets of events as well
- */
- PROC_EVENT_NONE = 0x00000000,
- PROC_EVENT_FORK = 0x00000001,
- PROC_EVENT_EXEC = 0x00000002,
- PROC_EVENT_UID = 0x00000004,
- PROC_EVENT_GID = 0x00000040,
- PROC_EVENT_SID = 0x00000080,
- PROC_EVENT_PTRACE = 0x00000100,
- PROC_EVENT_COMM = 0x00000200,
- /* "next" should be 0x00000400 */
- /* "last" is the last process event: exit,
- * while "next to last" is coredumping event */
- PROC_EVENT_COREDUMP = 0x40000000,
- PROC_EVENT_EXIT = 0x80000000
- } what;
+ enum proc_cn_event what;
__u32 cpu;
__u64 __attribute__((aligned(8))) timestamp_ns;
/* Number of nano seconds since system boot */
diff --git a/include/uapi/linux/counter.h b/include/uapi/linux/counter.h
index 8ab12d731e3b..008a691c254b 100644
--- a/include/uapi/linux/counter.h
+++ b/include/uapi/linux/counter.h
@@ -38,7 +38,7 @@ enum counter_scope {
*
* For example, if the Count 2 ceiling extension of Counter device 4 is desired,
* set type equal to COUNTER_COMPONENT_EXTENSION, scope equal to
- * COUNTER_COUNT_SCOPE, parent equal to 2, and id equal to the value provided by
+ * COUNTER_SCOPE_COUNT, parent equal to 2, and id equal to the value provided by
* the respective /sys/bus/counter/devices/counter4/count2/ceiling_component_id
* sysfs attribute.
*/
@@ -127,6 +127,12 @@ enum counter_count_mode {
COUNTER_COUNT_MODE_RANGE_LIMIT,
COUNTER_COUNT_MODE_NON_RECYCLE,
COUNTER_COUNT_MODE_MODULO_N,
+ COUNTER_COUNT_MODE_INTERRUPT_ON_TERMINAL_COUNT,
+ COUNTER_COUNT_MODE_HARDWARE_RETRIGGERABLE_ONESHOT,
+ COUNTER_COUNT_MODE_RATE_GENERATOR,
+ COUNTER_COUNT_MODE_SQUARE_WAVE_MODE,
+ COUNTER_COUNT_MODE_SOFTWARE_TRIGGERED_STROBE,
+ COUNTER_COUNT_MODE_HARDWARE_TRIGGERED_STROBE,
};
/* Count function values */
diff --git a/include/uapi/linux/cxl_mem.h b/include/uapi/linux/cxl_mem.h
index 14bc6e742148..42066f4eb890 100644
--- a/include/uapi/linux/cxl_mem.h
+++ b/include/uapi/linux/cxl_mem.h
@@ -46,6 +46,7 @@
___C(GET_SCAN_MEDIA_CAPS, "Get Scan Media Capabilities"), \
___DEPRECATED(SCAN_MEDIA, "Scan Media"), \
___DEPRECATED(GET_SCAN_MEDIA, "Get Scan Media Results"), \
+ ___C(GET_TIMESTAMP, "Get Timestamp"), \
___C(MAX, "invalid / last command")
#define ___C(a, b) CXL_MEM_COMMAND_ID_##a
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index 3782d4219ac9..130cae0d3e20 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -139,6 +139,8 @@ enum devlink_command {
DEVLINK_CMD_SELFTESTS_GET, /* can dump */
DEVLINK_CMD_SELFTESTS_RUN,
+ DEVLINK_CMD_NOTIFY_FILTER_SET,
+
/* add new commands above here */
__DEVLINK_CMD_MAX,
DEVLINK_CMD_MAX = __DEVLINK_CMD_MAX - 1
@@ -265,7 +267,7 @@ enum {
* Documentation/networking/devlink/devlink-flash.rst
*
*/
-enum {
+enum devlink_flash_overwrite {
DEVLINK_FLASH_OVERWRITE_SETTINGS_BIT,
DEVLINK_FLASH_OVERWRITE_IDENTIFIERS_BIT,
@@ -661,6 +663,8 @@ enum devlink_resource_unit {
enum devlink_port_fn_attr_cap {
DEVLINK_PORT_FN_ATTR_CAP_ROCE_BIT,
DEVLINK_PORT_FN_ATTR_CAP_MIGRATABLE_BIT,
+ DEVLINK_PORT_FN_ATTR_CAP_IPSEC_CRYPTO_BIT,
+ DEVLINK_PORT_FN_ATTR_CAP_IPSEC_PACKET_BIT,
/* Add new caps above */
__DEVLINK_PORT_FN_ATTR_CAPS_MAX,
@@ -669,6 +673,8 @@ enum devlink_port_fn_attr_cap {
#define DEVLINK_PORT_FN_CAP_ROCE _BITUL(DEVLINK_PORT_FN_ATTR_CAP_ROCE_BIT)
#define DEVLINK_PORT_FN_CAP_MIGRATABLE \
_BITUL(DEVLINK_PORT_FN_ATTR_CAP_MIGRATABLE_BIT)
+#define DEVLINK_PORT_FN_CAP_IPSEC_CRYPTO _BITUL(DEVLINK_PORT_FN_ATTR_CAP_IPSEC_CRYPTO_BIT)
+#define DEVLINK_PORT_FN_CAP_IPSEC_PACKET _BITUL(DEVLINK_PORT_FN_ATTR_CAP_IPSEC_PACKET_BIT)
enum devlink_port_function_attr {
DEVLINK_PORT_FUNCTION_ATTR_UNSPEC,
@@ -676,6 +682,7 @@ enum devlink_port_function_attr {
DEVLINK_PORT_FN_ATTR_STATE, /* u8 */
DEVLINK_PORT_FN_ATTR_OPSTATE, /* u8 */
DEVLINK_PORT_FN_ATTR_CAPS, /* bitfield32 */
+ DEVLINK_PORT_FN_ATTR_DEVLINK, /* nested */
__DEVLINK_PORT_FUNCTION_ATTR_MAX,
DEVLINK_PORT_FUNCTION_ATTR_MAX = __DEVLINK_PORT_FUNCTION_ATTR_MAX - 1
diff --git a/include/uapi/linux/dlm_plock.h b/include/uapi/linux/dlm_plock.h
index 63b6c1fd9169..eb66afcac40e 100644
--- a/include/uapi/linux/dlm_plock.h
+++ b/include/uapi/linux/dlm_plock.h
@@ -22,6 +22,7 @@ enum {
DLM_PLOCK_OP_LOCK = 1,
DLM_PLOCK_OP_UNLOCK,
DLM_PLOCK_OP_GET,
+ DLM_PLOCK_OP_CANCEL,
};
#define DLM_PLOCK_FL_CLOSE 1
diff --git a/include/uapi/linux/dpll.h b/include/uapi/linux/dpll.h
new file mode 100644
index 000000000000..b4e947f9bfbc
--- /dev/null
+++ b/include/uapi/linux/dpll.h
@@ -0,0 +1,208 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/dpll.yaml */
+/* YNL-GEN uapi header */
+
+#ifndef _UAPI_LINUX_DPLL_H
+#define _UAPI_LINUX_DPLL_H
+
+#define DPLL_FAMILY_NAME "dpll"
+#define DPLL_FAMILY_VERSION 1
+
+/**
+ * enum dpll_mode - working modes a dpll can support, differentiates if and how
+ * dpll selects one of its inputs to syntonize with it, valid values for
+ * DPLL_A_MODE attribute
+ * @DPLL_MODE_MANUAL: input can only be selected by sending a request to the dpll
+ * @DPLL_MODE_AUTOMATIC: highest prio input pin auto selected by dpll
+ */
+enum dpll_mode {
+ DPLL_MODE_MANUAL = 1,
+ DPLL_MODE_AUTOMATIC,
+
+ /* private: */
+ __DPLL_MODE_MAX,
+ DPLL_MODE_MAX = (__DPLL_MODE_MAX - 1)
+};
+
+/**
+ * enum dpll_lock_status - provides information of dpll device lock status,
+ * valid values for DPLL_A_LOCK_STATUS attribute
+ * @DPLL_LOCK_STATUS_UNLOCKED: dpll was not yet locked to any valid input (or
+ * forced by setting DPLL_A_MODE to DPLL_MODE_DETACHED)
+ * @DPLL_LOCK_STATUS_LOCKED: dpll is locked to a valid signal, but no holdover
+ * available
+ * @DPLL_LOCK_STATUS_LOCKED_HO_ACQ: dpll is locked and holdover acquired
+ * @DPLL_LOCK_STATUS_HOLDOVER: dpll is in holdover state - lost a valid lock or
+ *	was forced by disconnecting all the pins (the latter is possible only when the
+ *	dpll lock-state was already DPLL_LOCK_STATUS_LOCKED_HO_ACQ; if the dpll lock-state
+ *	was not DPLL_LOCK_STATUS_LOCKED_HO_ACQ, the dpll's lock-state shall remain
+ * DPLL_LOCK_STATUS_UNLOCKED)
+ */
+enum dpll_lock_status {
+ DPLL_LOCK_STATUS_UNLOCKED = 1,
+ DPLL_LOCK_STATUS_LOCKED,
+ DPLL_LOCK_STATUS_LOCKED_HO_ACQ,
+ DPLL_LOCK_STATUS_HOLDOVER,
+
+ /* private: */
+ __DPLL_LOCK_STATUS_MAX,
+ DPLL_LOCK_STATUS_MAX = (__DPLL_LOCK_STATUS_MAX - 1)
+};
+
+#define DPLL_TEMP_DIVIDER 1000
+
+/**
+ * enum dpll_type - type of dpll, valid values for DPLL_A_TYPE attribute
+ * @DPLL_TYPE_PPS: dpll produces Pulse-Per-Second signal
+ * @DPLL_TYPE_EEC: dpll drives the Ethernet Equipment Clock
+ */
+enum dpll_type {
+ DPLL_TYPE_PPS = 1,
+ DPLL_TYPE_EEC,
+
+ /* private: */
+ __DPLL_TYPE_MAX,
+ DPLL_TYPE_MAX = (__DPLL_TYPE_MAX - 1)
+};
+
+/**
+ * enum dpll_pin_type - defines possible types of a pin, valid values for
+ * DPLL_A_PIN_TYPE attribute
+ * @DPLL_PIN_TYPE_MUX: aggregates another layer of selectable pins
+ * @DPLL_PIN_TYPE_EXT: external input
+ * @DPLL_PIN_TYPE_SYNCE_ETH_PORT: ethernet port PHY's recovered clock
+ * @DPLL_PIN_TYPE_INT_OSCILLATOR: device internal oscillator
+ * @DPLL_PIN_TYPE_GNSS: GNSS recovered clock
+ */
+enum dpll_pin_type {
+ DPLL_PIN_TYPE_MUX = 1,
+ DPLL_PIN_TYPE_EXT,
+ DPLL_PIN_TYPE_SYNCE_ETH_PORT,
+ DPLL_PIN_TYPE_INT_OSCILLATOR,
+ DPLL_PIN_TYPE_GNSS,
+
+ /* private: */
+ __DPLL_PIN_TYPE_MAX,
+ DPLL_PIN_TYPE_MAX = (__DPLL_PIN_TYPE_MAX - 1)
+};
+
+/**
+ * enum dpll_pin_direction - defines possible direction of a pin, valid values
+ * for DPLL_A_PIN_DIRECTION attribute
+ * @DPLL_PIN_DIRECTION_INPUT: pin used as an input of a signal
+ * @DPLL_PIN_DIRECTION_OUTPUT: pin used to output the signal
+ */
+enum dpll_pin_direction {
+ DPLL_PIN_DIRECTION_INPUT = 1,
+ DPLL_PIN_DIRECTION_OUTPUT,
+
+ /* private: */
+ __DPLL_PIN_DIRECTION_MAX,
+ DPLL_PIN_DIRECTION_MAX = (__DPLL_PIN_DIRECTION_MAX - 1)
+};
+
+#define DPLL_PIN_FREQUENCY_1_HZ 1
+#define DPLL_PIN_FREQUENCY_10_KHZ 10000
+#define DPLL_PIN_FREQUENCY_77_5_KHZ 77500
+#define DPLL_PIN_FREQUENCY_10_MHZ 10000000
+
+/**
+ * enum dpll_pin_state - defines possible states of a pin, valid values for
+ * DPLL_A_PIN_STATE attribute
+ * @DPLL_PIN_STATE_CONNECTED: pin connected, active input of phase locked loop
+ * @DPLL_PIN_STATE_DISCONNECTED: pin disconnected, not considered as a valid
+ * input
+ * @DPLL_PIN_STATE_SELECTABLE: pin enabled for automatic input selection
+ */
+enum dpll_pin_state {
+ DPLL_PIN_STATE_CONNECTED = 1,
+ DPLL_PIN_STATE_DISCONNECTED,
+ DPLL_PIN_STATE_SELECTABLE,
+
+ /* private: */
+ __DPLL_PIN_STATE_MAX,
+ DPLL_PIN_STATE_MAX = (__DPLL_PIN_STATE_MAX - 1)
+};
+
+/**
+ * enum dpll_pin_capabilities - defines possible capabilities of a pin, valid
+ * flags on DPLL_A_PIN_CAPABILITIES attribute
+ * @DPLL_PIN_CAPABILITIES_DIRECTION_CAN_CHANGE: pin direction can be changed
+ * @DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE: pin priority can be changed
+ * @DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE: pin state can be changed
+ */
+enum dpll_pin_capabilities {
+ DPLL_PIN_CAPABILITIES_DIRECTION_CAN_CHANGE = 1,
+ DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE = 2,
+ DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE = 4,
+};
+
+#define DPLL_PHASE_OFFSET_DIVIDER 1000
+
+enum dpll_a {
+ DPLL_A_ID = 1,
+ DPLL_A_MODULE_NAME,
+ DPLL_A_PAD,
+ DPLL_A_CLOCK_ID,
+ DPLL_A_MODE,
+ DPLL_A_MODE_SUPPORTED,
+ DPLL_A_LOCK_STATUS,
+ DPLL_A_TEMP,
+ DPLL_A_TYPE,
+
+ __DPLL_A_MAX,
+ DPLL_A_MAX = (__DPLL_A_MAX - 1)
+};
+
+enum dpll_a_pin {
+ DPLL_A_PIN_ID = 1,
+ DPLL_A_PIN_PARENT_ID,
+ DPLL_A_PIN_MODULE_NAME,
+ DPLL_A_PIN_PAD,
+ DPLL_A_PIN_CLOCK_ID,
+ DPLL_A_PIN_BOARD_LABEL,
+ DPLL_A_PIN_PANEL_LABEL,
+ DPLL_A_PIN_PACKAGE_LABEL,
+ DPLL_A_PIN_TYPE,
+ DPLL_A_PIN_DIRECTION,
+ DPLL_A_PIN_FREQUENCY,
+ DPLL_A_PIN_FREQUENCY_SUPPORTED,
+ DPLL_A_PIN_FREQUENCY_MIN,
+ DPLL_A_PIN_FREQUENCY_MAX,
+ DPLL_A_PIN_PRIO,
+ DPLL_A_PIN_STATE,
+ DPLL_A_PIN_CAPABILITIES,
+ DPLL_A_PIN_PARENT_DEVICE,
+ DPLL_A_PIN_PARENT_PIN,
+ DPLL_A_PIN_PHASE_ADJUST_MIN,
+ DPLL_A_PIN_PHASE_ADJUST_MAX,
+ DPLL_A_PIN_PHASE_ADJUST,
+ DPLL_A_PIN_PHASE_OFFSET,
+ DPLL_A_PIN_FRACTIONAL_FREQUENCY_OFFSET,
+
+ __DPLL_A_PIN_MAX,
+ DPLL_A_PIN_MAX = (__DPLL_A_PIN_MAX - 1)
+};
+
+enum dpll_cmd {
+ DPLL_CMD_DEVICE_ID_GET = 1,
+ DPLL_CMD_DEVICE_GET,
+ DPLL_CMD_DEVICE_SET,
+ DPLL_CMD_DEVICE_CREATE_NTF,
+ DPLL_CMD_DEVICE_DELETE_NTF,
+ DPLL_CMD_DEVICE_CHANGE_NTF,
+ DPLL_CMD_PIN_ID_GET,
+ DPLL_CMD_PIN_GET,
+ DPLL_CMD_PIN_SET,
+ DPLL_CMD_PIN_CREATE_NTF,
+ DPLL_CMD_PIN_DELETE_NTF,
+ DPLL_CMD_PIN_CHANGE_NTF,
+
+ __DPLL_CMD_MAX,
+ DPLL_CMD_MAX = (__DPLL_CMD_MAX - 1)
+};
+
+#define DPLL_MCGRP_MONITOR "monitor"
+
+#endif /* _UAPI_LINUX_DPLL_H */
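
A small sketch (not from the patch) of how a userspace consumer might render
the new lock-status values, e.g. when printing DPLL_CMD_DEVICE_GET replies:

    static const char *dpll_lock_status_str(enum dpll_lock_status st)
    {
        switch (st) {
        case DPLL_LOCK_STATUS_UNLOCKED:      return "unlocked";
        case DPLL_LOCK_STATUS_LOCKED:        return "locked";
        case DPLL_LOCK_STATUS_LOCKED_HO_ACQ: return "locked-ho-acq";
        case DPLL_LOCK_STATUS_HOLDOVER:      return "holdover";
        default:                             return "unknown";
        }
    }
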
diff --git a/include/uapi/linux/dvb/frontend.h b/include/uapi/linux/dvb/frontend.h
index 326f6a53f1f2..7e0983b987c2 100644
--- a/include/uapi/linux/dvb/frontend.h
+++ b/include/uapi/linux/dvb/frontend.h
@@ -296,6 +296,10 @@ enum fe_spectral_inversion {
* @FEC_28_45: Forward Error Correction Code 28/45
* @FEC_32_45: Forward Error Correction Code 32/45
* @FEC_77_90: Forward Error Correction Code 77/90
+ * @FEC_11_45: Forward Error Correction Code 11/45
+ * @FEC_4_15: Forward Error Correction Code 4/15
+ * @FEC_14_45: Forward Error Correction Code 14/45
+ * @FEC_7_15: Forward Error Correction Code 7/15
*
* Please note that not all FEC types are supported by a given standard.
*/
@@ -329,6 +333,10 @@ enum fe_code_rate {
FEC_28_45,
FEC_32_45,
FEC_77_90,
+ FEC_11_45,
+ FEC_4_15,
+ FEC_14_45,
+ FEC_7_15,
};
/**
diff --git a/include/uapi/linux/dvb/version.h b/include/uapi/linux/dvb/version.h
index 1a8cd038aa0b..20bc874de321 100644
--- a/include/uapi/linux/dvb/version.h
+++ b/include/uapi/linux/dvb/version.h
@@ -10,6 +10,6 @@
#define _DVBVERSION_H_
#define DVB_API_VERSION 5
-#define DVB_API_VERSION_MINOR 11
+#define DVB_API_VERSION_MINOR 12
#endif /*_DVBVERSION_H_*/
diff --git a/include/uapi/linux/elf-fdpic.h b/include/uapi/linux/elf-fdpic.h
index 4fcc6cfebe18..ec23f0871129 100644
--- a/include/uapi/linux/elf-fdpic.h
+++ b/include/uapi/linux/elf-fdpic.h
@@ -32,4 +32,19 @@ struct elf32_fdpic_loadmap {
#define ELF32_FDPIC_LOADMAP_VERSION 0x0000
+/* segment mappings for ELF FDPIC libraries/executables/interpreters */
+struct elf64_fdpic_loadseg {
+ Elf64_Addr addr; /* core address to which mapped */
+ Elf64_Addr p_vaddr; /* VMA recorded in file */
+ Elf64_Word p_memsz; /* allocation size recorded in file */
+};
+
+struct elf64_fdpic_loadmap {
+ Elf64_Half version; /* version of these structures, just in case... */
+ Elf64_Half nsegs; /* number of segments */
+ struct elf64_fdpic_loadseg segs[];
+};
+
+#define ELF64_FDPIC_LOADMAP_VERSION 0x0000
+
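
Since elf64_fdpic_loadmap ends in a flexible array, the allocation size
follows the usual pattern; a sketch (the helper name is illustrative, not
from the patch):

    #include <stddef.h>

    /* Bytes needed for a loadmap describing nsegs segments. */
    static size_t elf64_fdpic_loadmap_size(unsigned int nsegs)
    {
        return sizeof(struct elf64_fdpic_loadmap) +
               nsegs * sizeof(struct elf64_fdpic_loadseg);
    }
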
#endif /* _UAPI_LINUX_ELF_FDPIC_H */
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index ac3da855fb19..9417309b7230 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -140,7 +140,7 @@ typedef __s64 Elf64_Sxword;
#define ELF64_ST_BIND(x) ELF_ST_BIND(x)
#define ELF64_ST_TYPE(x) ELF_ST_TYPE(x)
-typedef struct dynamic {
+typedef struct {
Elf32_Sword d_tag;
union {
Elf32_Sword d_val;
@@ -372,7 +372,8 @@ typedef struct elf64_shdr {
* Notes used in ET_CORE. Architectures export some of the arch register sets
* using the corresponding note types via the PTRACE_GETREGSET and
* PTRACE_SETREGSET requests.
- * The note name for all these is "LINUX".
+ * The note name for these types is "LINUX", except NT_PRFPREG, which is
+ * named "CORE".
*/
#define NT_PRSTATUS 1
#define NT_PRFPREG 2
@@ -403,9 +404,13 @@ typedef struct elf64_shdr {
#define NT_PPC_TM_CPPR 0x10e /* TM checkpointed Program Priority Register */
#define NT_PPC_TM_CDSCR 0x10f /* TM checkpointed Data Stream Control Register */
#define NT_PPC_PKEY 0x110 /* Memory Protection Keys registers */
+#define NT_PPC_DEXCR 0x111 /* PowerPC DEXCR registers */
+#define NT_PPC_HASHKEYR 0x112 /* PowerPC HASHKEYR register */
#define NT_386_TLS 0x200 /* i386 TLS slots (struct user_desc) */
#define NT_386_IOPERM 0x201 /* x86 io permission bitmap (1=deny) */
#define NT_X86_XSTATE 0x202 /* x86 extended state using xsave */
+/* Old binutils treats 0x203 as a CET state */
+#define NT_X86_SHSTK 0x204 /* x86 SHSTK state */
#define NT_S390_HIGH_GPRS 0x300 /* s390 upper register halves */
#define NT_S390_TIMER 0x301 /* s390 timer register */
#define NT_S390_TODCMP 0x302 /* s390 TOD clock comparator register */
@@ -440,6 +445,8 @@ typedef struct elf64_shdr {
#define NT_MIPS_DSP 0x800 /* MIPS DSP ASE registers */
#define NT_MIPS_FP_MODE 0x801 /* MIPS floating-point mode */
#define NT_MIPS_MSA 0x802 /* MIPS SIMD registers */
+#define NT_RISCV_CSR 0x900 /* RISC-V Control and Status Registers */
+#define NT_RISCV_VECTOR 0x901 /* RISC-V vector registers */
#define NT_LOONGARCH_CPUCFG 0xa00 /* LoongArch CPU config registers */
#define NT_LOONGARCH_CSR 0xa01 /* LoongArch control and status registers */
#define NT_LOONGARCH_LSX 0xa02 /* LoongArch Loongson SIMD Extension registers */
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index f7fba0dc87e5..06ef6b78b7de 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1266,6 +1266,8 @@ struct ethtool_rxfh_indir {
* hardware hash key.
* @hfunc: Defines the current RSS hash function used by HW (or to be set to).
* Valid values are one of the %ETH_RSS_HASH_*.
+ * @input_xfrm: Defines how the input data is transformed. Valid values are one
+ * of %RXH_XFRM_*.
* @rsvd8: Reserved for future use; see the note on reserved space.
* @rsvd32: Reserved for future use; see the note on reserved space.
* @rss_config: RX ring/queue index for each hash value i.e., indirection table
@@ -1285,7 +1287,8 @@ struct ethtool_rxfh {
__u32 indir_size;
__u32 key_size;
__u8 hfunc;
- __u8 rsvd8[3];
+ __u8 input_xfrm;
+ __u8 rsvd8[2];
__u32 rsvd32;
__u32 rss_config[];
};
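
A userspace sizing sketch (not from the patch): the variable-length
rss_config[] carries the indirection table followed by the hash key, so the
request buffer must account for both; indir_size and key_size are assumed to
come from a prior ETHTOOL_GRSSH query:

    size_t len = sizeof(struct ethtool_rxfh) +
                 indir_size * sizeof(__u32) + key_size;
    struct ethtool_rxfh *rxfh = calloc(1, len);

    rxfh->cmd        = ETHTOOL_SRSSH;
    rxfh->indir_size = indir_size;
    rxfh->key_size   = key_size;
    rxfh->input_xfrm = RXH_XFRM_SYM_XOR;  /* request symmetric hashing */
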
@@ -1992,6 +1995,15 @@ static inline int ethtool_validate_duplex(__u8 duplex)
#define WOL_MODE_COUNT 8
+/* RSS hash function data
+ * XOR the corresponding source and destination fields of each specified
+ * protocol. Both copies of the XOR'ed fields are fed into the RSS and RXHASH
+ * calculation. Note that this XORing reduces the input set entropy and could
+ * be exploited to reduce the RSS queue spread.
+ */
+#define RXH_XFRM_SYM_XOR (1 << 0)
+#define RXH_XFRM_NO_CHANGE 0xff
+
/* L2-L4 network traffic flow types */
#define TCP_V4_FLOW 0x01 /* hash or spec (tcp_ip4_spec) */
#define UDP_V4_FLOW 0x02 /* hash or spec (udp_ip4_spec) */
@@ -2128,18 +2140,6 @@ enum ethtool_reset_flags {
* refused. For drivers: ignore this field (use kernel's
* __ETHTOOL_LINK_MODE_MASK_NBITS instead), any change to it will
* be overwritten by kernel.
- * @supported: Bitmap with each bit meaning given by
- * %ethtool_link_mode_bit_indices for the link modes, physical
- * connectors and other link features for which the interface
- * supports autonegotiation or auto-detection. Read-only.
- * @advertising: Bitmap with each bit meaning given by
- * %ethtool_link_mode_bit_indices for the link modes, physical
- * connectors and other link features that are advertised through
- * autonegotiation or enabled for auto-detection.
- * @lp_advertising: Bitmap with each bit meaning given by
- * %ethtool_link_mode_bit_indices for the link modes, and other
- * link features that the link partner advertised through
- * autonegotiation; 0 if unknown or not applicable. Read-only.
* @transceiver: Used to distinguish different possible PHY types,
* reported consistently by PHYLIB. Read-only.
* @master_slave_cfg: Master/slave port mode.
@@ -2181,6 +2181,21 @@ enum ethtool_reset_flags {
* %set_link_ksettings() should validate all fields other than @cmd
* and @link_mode_masks_nwords that are not described as read-only or
* deprecated, and must ignore all fields described as read-only.
+ *
+ * @link_mode_masks is divided into three bitfields, each of length
+ * @link_mode_masks_nwords:
+ * - supported: Bitmap with each bit meaning given by
+ * %ethtool_link_mode_bit_indices for the link modes, physical
+ * connectors and other link features for which the interface
+ * supports autonegotiation or auto-detection. Read-only.
+ * - advertising: Bitmap with each bit meaning given by
+ * %ethtool_link_mode_bit_indices for the link modes, physical
+ * connectors and other link features that are advertised through
+ * autonegotiation or enabled for auto-detection.
+ * - lp_advertising: Bitmap with each bit meaning given by
+ * %ethtool_link_mode_bit_indices for the link modes, and other
+ * link features that the link partner advertised through
+ * autonegotiation; 0 if unknown or not applicable. Read-only.
*/
struct ethtool_link_settings {
__u32 cmd;
diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h
index 73e2c10dc2cc..3f89074aa06c 100644
--- a/include/uapi/linux/ethtool_netlink.h
+++ b/include/uapi/linux/ethtool_netlink.h
@@ -908,6 +908,7 @@ enum {
ETHTOOL_A_RSS_HFUNC, /* u32 */
ETHTOOL_A_RSS_INDIR, /* binary */
ETHTOOL_A_RSS_HKEY, /* binary */
+ ETHTOOL_A_RSS_INPUT_XFRM, /* u32 */
__ETHTOOL_A_RSS_CNT,
ETHTOOL_A_RSS_MAX = (__ETHTOOL_A_RSS_CNT - 1),
diff --git a/include/uapi/linux/eventfd.h b/include/uapi/linux/eventfd.h
new file mode 100644
index 000000000000..2eb9ab6c32f3
--- /dev/null
+++ b/include/uapi/linux/eventfd.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_EVENTFD_H
+#define _UAPI_LINUX_EVENTFD_H
+
+#include <linux/fcntl.h>
+
+#define EFD_SEMAPHORE (1 << 0)
+#define EFD_CLOEXEC O_CLOEXEC
+#define EFD_NONBLOCK O_NONBLOCK
+
+#endif /* _UAPI_LINUX_EVENTFD_H */
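
A usage sketch (not part of the header): with EFD_SEMAPHORE set, each read()
decrements the counter by one instead of draining it to zero:

    #include <stdint.h>
    #include <unistd.h>
    #include <sys/eventfd.h>

    int main(void)
    {
        int fd = eventfd(3, EFD_SEMAPHORE | EFD_NONBLOCK);
        uint64_t v;

        read(fd, &v, sizeof(v));  /* v == 1, counter is now 2 */
        close(fd);
        return 0;
    }
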
diff --git a/include/uapi/linux/fcntl.h b/include/uapi/linux/fcntl.h
index e8c07da58c9f..282e90aeb163 100644
--- a/include/uapi/linux/fcntl.h
+++ b/include/uapi/linux/fcntl.h
@@ -112,4 +112,12 @@
#define AT_RECURSIVE 0x8000 /* Apply to the entire subtree */
+/* Flags for name_to_handle_at(2). We reuse AT_ flag space to save bits... */
+#define AT_HANDLE_FID AT_REMOVEDIR /* file handle is needed to
+ compare object identity and may not
+					be usable with open_by_handle_at(2) */
+#if defined(__KERNEL__)
+#define AT_GETATTR_NOSEC 0x80000000
+#endif
+
#endif /* _UAPI_LINUX_FCNTL_H */
diff --git a/include/uapi/linux/firewire-cdev.h b/include/uapi/linux/firewire-cdev.h
index 92be3ea3c6e0..1f2c9469f921 100644
--- a/include/uapi/linux/firewire-cdev.h
+++ b/include/uapi/linux/firewire-cdev.h
@@ -46,6 +46,12 @@
#define FW_CDEV_EVENT_PHY_PACKET_RECEIVED 0x08
#define FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL 0x09
+/* available since kernel version 6.5 */
+#define FW_CDEV_EVENT_REQUEST3 0x0a
+#define FW_CDEV_EVENT_RESPONSE2 0x0b
+#define FW_CDEV_EVENT_PHY_PACKET_SENT2 0x0c
+#define FW_CDEV_EVENT_PHY_PACKET_RECEIVED2 0x0d
+
/**
* struct fw_cdev_event_common - Common part of all fw_cdev_event_* types
* @closure: For arbitrary use by userspace
@@ -103,6 +109,32 @@ struct fw_cdev_event_bus_reset {
* @length: Data length, i.e. the response's payload size in bytes
* @data: Payload data, if any
*
+ * This event is sent instead of &fw_cdev_event_response2 if the kernel or the client implements
+ * ABI version <= 5. It lacks the time stamp fields present in &fw_cdev_event_response2.
+ */
+struct fw_cdev_event_response {
+ __u64 closure;
+ __u32 type;
+ __u32 rcode;
+ __u32 length;
+ __u32 data[];
+};
+
+/**
+ * struct fw_cdev_event_response2 - Sent when a response packet was received
+ * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_SEND_REQUEST
+ * or %FW_CDEV_IOC_SEND_BROADCAST_REQUEST
+ * or %FW_CDEV_IOC_SEND_STREAM_PACKET ioctl
+ * @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_RESPONSE2
+ * @rcode: Response code returned by the remote node
+ * @length: Data length, i.e. the response's payload size in bytes
+ * @request_tstamp: The time stamp of isochronous cycle at which the request was sent.
+ * @response_tstamp: The time stamp of isochronous cycle at which the response was sent.
+ * @padding: Padding to keep the structure size a multiple of 8 on all architectures, since the
+ *	     System V ABI for i386 aligns 8-byte objects to only 4 bytes.
+ * @data: Payload data, if any
+ *
* This event is sent when the stack receives a response to an outgoing request
* sent by %FW_CDEV_IOC_SEND_REQUEST ioctl. The payload data for responses
* carrying data (read and lock responses) follows immediately and can be
@@ -112,12 +144,21 @@ struct fw_cdev_event_bus_reset {
* involve response packets. This includes unified write transactions,
* broadcast write transactions, and transmission of asynchronous stream
* packets. @rcode indicates success or failure of such transmissions.
+ *
+ * The value of @request_tstamp expresses the isochronous cycle at which the request was sent to
+ * initiate the transaction. The value of @response_tstamp expresses the isochronous cycle at which
+ * the response arrived to complete the transaction. Each value is an unsigned 16-bit integer
+ * containing the three low-order bits of the second field and all 13 bits of the cycle field, in
+ * the format of the CYCLE_TIMER register.
*/
-struct fw_cdev_event_response {
+struct fw_cdev_event_response2 {
__u64 closure;
__u32 type;
__u32 rcode;
__u32 length;
+ __u32 request_tstamp;
+ __u32 response_tstamp;
+ __u32 padding;
__u32 data[];
};
@@ -159,6 +200,41 @@ struct fw_cdev_event_request {
* @length: Data length, i.e. the request's payload size in bytes
* @data: Incoming data, if any
*
+ * This event is sent instead of &fw_cdev_event_request3 if the kernel or the client implements
+ * ABI version <= 5. It lacks the time stamp field present in &fw_cdev_event_request3.
+ */
+struct fw_cdev_event_request2 {
+ __u64 closure;
+ __u32 type;
+ __u32 tcode;
+ __u64 offset;
+ __u32 source_node_id;
+ __u32 destination_node_id;
+ __u32 card;
+ __u32 generation;
+ __u32 handle;
+ __u32 length;
+ __u32 data[];
+};
+
+/**
+ * struct fw_cdev_event_request3 - Sent on incoming request to an address region
+ * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_ALLOCATE ioctl
+ * @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_REQUEST3
+ * @tcode: Transaction code of the incoming request
+ * @offset: The offset into the 48-bit per-node address space
+ * @source_node_id: Sender node ID
+ * @destination_node_id: Destination node ID
+ * @card: The index of the card from which the request came
+ * @generation: Bus generation in which the request is valid
+ * @handle: Reference to the kernel-side pending request
+ * @length: Data length, i.e. the request's payload size in bytes
+ * @tstamp: The time stamp of isochronous cycle at which the request arrived.
+ * @padding: Padding to keep the structure size a multiple of 8 on all architectures, since the
+ *	     System V ABI for i386 aligns 8-byte objects to only 4 bytes.
+ * @data: Incoming data, if any
+ *
* This event is sent when the stack receives an incoming request to an address
* region registered using the %FW_CDEV_IOC_ALLOCATE ioctl. The request is
* guaranteed to be completely contained in the specified region. Userspace is
@@ -191,10 +267,14 @@ struct fw_cdev_event_request {
* sent.
*
* If the client subsequently needs to initiate requests to the sender node of
- * an &fw_cdev_event_request2, it needs to use a device file with matching
+ * an &fw_cdev_event_request3, it needs to use a device file with matching
* card index, node ID, and generation for outbound requests.
+ *
+ * @tstamp is the isochronous cycle at which the request arrived. It is a 16-bit integer value
+ * whose higher 3 bits express the three low-order bits of the second field and whose remaining
+ * 13 bits express the cycle field, in the format of the CYCLE_TIME register.
*/
-struct fw_cdev_event_request2 {
+struct fw_cdev_event_request3 {
__u64 closure;
__u32 type;
__u32 tcode;
@@ -205,6 +285,8 @@ struct fw_cdev_event_request2 {
__u32 generation;
__u32 handle;
__u32 length;
+ __u32 tstamp;
+ __u32 padding;
__u32 data[];
};
@@ -341,20 +423,59 @@ struct fw_cdev_event_iso_resource {
* @type: %FW_CDEV_EVENT_PHY_PACKET_SENT or %..._RECEIVED
* @rcode: %RCODE_..., indicates success or failure of transmission
* @length: Data length in bytes
+ * @data: Incoming data for %FW_CDEV_IOC_RECEIVE_PHY_PACKETS. For %FW_CDEV_IOC_SEND_PHY_PACKET
+ *	  the field carries the same data as the request, hence a length of 8 bytes.
+ *
+ * This event is sent instead of &fw_cdev_event_phy_packet2 if the kernel or
+ * the client implements ABI version <= 5. It lacks the time stamp field present in
+ * &fw_cdev_event_phy_packet2.
+ */
+struct fw_cdev_event_phy_packet {
+ __u64 closure;
+ __u32 type;
+ __u32 rcode;
+ __u32 length;
+ __u32 data[];
+};
+
+/**
+ * struct fw_cdev_event_phy_packet2 - A PHY packet was transmitted or received with time stamp.
+ * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_SEND_PHY_PACKET
+ * or %FW_CDEV_IOC_RECEIVE_PHY_PACKETS ioctl
+ * @type: %FW_CDEV_EVENT_PHY_PACKET_SENT2 or %FW_CDEV_EVENT_PHY_PACKET_RECEIVED2
+ * @rcode: %RCODE_..., indicates success or failure of transmission
+ * @length: Data length in bytes
+ * @tstamp: For %FW_CDEV_EVENT_PHY_PACKET_RECEIVED2, the time stamp of the isochronous cycle at
+ *	    which the packet arrived. For %FW_CDEV_EVENT_PHY_PACKET_SENT2 with a non-ping packet,
+ *	    the time stamp of the isochronous cycle at which the packet was sent. For a ping
+ *	    packet, the tick count for the round-trip time measured by the 1394 OHCI controller.
* @data: Incoming data
*
- * If @type is %FW_CDEV_EVENT_PHY_PACKET_SENT, @length is 0 and @data empty,
- * except in case of a ping packet: Then, @length is 4, and @data[0] is the
- * ping time in 49.152MHz clocks if @rcode is %RCODE_COMPLETE.
+ * If @type is %FW_CDEV_EVENT_PHY_PACKET_SENT2, @length is 8 and @data consists of the two PHY
+ * packet quadlets to be sent, in host byte order.
*
- * If @type is %FW_CDEV_EVENT_PHY_PACKET_RECEIVED, @length is 8 and @data
- * consists of the two PHY packet quadlets, in host byte order.
+ * If @type is %FW_CDEV_EVENT_PHY_PACKET_RECEIVED2, @length is 8 and @data consists of the two PHY
+ * packet quadlets, in host byte order.
+ *
+ * For %FW_CDEV_EVENT_PHY_PACKET_RECEIVED2, the @tstamp is the isochronous cycle at which the
+ * packet arrived. It is a 16-bit integer value whose higher 3 bits express the three low-order
+ * bits of the second field and whose remaining 13 bits express the cycle field, in the format of
+ * the CYCLE_TIME register.
+ *
+ * For %FW_CDEV_EVENT_PHY_PACKET_SENT2, the meaning of @tstamp depends on whether the packet was
+ * sent for ping or not. If it was not a ping, @tstamp is the isochronous cycle at which the
+ * packet was sent, in the same CYCLE_TIME format described above. If it was a ping, @tstamp is
+ * the tick count for the round-trip time measured by the 1394 OHCI controller, at 49.152 MHz
+ * resolution.
*/
-struct fw_cdev_event_phy_packet {
+struct fw_cdev_event_phy_packet2 {
__u64 closure;
__u32 type;
__u32 rcode;
__u32 length;
+ __u32 tstamp;
__u32 data[];
};
@@ -375,6 +496,11 @@ struct fw_cdev_event_phy_packet {
* %FW_CDEV_EVENT_PHY_PACKET_SENT or
* %FW_CDEV_EVENT_PHY_PACKET_RECEIVED
*
+ * @request3: Valid if @common.type == %FW_CDEV_EVENT_REQUEST3
+ * @response2: Valid if @common.type == %FW_CDEV_EVENT_RESPONSE2
+ * @phy_packet2: Valid if @common.type == %FW_CDEV_EVENT_PHY_PACKET_SENT2 or
+ * %FW_CDEV_EVENT_PHY_PACKET_RECEIVED2
+ *
* Convenience union for userspace use. Events could be read(2) into an
* appropriately aligned char buffer and then cast to this union for further
* processing. Note that for a request, response or iso_interrupt event,
@@ -393,6 +519,9 @@ union fw_cdev_event {
struct fw_cdev_event_iso_interrupt_mc iso_interrupt_mc; /* added in 2.6.36 */
struct fw_cdev_event_iso_resource iso_resource; /* added in 2.6.30 */
struct fw_cdev_event_phy_packet phy_packet; /* added in 2.6.36 */
+ struct fw_cdev_event_request3 request3; /* added in 6.5 */
+ struct fw_cdev_event_response2 response2; /* added in 6.5 */
+ struct fw_cdev_event_phy_packet2 phy_packet2; /* added in 6.5 */
};
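
A dispatch sketch (not from the patch) following the kernel-doc advice above;
fd and the handler functions are assumptions:

    union fw_cdev_event ev;  /* suitably aligned buffer */
    ssize_t len = read(fd, &ev, sizeof(ev));

    if (len >= 0) {
        switch (ev.common.type) {
        case FW_CDEV_EVENT_RESPONSE2:
            handle_response(ev.response2.rcode,
                            ev.response2.request_tstamp,
                            ev.response2.response_tstamp);
            break;
        case FW_CDEV_EVENT_REQUEST3:
            handle_request(&ev.request3);
            break;
        default:
            break;
        }
    }
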
/* available since kernel version 2.6.22 */
@@ -457,6 +586,11 @@ union fw_cdev_event {
* 5 (3.4) - send %FW_CDEV_EVENT_ISO_INTERRUPT events when needed to
* avoid dropping data
* - added %FW_CDEV_IOC_FLUSH_ISO
+ *  6 (6.5) - added new events for subactions of asynchronous transactions with time stamps:
+ * - %FW_CDEV_EVENT_REQUEST3
+ * - %FW_CDEV_EVENT_RESPONSE2
+ * - %FW_CDEV_EVENT_PHY_PACKET_SENT2
+ * - %FW_CDEV_EVENT_PHY_PACKET_RECEIVED2
*/
/**
@@ -502,11 +636,11 @@ struct fw_cdev_get_info {
* @data: Userspace pointer to payload
* @generation: The bus generation where packet is valid
*
- * Send a request to the device. This ioctl implements all outgoing requests.
- * Both quadlet and block request specify the payload as a pointer to the data
- * in the @data field. Once the transaction completes, the kernel writes an
- * &fw_cdev_event_response event back. The @closure field is passed back to
- * user space in the response event.
+ * Send a request to the device. This ioctl implements all outgoing requests. Both quadlet and
+ * block requests specify the payload as a pointer to the data in the @data field. Once the
+ * transaction completes, the kernel writes either an &fw_cdev_event_response event or an
+ * &fw_cdev_event_response2 event back. The @closure field is passed back to user space in the
+ * response event.
*/
struct fw_cdev_send_request {
__u32 tcode;
@@ -989,10 +1123,9 @@ struct fw_cdev_allocate_iso_resource {
* @generation: The bus generation where packet is valid
* @speed: Speed to transmit at
*
- * The %FW_CDEV_IOC_SEND_STREAM_PACKET ioctl sends an asynchronous stream packet
- * to every device which is listening to the specified channel. The kernel
- * writes an &fw_cdev_event_response event which indicates success or failure of
- * the transmission.
+ * The %FW_CDEV_IOC_SEND_STREAM_PACKET ioctl sends an asynchronous stream packet to every device
+ * which is listening to the specified channel. The kernel writes either &fw_cdev_event_response
+ * event or &fw_cdev_event_response2 event which indicates success or failure of the transmission.
*/
struct fw_cdev_send_stream_packet {
__u32 length;
@@ -1011,8 +1144,8 @@ struct fw_cdev_send_stream_packet {
* @data: First and second quadlet of the PHY packet
* @generation: The bus generation where packet is valid
*
- * The %FW_CDEV_IOC_SEND_PHY_PACKET ioctl sends a PHY packet to all nodes
- * on the same card as this device. After transmission, an
+ * The %FW_CDEV_IOC_SEND_PHY_PACKET ioctl sends a PHY packet to all nodes on the same card as this
+ * device. After transmission, either %FW_CDEV_EVENT_PHY_PACKET_SENT event or
 * %FW_CDEV_EVENT_PHY_PACKET_SENT2 event is generated.
*
* The payload @data\[\] shall be specified in host byte order. Usually,
@@ -1031,8 +1164,9 @@ struct fw_cdev_send_phy_packet {
* struct fw_cdev_receive_phy_packets - start reception of PHY packets
* @closure: Passed back to userspace in phy packet events
*
- * This ioctl activates issuing of %FW_CDEV_EVENT_PHY_PACKET_RECEIVED due to
- * incoming PHY packets from any node on the same bus as the device.
+ * This ioctl activates issuing of either %FW_CDEV_EVENT_PHY_PACKET_RECEIVED or
+ * %FW_CDEV_EVENT_PHY_PACKET_RECEIVED2 due to incoming PHY packets from any node on the same bus
+ * as the device.
*
* The ioctl is only permitted on device files which represent a local node.
*/
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index b7b56871029c..48ad69f7722e 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -305,4 +305,64 @@ typedef int __bitwise __kernel_rwf_t;
#define RWF_SUPPORTED (RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT |\
RWF_APPEND)
+/* Pagemap ioctl */
+#define PAGEMAP_SCAN _IOWR('f', 16, struct pm_scan_arg)
+
+/* Bitmasks provided in pm_scan_args masks and reported in page_region.categories. */
+#define PAGE_IS_WPALLOWED (1 << 0)
+#define PAGE_IS_WRITTEN (1 << 1)
+#define PAGE_IS_FILE (1 << 2)
+#define PAGE_IS_PRESENT (1 << 3)
+#define PAGE_IS_SWAPPED (1 << 4)
+#define PAGE_IS_PFNZERO (1 << 5)
+#define PAGE_IS_HUGE (1 << 6)
+#define PAGE_IS_SOFT_DIRTY (1 << 7)
+
+/*
+ * struct page_region - Page region with flags
+ * @start: Start of the region
+ * @end: End of the region (exclusive)
+ * @categories: PAGE_IS_* category bitmask for the region
+ */
+struct page_region {
+ __u64 start;
+ __u64 end;
+ __u64 categories;
+};
+
+/* Flags for PAGEMAP_SCAN ioctl */
+#define PM_SCAN_WP_MATCHING (1 << 0) /* Write protect the pages matched. */
+#define PM_SCAN_CHECK_WPASYNC (1 << 1) /* Abort the scan when a non-WP-enabled page is found. */
+
+/*
+ * struct pm_scan_arg - Pagemap ioctl argument
+ * @size: Size of the structure
+ * @flags: Flags for the IOCTL
+ * @start: Starting address of the region
+ * @end: Ending address of the region
+ * @walk_end: Address where the scan stopped (written by kernel).
+ *	      walk_end == end (address tags cleared) means that the scan completed over the entire range.
+ * @vec: Address of page_region struct array for output
+ * @vec_len: Length of the page_region struct array
+ * @max_pages: Optional limit for number of returned pages (0 = disabled)
+ * @category_inverted: PAGE_IS_* categories whose values match if 0 instead of 1
+ * @category_mask: Skip pages for which any category doesn't match
+ * @category_anyof_mask: Skip pages for which no category matches
+ * @return_mask: PAGE_IS_* categories that are to be reported in `page_region`s returned
+ */
+struct pm_scan_arg {
+ __u64 size;
+ __u64 flags;
+ __u64 start;
+ __u64 end;
+ __u64 walk_end;
+ __u64 vec;
+ __u64 vec_len;
+ __u64 max_pages;
+ __u64 category_inverted;
+ __u64 category_mask;
+ __u64 category_anyof_mask;
+ __u64 return_mask;
+};
+
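
A userspace sketch (not in the patch) that scans a range for written pages;
start and end are assumed page-aligned addresses, and error handling is
elided:

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>

    struct page_region regions[32];
    struct pm_scan_arg arg = {
        .size          = sizeof(arg),
        .start         = (__u64)(unsigned long)start,
        .end           = (__u64)(unsigned long)end,
        .vec           = (__u64)(unsigned long)regions,
        .vec_len       = 32,
        .category_mask = PAGE_IS_WRITTEN,
        .return_mask   = PAGE_IS_WRITTEN,
    };
    int fd = open("/proc/self/pagemap", O_RDONLY);
    int n  = ioctl(fd, PAGEMAP_SCAN, &arg);  /* >= 0: regions filled */
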
#endif /* _UAPI_LINUX_FS_H */
diff --git a/include/uapi/linux/fscrypt.h b/include/uapi/linux/fscrypt.h
index fd1fb0d5389d..7a8f4c290187 100644
--- a/include/uapi/linux/fscrypt.h
+++ b/include/uapi/linux/fscrypt.h
@@ -71,7 +71,8 @@ struct fscrypt_policy_v2 {
__u8 contents_encryption_mode;
__u8 filenames_encryption_mode;
__u8 flags;
- __u8 __reserved[4];
+ __u8 log2_data_unit_size;
+ __u8 __reserved[3];
__u8 master_key_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE];
};
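
A sketch of what the new field encodes (the helper is illustrative, not from
the patch): a power-of-two data unit size, with zero keeping the filesystem
default:

    /* Data unit size in bytes implied by a v2 policy, or 0 for the default. */
    static unsigned int fscrypt_data_unit_size(const struct fscrypt_policy_v2 *p)
    {
        return p->log2_data_unit_size ? 1u << p->log2_data_unit_size : 0;
    }
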
diff --git a/include/uapi/linux/fsi.h b/include/uapi/linux/fsi.h
index b2f1977378c7..a2e730fc6309 100644
--- a/include/uapi/linux/fsi.h
+++ b/include/uapi/linux/fsi.h
@@ -60,6 +60,16 @@ struct scom_access {
*/
/**
+ * FSI_SBEFIFO_CMD_TIMEOUT sets the timeout for writing data to the SBEFIFO.
+ *
+ * The command timeout is specified in seconds. The minimum value of command
+ * timeout is 1 second (the default) and the maximum value of command timeout is
+ * 120 seconds. A command timeout of 0 will reset the value to the default of
+ * 1 second.
+ */
+#define FSI_SBEFIFO_CMD_TIMEOUT_SECONDS _IOW('s', 0x01, __u32)
+
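
A usage sketch (not in the patch); fd is assumed to be an open sbefifo
character device:

    __u32 secs = 30;
    ioctl(fd, FSI_SBEFIFO_CMD_TIMEOUT_SECONDS, &secs);  /* 30 s write timeout */

    secs = 0;
    ioctl(fd, FSI_SBEFIFO_CMD_TIMEOUT_SECONDS, &secs);  /* back to 1 s default */
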
+/**
* FSI_SBEFIFO_READ_TIMEOUT sets the read timeout for response from SBE.
*
* The read timeout is specified in seconds. The minimum value of read
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 1b9d0dfae72d..e7418d15fe39 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -206,6 +206,11 @@
* - add extension header
* - add FUSE_EXT_GROUPS
* - add FUSE_CREATE_SUPP_GROUP
+ * - add FUSE_HAS_EXPIRE_ONLY
+ *
+ * 7.39
+ * - add FUSE_DIRECT_IO_ALLOW_MMAP
+ * - add FUSE_STATX and related structures
*/
#ifndef _LINUX_FUSE_H
@@ -241,7 +246,7 @@
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 38
+#define FUSE_KERNEL_MINOR_VERSION 39
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
@@ -268,6 +273,40 @@ struct fuse_attr {
uint32_t flags;
};
+/*
+ * The following structures are bit-for-bit compatible with the statx(2) ABI in
+ * Linux.
+ */
+struct fuse_sx_time {
+ int64_t tv_sec;
+ uint32_t tv_nsec;
+ int32_t __reserved;
+};
+
+struct fuse_statx {
+ uint32_t mask;
+ uint32_t blksize;
+ uint64_t attributes;
+ uint32_t nlink;
+ uint32_t uid;
+ uint32_t gid;
+ uint16_t mode;
+ uint16_t __spare0[1];
+ uint64_t ino;
+ uint64_t size;
+ uint64_t blocks;
+ uint64_t attributes_mask;
+ struct fuse_sx_time atime;
+ struct fuse_sx_time btime;
+ struct fuse_sx_time ctime;
+ struct fuse_sx_time mtime;
+ uint32_t rdev_major;
+ uint32_t rdev_minor;
+ uint32_t dev_major;
+ uint32_t dev_minor;
+ uint64_t __spare2[14];
+};
+
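
Since the layout is documented as bit-for-bit compatible with statx(2), a
compile-time check is a natural companion (a sketch under the assumption that
<linux/stat.h> is in scope; both structures are 256 bytes):

    _Static_assert(sizeof(struct fuse_statx) == sizeof(struct statx),
                   "fuse_statx must stay bit-compatible with statx");
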
struct fuse_kstatfs {
uint64_t blocks;
uint64_t bfree;
@@ -369,6 +408,8 @@ struct fuse_file_lock {
* FUSE_HAS_INODE_DAX: use per inode DAX
* FUSE_CREATE_SUPP_GROUP: add supplementary group info to create, mkdir,
* symlink and mknod (single group that matches parent)
+ * FUSE_HAS_EXPIRE_ONLY: kernel supports expiry-only entry invalidation
+ * FUSE_DIRECT_IO_ALLOW_MMAP: allow shared mmap in FOPEN_DIRECT_IO mode.
*/
#define FUSE_ASYNC_READ (1 << 0)
#define FUSE_POSIX_LOCKS (1 << 1)
@@ -406,6 +447,11 @@ struct fuse_file_lock {
#define FUSE_SECURITY_CTX (1ULL << 32)
#define FUSE_HAS_INODE_DAX (1ULL << 33)
#define FUSE_CREATE_SUPP_GROUP (1ULL << 34)
+#define FUSE_HAS_EXPIRE_ONLY (1ULL << 35)
+#define FUSE_DIRECT_IO_ALLOW_MMAP (1ULL << 36)
+
+/* Obsolete alias for FUSE_DIRECT_IO_ALLOW_MMAP */
+#define FUSE_DIRECT_IO_RELAX FUSE_DIRECT_IO_ALLOW_MMAP
/**
* CUSE INIT request/reply flags
@@ -572,6 +618,7 @@ enum fuse_opcode {
FUSE_REMOVEMAPPING = 49,
FUSE_SYNCFS = 50,
FUSE_TMPFILE = 51,
+ FUSE_STATX = 52,
/* CUSE specific operations */
CUSE_INIT = 4096,
@@ -636,6 +683,22 @@ struct fuse_attr_out {
struct fuse_attr attr;
};
+struct fuse_statx_in {
+ uint32_t getattr_flags;
+ uint32_t reserved;
+ uint64_t fh;
+ uint32_t sx_flags;
+ uint32_t sx_mask;
+};
+
+struct fuse_statx_out {
+ uint64_t attr_valid; /* Cache timeout for the attributes */
+ uint32_t attr_valid_nsec;
+ uint32_t flags;
+ uint64_t spare[2];
+ struct fuse_statx stat;
+};
+
#define FUSE_COMPAT_MKNOD_IN_SIZE 8
struct fuse_mknod_in {
diff --git a/include/uapi/linux/futex.h b/include/uapi/linux/futex.h
index 71a5df8d2689..d2ee625ea189 100644
--- a/include/uapi/linux/futex.h
+++ b/include/uapi/linux/futex.h
@@ -44,10 +44,35 @@
FUTEX_PRIVATE_FLAG)
/*
- * Flags to specify the bit length of the futex word for futex2 syscalls.
- * Currently, only 32 is supported.
+ * Flags for futex2 syscalls.
+ *
+ * NOTE: these are not pure flags, they can also be seen as:
+ *
+ * union {
+ * u32 flags;
+ * struct {
+ * u32 size : 2,
+ * numa : 1,
+ * : 4,
+ * private : 1;
+ * };
+ * };
*/
-#define FUTEX_32 2
+#define FUTEX2_SIZE_U8 0x00
+#define FUTEX2_SIZE_U16 0x01
+#define FUTEX2_SIZE_U32 0x02
+#define FUTEX2_SIZE_U64 0x03
+#define FUTEX2_NUMA 0x04
+ /* 0x08 */
+ /* 0x10 */
+ /* 0x20 */
+ /* 0x40 */
+#define FUTEX2_PRIVATE FUTEX_PRIVATE_FLAG
+
+#define FUTEX2_SIZE_MASK 0x03
+
+/* do not use */
+#define FUTEX_32 FUTEX2_SIZE_U32 /* historical accident :-( */
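
Because the low two bits encode the operand width, the byte size falls out of
a shift; a decode sketch (the helper name is illustrative, not from the
patch):

    /* 1, 2, 4 or 8 bytes for FUTEX2_SIZE_U8..FUTEX2_SIZE_U64. */
    static inline unsigned int futex2_size_bytes(unsigned int flags)
    {
        return 1u << (flags & FUTEX2_SIZE_MASK);
    }
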
/*
* Max numbers of elements in a futex_waitv array
diff --git a/include/uapi/linux/gsmmux.h b/include/uapi/linux/gsmmux.h
index eb67884e5f38..3a93f17ca943 100644
--- a/include/uapi/linux/gsmmux.h
+++ b/include/uapi/linux/gsmmux.h
@@ -1,11 +1,47 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* Copyright (c) 2022/23 Siemens Mobility GmbH */
#ifndef _LINUX_GSMMUX_H
#define _LINUX_GSMMUX_H
+#include <linux/const.h>
#include <linux/if.h>
#include <linux/ioctl.h>
#include <linux/types.h>
+/*
+ * flags definition for n_gsm
+ *
+ * Used by:
+ * struct gsm_config_ext.flags
+ * struct gsm_dlci_config.flags
+ */
+/* Forces a DLCI reset if set. Otherwise, a DLCI reset is only done if
+ * incompatible settings were provided. Always cleared on retrieval.
+ */
+#define GSM_FL_RESTART _BITUL(0)
+
+/**
+ * struct gsm_config - n_gsm basic configuration parameters
+ *
+ * This structure is used in combination with GSMIOC_GETCONF and GSMIOC_SETCONF
+ * to retrieve and set the basic parameters of an n_gsm ldisc.
+ * struct gsm_config_ext can be used to configure extended ldisc parameters.
+ *
+ * All timers are in units of 1/100th of a second.
+ *
+ * @adaption: Convergence layer type
+ * @encapsulation: Framing (0 = basic option, 1 = advanced option)
+ * @initiator: Initiator or responder
+ * @t1: Acknowledgment timer
+ * @t2: Response timer for multiplexer control channel
+ * @t3: Response timer for wake-up procedure
+ * @n2: Maximum number of retransmissions
+ * @mru: Maximum incoming frame payload size
+ * @mtu: Maximum outgoing frame payload size
+ * @k: Window size
+ * @i: Frame type (1 = UIH, 2 = UI)
+ * @unused: Can not be used
+ */
struct gsm_config
{
unsigned int adaption;
@@ -19,18 +55,32 @@ struct gsm_config
unsigned int mtu;
unsigned int k;
unsigned int i;
- unsigned int unused[8]; /* Can not be used */
+ unsigned int unused[8];
};
#define GSMIOC_GETCONF _IOR('G', 0, struct gsm_config)
#define GSMIOC_SETCONF _IOW('G', 1, struct gsm_config)
+/**
+ * struct gsm_netconfig - n_gsm network configuration parameters
+ *
+ * This structure is used in combination with GSMIOC_ENABLE_NET and
+ * GSMIOC_DISABLE_NET to enable or disable a network data connection
+ * over a mux virtual tty channel. This is for modems that support
+ * data connections with raw IP frames instead of PPP.
+ *
+ * @adaption: Adaption to use in network mode.
+ * @protocol: Protocol to use - only ETH_P_IP supported.
+ * @unused2: Can not be used.
+ * @if_name: Interface name format string.
+ * @unused: Can not be used.
+ */
struct gsm_netconfig {
- unsigned int adaption; /* Adaption to use in network mode */
- unsigned short protocol;/* Protocol to use - only ETH_P_IP supported */
- unsigned short unused2; /* Can not be used */
- char if_name[IFNAMSIZ]; /* interface name format string */
- __u8 unused[28]; /* Can not be used */
+ unsigned int adaption;
+ unsigned short protocol;
+ unsigned short unused2;
+ char if_name[IFNAMSIZ];
+ __u8 unused[28];
};
#define GSMIOC_ENABLE_NET _IOW('G', 2, struct gsm_netconfig)
@@ -39,26 +89,57 @@ struct gsm_netconfig {
/* get the base tty number for a configured gsmmux tty */
#define GSMIOC_GETFIRST _IOR('G', 4, __u32)
+/**
+ * struct gsm_config_ext - n_gsm extended configuration parameters
+ *
+ * This structure is used in combination with GSMIOC_GETCONF_EXT and
+ * GSMIOC_SETCONF_EXT to retrieve and set the extended parameters of an
+ * n_gsm ldisc.
+ *
+ * All timers are in units of 1/100th of a second.
+ *
+ * @keep_alive: Control channel keep-alive in 1/100th of a second (0 to disable).
+ * @wait_config: Wait for DLCI config before opening virtual link?
+ * @flags: Mux specific flags.
+ * @reserved: For future use, must be initialized to zero.
+ */
struct gsm_config_ext {
- __u32 keep_alive; /* Control channel keep-alive in 1/100th of a
- * second (0 to disable)
- */
- __u32 wait_config; /* Wait for DLCI config before opening virtual link? */
- __u32 reserved[6]; /* For future use, must be initialized to zero */
+ __u32 keep_alive;
+ __u32 wait_config;
+ __u32 flags;
+ __u32 reserved[5];
};
#define GSMIOC_GETCONF_EXT _IOR('G', 5, struct gsm_config_ext)
#define GSMIOC_SETCONF_EXT _IOW('G', 6, struct gsm_config_ext)
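
A usage sketch for the new flags field (not part of the header); fd is
assumed to be a tty with the n_gsm line discipline attached:

    struct gsm_config_ext ce;

    ioctl(fd, GSMIOC_GETCONF_EXT, &ce);  /* GSM_FL_RESTART reads back as 0 */
    ce.flags |= GSM_FL_RESTART;
    ioctl(fd, GSMIOC_SETCONF_EXT, &ce);  /* forces the DLCI reset */
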
-/* Set channel accordingly before calling GSMIOC_GETCONF_DLCI. */
+/**
+ * struct gsm_dlci_config - n_gsm channel configuration parameters
+ *
+ * This structure is used in combination with GSMIOC_GETCONF_DLCI and
+ * GSMIOC_SETCONF_DLCI to retrieve and set the channel specific parameters
+ * of an n_gsm ldisc.
+ *
+ * Set the channel accordingly before calling GSMIOC_GETCONF_DLCI.
+ *
+ * @channel: DLCI (0 for the associated DLCI).
+ * @adaption: Convergence layer type.
+ * @mtu: Maximum transfer unit.
+ * @priority: Priority (0 for default value).
+ * @i: Frame type (1 = UIH, 2 = UI).
+ * @k: Window size (0 for default value).
+ * @flags: DLCI specific flags.
+ * @reserved: For future use, must be initialized to zero.
+ */
struct gsm_dlci_config {
- __u32 channel; /* DLCI (0 for the associated DLCI) */
- __u32 adaption; /* Convergence layer type */
- __u32 mtu; /* Maximum transfer unit */
- __u32 priority; /* Priority (0 for default value) */
- __u32 i; /* Frame type (1 = UIH, 2 = UI) */
- __u32 k; /* Window size (0 for default value) */
- __u32 reserved[8]; /* For future use, must be initialized to zero */
+ __u32 channel;
+ __u32 adaption;
+ __u32 mtu;
+ __u32 priority;
+ __u32 i;
+ __u32 k;
+ __u32 flags;
+ __u32 reserved[7];
};
#define GSMIOC_GETCONF_DLCI _IOWR('G', 7, struct gsm_dlci_config)
diff --git a/include/uapi/linux/gtp.h b/include/uapi/linux/gtp.h
index 2f61298a7b77..3dcdb9e33cba 100644
--- a/include/uapi/linux/gtp.h
+++ b/include/uapi/linux/gtp.h
@@ -33,6 +33,6 @@ enum gtp_attrs {
GTPA_PAD,
__GTPA_MAX,
};
-#define GTPA_MAX (__GTPA_MAX + 1)
+#define GTPA_MAX (__GTPA_MAX - 1)
#endif /* _UAPI_LINUX_GTP_H_ */
diff --git a/include/uapi/linux/hash_info.h b/include/uapi/linux/hash_info.h
index 74a8609fcb4d..0af23ec196d8 100644
--- a/include/uapi/linux/hash_info.h
+++ b/include/uapi/linux/hash_info.h
@@ -35,6 +35,9 @@ enum hash_algo {
HASH_ALGO_SM3_256,
HASH_ALGO_STREEBOG_256,
HASH_ALGO_STREEBOG_512,
+ HASH_ALGO_SHA3_256,
+ HASH_ALGO_SHA3_384,
+ HASH_ALGO_SHA3_512,
HASH_ALGO__LAST
};
diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h
index 606b52e88ce3..3d1987e1bb2d 100644
--- a/include/uapi/linux/idxd.h
+++ b/include/uapi/linux/idxd.h
@@ -31,6 +31,7 @@ enum idxd_scmd_stat {
IDXD_SCMD_WQ_IRQ_ERR = 0x80100000,
IDXD_SCMD_WQ_USER_NO_IOMMU = 0x80110000,
IDXD_SCMD_DEV_EVL_ERR = 0x80120000,
+ IDXD_SCMD_WQ_NO_DRV_NAME = 0x80200000,
};
#define IDXD_SCMD_SOFTERR_MASK 0x80000000
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index f95326fce6bb..a5b743a2f775 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -723,6 +723,24 @@ enum {
};
#define MDBA_SET_ENTRY_MAX (__MDBA_SET_ENTRY_MAX - 1)
+/* [MDBA_GET_ENTRY] = {
+ * struct br_mdb_entry
+ * [MDBA_GET_ENTRY_ATTRS] = {
+ * [MDBE_ATTR_SOURCE]
+ * struct in_addr / struct in6_addr
+ * [MDBE_ATTR_SRC_VNI]
+ * u32
+ * }
+ * }
+ */
+enum {
+ MDBA_GET_ENTRY_UNSPEC,
+ MDBA_GET_ENTRY,
+ MDBA_GET_ENTRY_ATTRS,
+ __MDBA_GET_ENTRY_MAX,
+};
+#define MDBA_GET_ENTRY_MAX (__MDBA_GET_ENTRY_MAX - 1)
+
/* [MDBA_SET_ENTRY_ATTRS] = {
* [MDBE_ATTR_xxx]
* ...
@@ -739,6 +757,7 @@ enum {
MDBE_ATTR_VNI,
MDBE_ATTR_IFINDEX,
MDBE_ATTR_SRC_VNI,
+ MDBE_ATTR_STATE_MASK,
__MDBE_ATTR_MAX,
};
#define MDBE_ATTR_MAX (__MDBE_ATTR_MAX - 1)
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 4ac1000b0ef2..ab9bcff96e4d 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -376,7 +376,7 @@ enum {
IFLA_GSO_IPV4_MAX_SIZE,
IFLA_GRO_IPV4_MAX_SIZE,
-
+ IFLA_DPLL_PIN,
__IFLA_MAX
};
@@ -461,6 +461,286 @@ enum in6_addr_gen_mode {
/* Bridge section */
+/**
+ * DOC: Bridge enum definition
+ *
+ * Please *note* that the timer values in the following section are expected
+ * in clock_t format, which is seconds multiplied by USER_HZ (generally
+ * defined as 100).
+ *
+ * @IFLA_BR_FORWARD_DELAY
+ * The bridge forwarding delay is the time spent in LISTENING state
+ * (before moving to LEARNING) and in LEARNING state (before moving
+ * to FORWARDING). Only relevant if STP is enabled.
+ *
+ * The valid values are between (2 * USER_HZ) and (30 * USER_HZ).
+ * The default value is (15 * USER_HZ).
+ *
+ * @IFLA_BR_HELLO_TIME
+ * The time between hello packets sent by the bridge, when it is a root
+ * bridge or a designated bridge. Only relevant if STP is enabled.
+ *
+ * The valid values are between (1 * USER_HZ) and (10 * USER_HZ).
+ * The default value is (2 * USER_HZ).
+ *
+ * @IFLA_BR_MAX_AGE
+ * The hello packet timeout is the time until another bridge in the
+ * spanning tree is assumed to be dead, after reception of its last hello
+ * message. Only relevant if STP is enabled.
+ *
+ * The valid values are between (6 * USER_HZ) and (40 * USER_HZ).
+ * The default value is (20 * USER_HZ).
+ *
+ * @IFLA_BR_AGEING_TIME
+ * Configure the bridge's FDB entries aging time. It is the time a MAC
+ * address will be kept in the FDB after a packet has been received from
+ * that address. After this time has passed, entries are cleaned up.
+ * Allow values outside the 802.1 standard specification for special cases:
+ *
+ * * 0 - entry never ages (all permanent)
+ * * 1 - entry disappears (no persistence)
+ *
+ * The default value is (300 * USER_HZ).
+ *
+ * @IFLA_BR_STP_STATE
+ * Turn spanning tree protocol on (*IFLA_BR_STP_STATE* > 0) or off
+ * (*IFLA_BR_STP_STATE* == 0) for this bridge.
+ *
+ * The default value is 0 (disabled).
+ *
+ * @IFLA_BR_PRIORITY
+ * Set this bridge's spanning tree priority, used during STP root bridge
+ * election.
+ *
+ * The valid values are between 0 and 65535.
+ *
+ * @IFLA_BR_VLAN_FILTERING
+ * Turn VLAN filtering on (*IFLA_BR_VLAN_FILTERING* > 0) or off
+ * (*IFLA_BR_VLAN_FILTERING* == 0). When disabled, the bridge will not
+ * consider the VLAN tag when handling packets.
+ *
+ * The default value is 0 (disabled).
+ *
+ * @IFLA_BR_VLAN_PROTOCOL
+ * Set the protocol used for VLAN filtering.
+ *
+ *    The valid values are 0x8100 (802.1Q) or 0x88A8 (802.1AD). The default
+ *    value is 0x8100 (802.1Q).
+ *
+ * @IFLA_BR_GROUP_FWD_MASK
+ * The group forwarding mask. This is the bitmask that is applied to
+ * decide whether to forward incoming frames destined to link-local
+ * addresses (of the form 01:80:C2:00:00:0X).
+ *
+ * The default value is 0, which means the bridge does not forward any
+ * link-local frames coming on this port.
+ *
+ * @IFLA_BR_ROOT_ID
+ * The bridge root id, read only.
+ *
+ * @IFLA_BR_BRIDGE_ID
+ * The bridge id, read only.
+ *
+ * @IFLA_BR_ROOT_PORT
+ * The bridge root port, read only.
+ *
+ * @IFLA_BR_ROOT_PATH_COST
+ * The bridge root path cost, read only.
+ *
+ * @IFLA_BR_TOPOLOGY_CHANGE
+ * The bridge topology change, read only.
+ *
+ * @IFLA_BR_TOPOLOGY_CHANGE_DETECTED
+ * The bridge topology change detected, read only.
+ *
+ * @IFLA_BR_HELLO_TIMER
+ * The bridge hello timer, read only.
+ *
+ * @IFLA_BR_TCN_TIMER
+ * The bridge tcn timer, read only.
+ *
+ * @IFLA_BR_TOPOLOGY_CHANGE_TIMER
+ * The bridge topology change timer, read only.
+ *
+ * @IFLA_BR_GC_TIMER
+ * The bridge gc timer, read only.
+ *
+ * @IFLA_BR_GROUP_ADDR
+ * Set the MAC address of the multicast group this bridge uses for STP.
+ * The address must be a link-local address in standard Ethernet MAC address
+ * format. It is an address of the form 01:80:C2:00:00:0X, with X in [0, 4..f].
+ *
+ * The default value is 0.
+ *
+ * @IFLA_BR_FDB_FLUSH
+ * Flush bridge's fdb dynamic entries.
+ *
+ * @IFLA_BR_MCAST_ROUTER
+ * Set bridge's multicast router if IGMP snooping is enabled.
+ * The valid values are:
+ *
+ * * 0 - disabled.
+ * * 1 - automatic (queried).
+ * * 2 - permanently enabled.
+ *
+ * The default value is 1.
+ *
+ * @IFLA_BR_MCAST_SNOOPING
+ * Turn multicast snooping on (*IFLA_BR_MCAST_SNOOPING* > 0) or off
+ * (*IFLA_BR_MCAST_SNOOPING* == 0).
+ *
+ * The default value is 1.
+ *
+ * @IFLA_BR_MCAST_QUERY_USE_IFADDR
+ *    If enabled, use the bridge's own IP address as the source address for IGMP
+ * queries (*IFLA_BR_MCAST_QUERY_USE_IFADDR* > 0) or the default of 0.0.0.0
+ * (*IFLA_BR_MCAST_QUERY_USE_IFADDR* == 0).
+ *
+ * The default value is 0 (disabled).
+ *
+ * @IFLA_BR_MCAST_QUERIER
+ *    Enable (*IFLA_BR_MCAST_QUERIER* > 0) or disable
+ *    (*IFLA_BR_MCAST_QUERIER* == 0) the IGMP querier, i.e. sending of multicast
+ * queries by the bridge.
+ *
+ * The default value is 0 (disabled).
+ *
+ * @IFLA_BR_MCAST_HASH_ELASTICITY
+ *    Set the multicast database hash elasticity. It is the maximum chain length in
+ * the multicast hash table. This attribute is *deprecated* and the value
+ * is always 16.
+ *
+ * @IFLA_BR_MCAST_HASH_MAX
+ *    Set the maximum size of the multicast hash table.
+ *
+ *    The default value is 4096; the value must be a power of 2.
+ *
+ * @IFLA_BR_MCAST_LAST_MEMBER_CNT
+ * The Last Member Query Count is the number of Group-Specific Queries
+ * sent before the router assumes there are no local members. The Last
+ * Member Query Count is also the number of Group-and-Source-Specific
+ * Queries sent before the router assumes there are no listeners for a
+ * particular source.
+ *
+ * The default value is 2.
+ *
+ * @IFLA_BR_MCAST_STARTUP_QUERY_CNT
+ * The Startup Query Count is the number of Queries sent out on startup,
+ * separated by the Startup Query Interval.
+ *
+ * The default value is 2.
+ *
+ * @IFLA_BR_MCAST_LAST_MEMBER_INTVL
+ * The Last Member Query Interval is the Max Response Time inserted into
+ * Group-Specific Queries sent in response to Leave Group messages, and
+ * is also the amount of time between Group-Specific Query messages.
+ *
+ * The default value is (1 * USER_HZ).
+ *
+ * @IFLA_BR_MCAST_MEMBERSHIP_INTVL
+ * The interval after which the bridge will leave a group, if no membership
+ * reports for this group are received.
+ *
+ * The default value is (260 * USER_HZ).
+ *
+ * @IFLA_BR_MCAST_QUERIER_INTVL
+ *    The interval between queries sent by other routers. If no queries are
+ *    seen after this delay has passed, the bridge will start to send its own
+ *    queries (as if *IFLA_BR_MCAST_QUERIER* was enabled).
+ *
+ * The default value is (255 * USER_HZ).
+ *
+ * @IFLA_BR_MCAST_QUERY_INTVL
+ * The Query Interval is the interval between General Queries sent by
+ * the Querier.
+ *
+ * The default value is (125 * USER_HZ). The minimum value is (1 * USER_HZ).
+ *
+ * @IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
+ * The Max Response Time used to calculate the Max Resp Code inserted
+ * into the periodic General Queries.
+ *
+ * The default value is (10 * USER_HZ).
+ *
+ * @IFLA_BR_MCAST_STARTUP_QUERY_INTVL
+ * The interval between queries in the startup phase.
+ *
+ * The default value is (125 * USER_HZ) / 4. The minimum value is (1 * USER_HZ).
+ *
+ * @IFLA_BR_NF_CALL_IPTABLES
+ * Enable (*NF_CALL_IPTABLES* > 0) or disable (*NF_CALL_IPTABLES* == 0)
+ * iptables hooks on the bridge.
+ *
+ * The default value is 0 (disabled).
+ *
+ * @IFLA_BR_NF_CALL_IP6TABLES
+ * Enable (*NF_CALL_IP6TABLES* > 0) or disable (*NF_CALL_IP6TABLES* == 0)
+ * ip6tables hooks on the bridge.
+ *
+ * The default value is 0 (disabled).
+ *
+ * @IFLA_BR_NF_CALL_ARPTABLES
+ * Enable (*NF_CALL_ARPTABLES* > 0) or disable (*NF_CALL_ARPTABLES* == 0)
+ * arptables hooks on the bridge.
+ *
+ * The default value is 0 (disabled).
+ *
+ * @IFLA_BR_VLAN_DEFAULT_PVID
+ * VLAN ID applied to untagged and priority-tagged incoming packets.
+ *
+ * The default value is 1. Setting to the special value 0 makes all ports of
+ * this bridge not have a PVID by default, which means that they will
+ * not accept VLAN-untagged traffic.
+ *
+ * @IFLA_BR_PAD
+ * Bridge attribute padding type for netlink message.
+ *
+ * @IFLA_BR_VLAN_STATS_ENABLED
+ * Enable (*IFLA_BR_VLAN_STATS_ENABLED* == 1) or disable
+ * (*IFLA_BR_VLAN_STATS_ENABLED* == 0) per-VLAN stats accounting.
+ *
+ * The default value is 0 (disabled).
+ *
+ * @IFLA_BR_MCAST_STATS_ENABLED
+ * Enable (*IFLA_BR_MCAST_STATS_ENABLED* > 0) or disable
+ * (*IFLA_BR_MCAST_STATS_ENABLED* == 0) multicast (IGMP/MLD) stats
+ * accounting.
+ *
+ * The default value is 0 (disabled).
+ *
+ * @IFLA_BR_MCAST_IGMP_VERSION
+ * Set the IGMP version.
+ *
+ * The valid values are 2 and 3. The default value is 2.
+ *
+ * @IFLA_BR_MCAST_MLD_VERSION
+ * Set the MLD version.
+ *
+ * The valid values are 1 and 2. The default value is 1.
+ *
+ * @IFLA_BR_VLAN_STATS_PER_PORT
+ * Enable (*IFLA_BR_VLAN_STATS_PER_PORT* == 1) or disable
+ * (*IFLA_BR_VLAN_STATS_PER_PORT* == 0) per-VLAN per-port stats accounting.
+ * Can be changed only when there are no port VLANs configured.
+ *
+ * The default value is 0 (disabled).
+ *
+ * @IFLA_BR_MULTI_BOOLOPT
+ * The multi_boolopt is used to control new boolean options to avoid adding
+ * new netlink attributes. You can look at ``enum br_boolopt_id`` for those
+ * options.
+ *
+ * @IFLA_BR_MCAST_QUERIER_STATE
+ * Bridge mcast querier states, read only.
+ *
+ * @IFLA_BR_FDB_N_LEARNED
+ * The number of dynamically learned FDB entries for the current bridge,
+ * read only.
+ *
+ * @IFLA_BR_FDB_MAX_LEARNED
+ *    Set the maximum number of dynamically learned FDB entries for the current
+ * bridge.
+ */
enum {
IFLA_BR_UNSPEC,
IFLA_BR_FORWARD_DELAY,
@@ -510,6 +790,8 @@ enum {
IFLA_BR_VLAN_STATS_PER_PORT,
IFLA_BR_MULTI_BOOLOPT,
IFLA_BR_MCAST_QUERIER_STATE,
+ IFLA_BR_FDB_N_LEARNED,
+ IFLA_BR_FDB_MAX_LEARNED,
__IFLA_BR_MAX,
};
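The bridge interval attributes documented above are expressed in USER_HZ ticks rather than seconds. A minimal userspace sketch of the conversion, assuming sysconf(_SC_CLK_TCK) as the source of USER_HZ (typically 100); the value shown is the IFLA_BR_MCAST_QUERY_INTVL default:

    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            long user_hz = sysconf(_SC_CLK_TCK);   /* USER_HZ as seen by userspace */
            long query_intvl = 125 * user_hz;      /* IFLA_BR_MCAST_QUERY_INTVL default */

            printf("query interval: %ld ticks = %ld s\n",
                   query_intvl, query_intvl / user_hz);
            return 0;
    }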
@@ -520,11 +802,252 @@ struct ifla_bridge_id {
__u8 addr[6]; /* ETH_ALEN */
};
+/**
+ * DOC: Bridge mode enum definition
+ *
+ * @BRIDGE_MODE_HAIRPIN
+ * Controls whether traffic may be sent back out of the port on which it
+ * was received. This option is also called reflective relay mode, and is
+ * used to support basic VEPA (Virtual Ethernet Port Aggregator)
+ * capabilities. By default, this flag is turned off and the bridge will
+ * not forward traffic back out of the receiving port.
+ */
enum {
BRIDGE_MODE_UNSPEC,
BRIDGE_MODE_HAIRPIN,
};
+/**
+ * DOC: Bridge port enum definition
+ *
+ * @IFLA_BRPORT_STATE
+ * The operational state of the port. The valid values are:
+ *
+ * * 0 - port is in STP *DISABLED* state. Make this port completely
+ * inactive for STP. This is also called BPDU filter and could be used
+ * to disable STP on an untrusted port, like a leaf virtual device.
+ * Traffic forwarding is also stopped on this port.
+ * * 1 - port is in STP *LISTENING* state. Only valid if STP is enabled
+ * on the bridge. In this state the port listens for STP BPDUs and
+ * drops all other traffic frames.
+ * * 2 - port is in STP *LEARNING* state. Only valid if STP is enabled on
+ * the bridge. In this state the port will accept traffic only for the
+ * purpose of updating MAC address tables.
+ * * 3 - port is in STP *FORWARDING* state. Port is fully active.
+ * * 4 - port is in STP *BLOCKING* state. Only valid if STP is enabled on
+ * the bridge. This state is used during the STP election process.
+ * In this state, the port will only process STP BPDUs.
+ *
+ * @IFLA_BRPORT_PRIORITY
+ * The STP port priority. The valid values are between 0 and 255.
+ *
+ * @IFLA_BRPORT_COST
+ * The STP path cost of the port. The valid values are between 1 and 65535.
+ *
+ * @IFLA_BRPORT_MODE
+ * Set the bridge port mode. See *BRIDGE_MODE_HAIRPIN* for more details.
+ *
+ * @IFLA_BRPORT_GUARD
+ * Controls whether STP BPDUs will be processed by the bridge port. By
+ * default, the flag is turned off to allow BPDU processing. Turning this
+ * flag on will disable the bridge port if a STP BPDU packet is received.
+ *
+ * If the bridge has Spanning Tree enabled, hostile devices on the network
+ * may send BPDUs on a port and cause network failure. Setting *guard on*
+ * will detect and stop this by disabling the port. The port will be
+ * restarted if the link is brought down, or removed and reattached.
+ *
+ * @IFLA_BRPORT_PROTECT
+ * Controls whether a given port is allowed to become a root port or not.
+ * Only used when STP is enabled on the bridge. By default the flag is off.
+ *
+ * This feature is also called root port guard. If a BPDU is received from a
+ * leaf (edge) port, it should not be elected as root port. This could
+ * be used if using STP on a bridge and the downstream bridges are not fully
+ * trusted; this prevents a hostile guest from rerouting traffic.
+ *
+ * @IFLA_BRPORT_FAST_LEAVE
+ * This flag allows the bridge to immediately stop multicast traffic
+ * forwarding on a port that receives an IGMP Leave message. It is only used
+ * when IGMP snooping is enabled on the bridge. By default the flag is off.
+ *
+ * @IFLA_BRPORT_LEARNING
+ * Controls whether a given port will learn *source* MAC addresses from
+ * received traffic or not. Also controls whether dynamic FDB entries
+ * (which can also be added by software) will be refreshed by incoming
+ * traffic. By default this flag is on.
+ *
+ * @IFLA_BRPORT_UNICAST_FLOOD
+ * Controls whether unicast traffic for which there is no FDB entry will
+ * be flooded towards this port. By default this flag is on.
+ *
+ * @IFLA_BRPORT_PROXYARP
+ * Enable proxy ARP on this port.
+ *
+ * @IFLA_BRPORT_LEARNING_SYNC
+ * Controls whether a given port will sync MAC addresses learned on the
+ * device port to the bridge FDB.
+ *
+ * @IFLA_BRPORT_PROXYARP_WIFI
+ * Enable proxy ARP on this port, meeting the extended requirements of
+ * the IEEE 802.11 and Hotspot 2.0 specifications.
+ *
+ * @IFLA_BRPORT_ROOT_ID
+ *
+ * @IFLA_BRPORT_BRIDGE_ID
+ *
+ * @IFLA_BRPORT_DESIGNATED_PORT
+ *
+ * @IFLA_BRPORT_DESIGNATED_COST
+ *
+ * @IFLA_BRPORT_ID
+ *
+ * @IFLA_BRPORT_NO
+ *
+ * @IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
+ *
+ * @IFLA_BRPORT_CONFIG_PENDING
+ *
+ * @IFLA_BRPORT_MESSAGE_AGE_TIMER
+ *
+ * @IFLA_BRPORT_FORWARD_DELAY_TIMER
+ *
+ * @IFLA_BRPORT_HOLD_TIMER
+ *
+ * @IFLA_BRPORT_FLUSH
+ * Flush the bridge port's dynamic FDB entries.
+ *
+ * @IFLA_BRPORT_MULTICAST_ROUTER
+ * Configure the port's multicast router presence. A port with
+ * a multicast router will receive all multicast traffic.
+ * The valid values are:
+ *
+ * * 0 - disable multicast routers on this port
+ * * 1 - let the system detect the presence of routers (default)
+ * * 2 - permanently enable multicast traffic forwarding on this port
+ * * 3 - enable multicast routers temporarily on this port, not depending
+ * on incoming queries.
+ *
+ * @IFLA_BRPORT_PAD
+ *
+ * @IFLA_BRPORT_MCAST_FLOOD
+ * Controls whether a given port will flood multicast traffic for which
+ * there is no MDB entry. By default this flag is on.
+ *
+ * @IFLA_BRPORT_MCAST_TO_UCAST
+ * Controls whether a given port will replicate packets using unicast
+ * instead of multicast. By default this flag is off.
+ *
+ * This is done by copying the packet per host and changing the multicast
+ * destination MAC to a unicast one accordingly.
+ *
+ * *mcast_to_unicast* works on top of the multicast snooping feature of the
+ * bridge, which means unicast copies are only delivered to hosts that
+ * have previously signaled interest in them via IGMP/MLD reports.
+ *
+ * This feature is intended for interface types which have a more reliable
+ * and/or efficient way to deliver unicast packets than broadcast ones
+ * (e.g. WiFi).
+ *
+ * However, it should only be enabled on interfaces where no IGMPv2/MLDv1
+ * report suppression takes place. The IGMP/MLD report suppression issue
+ * is usually overcome by the network daemon (supplicant) enabling AP
+ * isolation, thereby separating all STAs.
+ *
+ * Delivery of STA-to-STA IP multicast is made possible again by enabling
+ * and utilizing the bridge hairpin mode, which considers the incoming port
+ * as a potential outgoing port, too (see *BRIDGE_MODE_HAIRPIN* option).
+ * Hairpin mode is performed after multicast snooping, therefore reports
+ * are only delivered to STAs running a multicast router.
+ *
+ * @IFLA_BRPORT_VLAN_TUNNEL
+ * Controls whether vlan to tunnel mapping is enabled on the port.
+ * By default this flag is off.
+ *
+ * @IFLA_BRPORT_BCAST_FLOOD
+ * Controls flooding of broadcast traffic on the given port. By default
+ * this flag is on.
+ *
+ * @IFLA_BRPORT_GROUP_FWD_MASK
+ * Set the group forward mask. This is a bitmask that is applied to
+ * decide whether to forward incoming frames destined to link-local
+ * addresses. These addresses are of the form 01:80:C2:00:00:0X. The
+ * default is 0, which means the bridge does not forward any link-local
+ * frames arriving on this port.
+ *
+ * @IFLA_BRPORT_NEIGH_SUPPRESS
+ * Controls whether neighbor discovery (arp and nd) proxy and suppression
+ * is enabled on the port. By default this flag is off.
+ *
+ * @IFLA_BRPORT_ISOLATED
+ * Controls whether a given port will be isolated, which means it will be
+ * able to communicate with non-isolated ports only. By default this
+ * flag is off.
+ *
+ * @IFLA_BRPORT_BACKUP_PORT
+ * Set a backup port. If the port loses carrier all traffic will be
+ * redirected to the configured backup port. Set the value to 0 to disable
+ * it.
+ *
+ * @IFLA_BRPORT_MRP_RING_OPEN
+ *
+ * @IFLA_BRPORT_MRP_IN_OPEN
+ *
+ * @IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT
+ * The per-port limit on the number of tracked EHT hosts. The default
+ * value is 512. Setting it to 0 is not allowed.
+ *
+ * @IFLA_BRPORT_MCAST_EHT_HOSTS_CNT
+ * The current number of tracked hosts, read only.
+ *
+ * @IFLA_BRPORT_LOCKED
+ * Controls whether a port will be locked, meaning that hosts behind the
+ * port will not be able to communicate through the port unless an FDB
+ * entry with the host's MAC address is present. The common use case is
+ * that hosts are allowed access through authentication with the IEEE 802.1X
+ * protocol or based on whitelists. By default this flag is off.
+ *
+ * Please note that secure 802.1X deployments should always use the
+ * *BR_BOOLOPT_NO_LL_LEARN* flag, to not permit the bridge to populate its
+ * FDB based on link-local (EAPOL) traffic received on the port.
+ *
+ * @IFLA_BRPORT_MAB
+ * Controls whether a port will use MAC Authentication Bypass (MAB), a
+ * technique through which select MAC addresses may be allowed on a locked
+ * port, without using 802.1X authentication. Packets with an unknown source
+ * MAC address generate a "locked" FDB entry on the incoming bridge port.
+ * The common use case is for user space to react to these bridge FDB
+ * notifications and optionally replace the locked FDB entry with a normal
+ * one, allowing traffic to pass for whitelisted MAC addresses.
+ *
+ * Setting this flag also requires *IFLA_BRPORT_LOCKED* and
+ * *IFLA_BRPORT_LEARNING*. *IFLA_BRPORT_LOCKED* ensures that unauthorized
+ * data packets are dropped, and *IFLA_BRPORT_LEARNING* allows the dynamic
+ * FDB entries installed by user space (as replacements for the locked FDB
+ * entries) to be refreshed and/or aged out.
+ *
+ * @IFLA_BRPORT_MCAST_N_GROUPS
+ *
+ * @IFLA_BRPORT_MCAST_MAX_GROUPS
+ * Sets the maximum number of MDB entries that can be registered for a
+ * given port. Attempts to register more MDB entries at the port than this
+ * limit allows will be rejected, whether they are done through netlink
+ * (e.g. with the bridge tool) or through IGMP or MLD membership reports.
+ * Setting a limit of 0 disables the limit. The default value is 0.
+ *
+ * @IFLA_BRPORT_NEIGH_VLAN_SUPPRESS
+ * Controls whether neighbor discovery (arp and nd) proxy and suppression
+ * is enabled on a per-VLAN basis for a given port. By default this flag
+ * is off.
+ *
+ * Note that this option only takes effect when *IFLA_BRPORT_NEIGH_SUPPRESS*
+ * is enabled for a given port.
+ *
+ * @IFLA_BRPORT_BACKUP_NHID
+ * The FDB nexthop object ID to attach to packets being redirected to a
+ * backup port that has VLAN tunnel mapping enabled (via the
+ * *IFLA_BRPORT_VLAN_TUNNEL* option). Setting a value of 0 (default) has
+ * the effect of not attaching any ID.
+ */
enum {
IFLA_BRPORT_UNSPEC,
IFLA_BRPORT_STATE, /* Spanning tree state */
@@ -570,6 +1093,7 @@ enum {
IFLA_BRPORT_MCAST_N_GROUPS,
IFLA_BRPORT_MCAST_MAX_GROUPS,
IFLA_BRPORT_NEIGH_VLAN_SUPPRESS,
+ IFLA_BRPORT_BACKUP_NHID,
__IFLA_BRPORT_MAX
};
#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@@ -755,6 +1279,30 @@ struct tunnel_msg {
__u32 ifindex;
};
+/* netkit section */
+enum netkit_action {
+ NETKIT_NEXT = -1,
+ NETKIT_PASS = 0,
+ NETKIT_DROP = 2,
+ NETKIT_REDIRECT = 7,
+};
+
+enum netkit_mode {
+ NETKIT_L2,
+ NETKIT_L3,
+};
+
+enum {
+ IFLA_NETKIT_UNSPEC,
+ IFLA_NETKIT_PEER_INFO,
+ IFLA_NETKIT_PRIMARY,
+ IFLA_NETKIT_POLICY,
+ IFLA_NETKIT_PEER_POLICY,
+ IFLA_NETKIT_MODE,
+ __IFLA_NETKIT_MAX,
+};
+#define IFLA_NETKIT_MAX (__IFLA_NETKIT_MAX - 1)
+
/* VXLAN section */
/* include statistics in the dump */
@@ -828,6 +1376,8 @@ enum {
IFLA_VXLAN_TTL_INHERIT,
IFLA_VXLAN_DF,
IFLA_VXLAN_VNIFILTER, /* only applicable with COLLECT_METADATA mode */
+ IFLA_VXLAN_LOCALBYPASS,
+ IFLA_VXLAN_LABEL_POLICY, /* IPv6 flow label policy; ifla_vxlan_label_policy */
__IFLA_VXLAN_MAX
};
#define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
@@ -845,6 +1395,13 @@ enum ifla_vxlan_df {
VXLAN_DF_MAX = __VXLAN_DF_END - 1,
};
+enum ifla_vxlan_label_policy {
+ VXLAN_LABEL_FIXED = 0,
+ VXLAN_LABEL_INHERIT = 1,
+ __VXLAN_LABEL_END,
+ VXLAN_LABEL_MAX = __VXLAN_LABEL_END - 1,
+};
+
/* GENEVE section */
enum {
IFLA_GENEVE_UNSPEC,
@@ -1390,7 +1947,9 @@ enum {
enum {
IFLA_DSA_UNSPEC,
- IFLA_DSA_MASTER,
+ IFLA_DSA_CONDUIT,
+ /* Deprecated, use IFLA_DSA_CONDUIT instead */
+ IFLA_DSA_MASTER = IFLA_DSA_CONDUIT,
__IFLA_DSA_MAX,
};
diff --git a/include/uapi/linux/if_xdp.h b/include/uapi/linux/if_xdp.h
index a78a8096f4ce..d31698410410 100644
--- a/include/uapi/linux/if_xdp.h
+++ b/include/uapi/linux/if_xdp.h
@@ -25,9 +25,21 @@
* application.
*/
#define XDP_USE_NEED_WAKEUP (1 << 3)
+/* By setting this option, the userspace application indicates that it can
+ * handle multiple descriptors per packet, thus enabling AF_XDP to split
+ * multi-buffer XDP frames into multiple Rx descriptors. Without this set,
+ * such frames will be dropped.
+ */
+#define XDP_USE_SG (1 << 4)
/* Flags for xsk_umem_config flags */
-#define XDP_UMEM_UNALIGNED_CHUNK_FLAG (1 << 0)
+#define XDP_UMEM_UNALIGNED_CHUNK_FLAG (1 << 0)
+
+/* Force checksum calculation in software. Can be used for testing or
+ * working around potential HW issues. This option causes performance
+ * degradation and only works in XDP_COPY mode.
+ */
+#define XDP_UMEM_TX_SW_CSUM (1 << 1)
struct sockaddr_xdp {
__u16 sxdp_family;
@@ -70,6 +82,7 @@ struct xdp_umem_reg {
__u32 chunk_size;
__u32 headroom;
__u32 flags;
+ __u32 tx_metadata_len;
};
struct xdp_statistics {
@@ -99,6 +112,41 @@ struct xdp_options {
#define XSK_UNALIGNED_BUF_ADDR_MASK \
((1ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) - 1)
+/* Request transmit timestamp. Upon completion, put it into tx_timestamp
+ * field of struct xsk_tx_metadata.
+ */
+#define XDP_TXMD_FLAGS_TIMESTAMP (1 << 0)
+
+/* Request transmit checksum offload. Checksum start position and offset
+ * are communicated via csum_start and csum_offset fields of struct
+ * xsk_tx_metadata.
+ */
+#define XDP_TXMD_FLAGS_CHECKSUM (1 << 1)
+
+/* AF_XDP offloads request. 'request' union member is consumed by the driver
+ * when the packet is being transmitted. 'completion' union member is
+ * filled by the driver when the transmit completion arrives.
+ */
+struct xsk_tx_metadata {
+ __u64 flags;
+
+ union {
+ struct {
+ /* XDP_TXMD_FLAGS_CHECKSUM */
+
+ /* Offset from desc->addr where checksumming should start. */
+ __u16 csum_start;
+ /* Offset from csum_start where checksum should be stored. */
+ __u16 csum_offset;
+ } request;
+
+ struct {
+ /* XDP_TXMD_FLAGS_TIMESTAMP */
+ __u64 tx_timestamp;
+ } completion;
+ };
+};
+
/* Rx/Tx descriptor */
struct xdp_desc {
__u64 addr;
@@ -108,4 +156,14 @@ struct xdp_desc {
/* UMEM descriptor is __u64 */
+/* Flag indicating that the packet continues with the buffer pointed to by the
+ * next frame in the ring. The end of the packet is signalled by setting this
+ * bit to zero. For single buffer packets, every descriptor has 'options' set
+ * to 0 and this maintains backward compatibility.
+ */
+#define XDP_PKT_CONTD (1 << 0)
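Illustrative only: a sketch of how an XDP_USE_SG consumer might walk one multi-buffer packet, summing fragment lengths until a descriptor without XDP_PKT_CONTD marks the end. The descs/n parameters are hypothetical; a real application reads descriptors out of the XSK Rx ring:

    #include <linux/if_xdp.h>

    /* Sum the fragment lengths of the packet starting at descs[0]. */
    static unsigned int packet_len(const struct xdp_desc *descs, unsigned int n)
    {
            unsigned int i, len = 0;

            for (i = 0; i < n; i++) {
                    len += descs[i].len;
                    if (!(descs[i].options & XDP_PKT_CONTD))
                            break;  /* last fragment of this packet */
            }
            return len;
    }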
+
+/* TX packet carries valid metadata. */
+#define XDP_TX_METADATA (1 << 1)
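A hedged sketch of requesting Tx checksum offload for one descriptor. It assumes the UMEM was registered with tx_metadata_len == sizeof(struct xsk_tx_metadata) and that the metadata area immediately precedes the packet buffer; the Ethernet/IPv4/UDP offsets are illustrative:

    #include <linux/if_xdp.h>

    static void request_udp_csum(void *pkt, struct xdp_desc *desc)
    {
            struct xsk_tx_metadata *meta =
                    (struct xsk_tx_metadata *)((char *)pkt - sizeof(*meta));

            meta->flags = XDP_TXMD_FLAGS_CHECKSUM;
            meta->request.csum_start = 14 + 20;  /* after Ethernet + IPv4 headers */
            meta->request.csum_offset = 6;       /* UDP checksum field offset */
            desc->options |= XDP_TX_METADATA;    /* metadata valid for this desc */
    }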
+
#endif /* _LINUX_IF_XDP_H */
diff --git a/include/uapi/linux/iio/types.h b/include/uapi/linux/iio/types.h
index c79f2f046a0b..f2e0b2d50e6b 100644
--- a/include/uapi/linux/iio/types.h
+++ b/include/uapi/linux/iio/types.h
@@ -47,6 +47,10 @@ enum iio_chan_type {
IIO_POSITIONRELATIVE,
IIO_PHASE,
IIO_MASSCONCENTRATION,
+ IIO_DELTA_ANGL,
+ IIO_DELTA_VELOCITY,
+ IIO_COLORTEMP,
+ IIO_CHROMATICITY,
};
enum iio_modifier {
@@ -101,6 +105,8 @@ enum iio_modifier {
IIO_MOD_PITCH,
IIO_MOD_YAW,
IIO_MOD_ROLL,
+ IIO_MOD_LIGHT_UVA,
+ IIO_MOD_LIGHT_UVB,
};
enum iio_event_type {
diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h
index c4c53a9ab959..ff8d21f9e95b 100644
--- a/include/uapi/linux/in6.h
+++ b/include/uapi/linux/in6.h
@@ -145,7 +145,7 @@ struct in6_flowlabel_req {
#define IPV6_TLV_PADN 1
#define IPV6_TLV_ROUTERALERT 5
#define IPV6_TLV_CALIPSO 7 /* RFC 5570 */
-#define IPV6_TLV_IOAM 49 /* TEMPORARY IANA allocation for IOAM */
+#define IPV6_TLV_IOAM 49 /* RFC 9486 */
#define IPV6_TLV_JUMBO 194
#define IPV6_TLV_HAO 201 /* home address option */
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 0716cb17e436..7a673b52827b 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -43,6 +43,10 @@ struct io_uring_sqe {
union {
__u64 addr; /* pointer to buffer or iovecs */
__u64 splice_off_in;
+ struct {
+ __u32 level;
+ __u32 optname;
+ };
};
__u32 len; /* buffer size or number of iovecs */
union {
@@ -65,6 +69,9 @@ struct io_uring_sqe {
__u32 xattr_flags;
__u32 msg_ring_flags;
__u32 uring_cmd_flags;
+ __u32 waitid_flags;
+ __u32 futex_flags;
+ __u32 install_fd_flags;
};
__u64 user_data; /* data to be passed back at completion time */
/* pack this to avoid bogus arm OABI complaints */
@@ -79,6 +86,7 @@ struct io_uring_sqe {
union {
__s32 splice_fd_in;
__u32 file_index;
+ __u32 optlen;
struct {
__u16 addr_len;
__u16 __pad3[1];
@@ -89,6 +97,7 @@ struct io_uring_sqe {
__u64 addr3;
__u64 __pad2[1];
};
+ __u64 optval;
/*
* If the ring is initialized with IORING_SETUP_SQE128, then
* this field is used for 80 bytes of arbitrary command data
@@ -173,6 +182,23 @@ enum {
*/
#define IORING_SETUP_DEFER_TASKRUN (1U << 13)
+/*
+ * Application provides the memory for the rings
+ */
+#define IORING_SETUP_NO_MMAP (1U << 14)
+
+/*
+ * Register the ring fd in itself for use with
+ * IORING_REGISTER_USE_REGISTERED_RING; return a registered fd index rather
+ * than an fd.
+ */
+#define IORING_SETUP_REGISTERED_FD_ONLY (1U << 15)
+
+/*
+ * Removes indirection through the SQ index array.
+ */
+#define IORING_SETUP_NO_SQARRAY (1U << 16)
+
enum io_uring_op {
IORING_OP_NOP,
IORING_OP_READV,
@@ -223,17 +249,24 @@ enum io_uring_op {
IORING_OP_URING_CMD,
IORING_OP_SEND_ZC,
IORING_OP_SENDMSG_ZC,
+ IORING_OP_READ_MULTISHOT,
+ IORING_OP_WAITID,
+ IORING_OP_FUTEX_WAIT,
+ IORING_OP_FUTEX_WAKE,
+ IORING_OP_FUTEX_WAITV,
+ IORING_OP_FIXED_FD_INSTALL,
/* this goes last, obviously */
IORING_OP_LAST,
};
/*
- * sqe->uring_cmd_flags
+ * sqe->uring_cmd_flags; the top 8 bits aren't available to userspace
* IORING_URING_CMD_FIXED use registered buffer; pass this flag
* along with setting sqe->buf_index.
*/
#define IORING_URING_CMD_FIXED (1U << 0)
+#define IORING_URING_CMD_MASK IORING_URING_CMD_FIXED
/*
@@ -285,11 +318,15 @@ enum io_uring_op {
* request 'user_data'
* IORING_ASYNC_CANCEL_ANY Match any request
* IORING_ASYNC_CANCEL_FD_FIXED 'fd' passed in is a fixed descriptor
+ * IORING_ASYNC_CANCEL_USERDATA Match on user_data, default for no other key
+ * IORING_ASYNC_CANCEL_OP Match request based on opcode
*/
#define IORING_ASYNC_CANCEL_ALL (1U << 0)
#define IORING_ASYNC_CANCEL_FD (1U << 1)
#define IORING_ASYNC_CANCEL_ANY (1U << 2)
#define IORING_ASYNC_CANCEL_FD_FIXED (1U << 3)
+#define IORING_ASYNC_CANCEL_USERDATA (1U << 4)
+#define IORING_ASYNC_CANCEL_OP (1U << 5)
/*
* send/sendmsg and recv/recvmsg flags (sqe->ioprio)
@@ -352,6 +389,13 @@ enum {
#define IORING_MSG_RING_FLAGS_PASS (1U << 1)
/*
+ * IORING_OP_FIXED_FD_INSTALL flags (sqe->install_fd_flags)
+ *
+ * IORING_FIXED_FD_NO_CLOEXEC Don't mark the fd as O_CLOEXEC
+ */
+#define IORING_FIXED_FD_NO_CLOEXEC (1U << 0)
+
+/*
* IO completion data structure (Completion Queue Entry)
*/
struct io_uring_cqe {
@@ -406,7 +450,7 @@ struct io_sqring_offsets {
__u32 dropped;
__u32 array;
__u32 resv1;
- __u64 resv2;
+ __u64 user_addr;
};
/*
@@ -425,7 +469,7 @@ struct io_cqring_offsets {
__u32 cqes;
__u32 flags;
__u32 resv1;
- __u64 resv2;
+ __u64 user_addr;
};
/*
@@ -523,6 +567,9 @@ enum {
/* register a range of fixed file slots for automatic slot allocation */
IORING_REGISTER_FILE_ALLOC_RANGE = 25,
+ /* return status information for a buffer group */
+ IORING_REGISTER_PBUF_STATUS = 26,
+
/* this goes last */
IORING_REGISTER_LAST,
@@ -649,6 +696,13 @@ struct io_uring_buf_reg {
__u64 resv[3];
};
+/* argument for IORING_REGISTER_PBUF_STATUS */
+struct io_uring_buf_status {
+ __u32 buf_group; /* input */
+ __u32 head; /* output */
+ __u32 resv[8];
+};
+
/*
* io_uring_restriction->opcode values
*/
@@ -683,7 +737,9 @@ struct io_uring_sync_cancel_reg {
__s32 fd;
__u32 flags;
struct __kernel_timespec timeout;
- __u64 pad[4];
+ __u8 opcode;
+ __u8 pad[7];
+ __u64 pad2[3];
};
/*
@@ -703,6 +759,16 @@ struct io_uring_recvmsg_out {
__u32 flags;
};
+/*
+ * Argument for IORING_OP_URING_CMD when file is a socket
+ */
+enum {
+ SOCKET_URING_OP_SIOCINQ = 0,
+ SOCKET_URING_OP_SIOCOUTQ,
+ SOCKET_URING_OP_GETSOCKOPT,
+ SOCKET_URING_OP_SETSOCKOPT,
+};
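A sketch of issuing one of these socket commands through liburing, assuming io_uring_prep_cmd_sock() is available (liburing 2.5 or later); the command result arrives in cqe->res:

    #include <liburing.h>
    #include <stddef.h>

    /* Query the number of bytes queued for reading on sockfd. */
    static int pending_bytes(struct io_uring *ring, int sockfd)
    {
            struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
            struct io_uring_cqe *cqe;
            int ret;

            io_uring_prep_cmd_sock(sqe, SOCKET_URING_OP_SIOCINQ, sockfd,
                                   0, 0, NULL, 0);
            io_uring_submit(ring);
            io_uring_wait_cqe(ring, &cqe);
            ret = cqe->res;         /* byte count, or -errno */
            io_uring_cqe_seen(ring, cqe);
            return ret;
    }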
+
#ifdef __cplusplus
}
#endif
diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h
index 98ebba80cfa1..1dfeaa2e649e 100644
--- a/include/uapi/linux/iommufd.h
+++ b/include/uapi/linux/iommufd.h
@@ -45,6 +45,11 @@ enum {
IOMMUFD_CMD_IOAS_UNMAP,
IOMMUFD_CMD_OPTION,
IOMMUFD_CMD_VFIO_IOAS,
+ IOMMUFD_CMD_HWPT_ALLOC,
+ IOMMUFD_CMD_GET_HW_INFO,
+ IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING,
+ IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP,
+ IOMMUFD_CMD_HWPT_INVALIDATE,
};
/**
@@ -344,4 +349,347 @@ struct iommu_vfio_ioas {
__u16 __reserved;
};
#define IOMMU_VFIO_IOAS _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VFIO_IOAS)
+
+/**
+ * enum iommufd_hwpt_alloc_flags - Flags for HWPT allocation
+ * @IOMMU_HWPT_ALLOC_NEST_PARENT: If set, allocate a HWPT that can serve as
+ * the parent HWPT in a nesting configuration.
+ * @IOMMU_HWPT_ALLOC_DIRTY_TRACKING: Dirty tracking support for device IOMMU is
+ * enforced on device attachment
+ */
+enum iommufd_hwpt_alloc_flags {
+ IOMMU_HWPT_ALLOC_NEST_PARENT = 1 << 0,
+ IOMMU_HWPT_ALLOC_DIRTY_TRACKING = 1 << 1,
+};
+
+/**
+ * enum iommu_hwpt_vtd_s1_flags - Intel VT-d stage-1 page table
+ * entry attributes
+ * @IOMMU_VTD_S1_SRE: Supervisor request
+ * @IOMMU_VTD_S1_EAFE: Extended access enable
+ * @IOMMU_VTD_S1_WPE: Write protect enable
+ */
+enum iommu_hwpt_vtd_s1_flags {
+ IOMMU_VTD_S1_SRE = 1 << 0,
+ IOMMU_VTD_S1_EAFE = 1 << 1,
+ IOMMU_VTD_S1_WPE = 1 << 2,
+};
+
+/**
+ * struct iommu_hwpt_vtd_s1 - Intel VT-d stage-1 page table
+ * info (IOMMU_HWPT_DATA_VTD_S1)
+ * @flags: Combination of enum iommu_hwpt_vtd_s1_flags
+ * @pgtbl_addr: The base address of the stage-1 page table.
+ * @addr_width: The address width of the stage-1 page table
+ * @__reserved: Must be 0
+ */
+struct iommu_hwpt_vtd_s1 {
+ __aligned_u64 flags;
+ __aligned_u64 pgtbl_addr;
+ __u32 addr_width;
+ __u32 __reserved;
+};
+
+/**
+ * enum iommu_hwpt_data_type - IOMMU HWPT Data Type
+ * @IOMMU_HWPT_DATA_NONE: no data
+ * @IOMMU_HWPT_DATA_VTD_S1: Intel VT-d stage-1 page table
+ */
+enum iommu_hwpt_data_type {
+ IOMMU_HWPT_DATA_NONE,
+ IOMMU_HWPT_DATA_VTD_S1,
+};
+
+/**
+ * struct iommu_hwpt_alloc - ioctl(IOMMU_HWPT_ALLOC)
+ * @size: sizeof(struct iommu_hwpt_alloc)
+ * @flags: Combination of enum iommufd_hwpt_alloc_flags
+ * @dev_id: The device to allocate this HWPT for
+ * @pt_id: The IOAS or HWPT to connect this HWPT to
+ * @out_hwpt_id: The ID of the new HWPT
+ * @__reserved: Must be 0
+ * @data_type: One of enum iommu_hwpt_data_type
+ * @data_len: Length of the type specific data
+ * @data_uptr: User pointer to the type specific data
+ *
+ * Explicitly allocate a hardware page table object. This is the same object
+ * type that is returned by iommufd_device_attach() and represents the
+ * underlying iommu driver's iommu_domain kernel object.
+ *
+ * A kernel-managed HWPT will be created with the mappings from the given
+ * IOAS via the @pt_id. The @data_type for this allocation must be set to
+ * IOMMU_HWPT_DATA_NONE. The HWPT can be allocated as a parent HWPT for a
+ * nesting configuration by passing IOMMU_HWPT_ALLOC_NEST_PARENT via @flags.
+ *
+ * A user-managed nested HWPT will be created from a given parent HWPT via
+ * @pt_id; the parent HWPT must have been allocated previously via the
+ * same ioctl from a given IOAS (@pt_id). In this case, the @data_type
+ * must be set to a pre-defined type corresponding to an I/O page table
+ * type supported by the underlying IOMMU hardware.
+ *
+ * If the @data_type is set to IOMMU_HWPT_DATA_NONE, @data_len and
+ * @data_uptr should be zero. Otherwise, both @data_len and @data_uptr
+ * must be given.
+ */
+struct iommu_hwpt_alloc {
+ __u32 size;
+ __u32 flags;
+ __u32 dev_id;
+ __u32 pt_id;
+ __u32 out_hwpt_id;
+ __u32 __reserved;
+ __u32 data_type;
+ __u32 data_len;
+ __aligned_u64 data_uptr;
+};
+#define IOMMU_HWPT_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_HWPT_ALLOC)
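A hedged sketch of the kernel-managed case described above; iommufd_fd, dev_id and ioas_id are assumed to come from earlier iommufd setup (opening /dev/iommu, binding the device, allocating an IOAS):

    #include <sys/ioctl.h>
    #include <linux/iommufd.h>

    static int hwpt_alloc_kernel_managed(int iommufd_fd, __u32 dev_id,
                                         __u32 ioas_id, __u32 *hwpt_id)
    {
            struct iommu_hwpt_alloc cmd = {
                    .size = sizeof(cmd),
                    .dev_id = dev_id,                  /* device bound to iommufd */
                    .pt_id = ioas_id,                  /* back the HWPT with this IOAS */
                    .data_type = IOMMU_HWPT_DATA_NONE, /* kernel-managed */
            };

            if (ioctl(iommufd_fd, IOMMU_HWPT_ALLOC, &cmd))
                    return -1;
            *hwpt_id = cmd.out_hwpt_id;
            return 0;
    }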
+
+/**
+ * enum iommu_hw_info_vtd_flags - Flags for VT-d hw_info
+ * @IOMMU_HW_INFO_VTD_ERRATA_772415_SPR17: If set, disallow read-only mappings
+ * on a nested_parent domain.
+ * https://www.intel.com/content/www/us/en/content-details/772415/content-details.html
+ */
+enum iommu_hw_info_vtd_flags {
+ IOMMU_HW_INFO_VTD_ERRATA_772415_SPR17 = 1 << 0,
+};
+
+/**
+ * struct iommu_hw_info_vtd - Intel VT-d hardware information
+ *
+ * @flags: Combination of enum iommu_hw_info_vtd_flags
+ * @__reserved: Must be 0
+ *
+ * @cap_reg: Value of Intel VT-d capability register defined in VT-d spec
+ * section 11.4.2 Capability Register.
+ * @ecap_reg: Value of Intel VT-d extended capability register defined in
+ * VT-d spec section 11.4.3 Extended Capability Register.
+ *
+ * User needs to understand the Intel VT-d specification to decode the
+ * register value.
+ */
+struct iommu_hw_info_vtd {
+ __u32 flags;
+ __u32 __reserved;
+ __aligned_u64 cap_reg;
+ __aligned_u64 ecap_reg;
+};
+
+/**
+ * enum iommu_hw_info_type - IOMMU Hardware Info Types
+ * @IOMMU_HW_INFO_TYPE_NONE: Used by the drivers that do not report hardware
+ * info
+ * @IOMMU_HW_INFO_TYPE_INTEL_VTD: Intel VT-d iommu info type
+ */
+enum iommu_hw_info_type {
+ IOMMU_HW_INFO_TYPE_NONE,
+ IOMMU_HW_INFO_TYPE_INTEL_VTD,
+};
+
+/**
+ * enum iommufd_hw_capabilities
+ * @IOMMU_HW_CAP_DIRTY_TRACKING: IOMMU hardware support for dirty tracking
+ * If available, it means the following APIs
+ * are supported:
+ *
+ * IOMMU_HWPT_GET_DIRTY_BITMAP
+ * IOMMU_HWPT_SET_DIRTY_TRACKING
+ *
+ */
+enum iommufd_hw_capabilities {
+ IOMMU_HW_CAP_DIRTY_TRACKING = 1 << 0,
+};
+
+/**
+ * struct iommu_hw_info - ioctl(IOMMU_GET_HW_INFO)
+ * @size: sizeof(struct iommu_hw_info)
+ * @flags: Must be 0
+ * @dev_id: The device bound to the iommufd
+ * @data_len: Input the length of a user buffer in bytes. Output the length of
+ * data that kernel supports
+ * @data_uptr: User pointer to a user-space buffer used by the kernel to fill
+ * the iommu type specific hardware information data
+ * @out_data_type: Output the iommu hardware info type as defined in the enum
+ * iommu_hw_info_type.
+ * @out_capabilities: Output the generic iommu capability info type as defined
+ * in the enum iommu_hw_capabilities.
+ * @__reserved: Must be 0
+ *
+ * Query an iommu type specific hardware information data from an iommu behind
+ * a given device that has been bound to iommufd. This hardware info data will
+ * be used to sync capabilities between the virtual iommu and the physical
+ * iommu, e.g. a nested translation setup needs to check the hardware info, so
+ * a guest stage-1 page table can be compatible with the physical iommu.
+ *
+ * To capture an iommu type specific hardware information data, @data_uptr and
+ * its length @data_len must be provided. Trailing bytes will be zeroed if the
+ * user buffer is larger than the data that kernel has. Otherwise, kernel only
+ * fills the buffer using the given length in @data_len. If the ioctl
+ * succeeds, @data_len will be updated to the length that the kernel
+ * actually supports, and @out_data_type will be filled in to decode the
+ * data in the buffer pointed to by @data_uptr. An input @data_len of zero
+ * is allowed.
+ */
+struct iommu_hw_info {
+ __u32 size;
+ __u32 flags;
+ __u32 dev_id;
+ __u32 data_len;
+ __aligned_u64 data_uptr;
+ __u32 out_data_type;
+ __u32 __reserved;
+ __aligned_u64 out_capabilities;
+};
+#define IOMMU_GET_HW_INFO _IO(IOMMUFD_TYPE, IOMMUFD_CMD_GET_HW_INFO)
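A sketch of querying VT-d hardware information for a bound device; dev_id is assumed to come from an earlier device bind, and on success cmd.data_len reports how much data the kernel actually supports:

    #include <stdio.h>
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/iommufd.h>

    static void print_vtd_info(int iommufd_fd, __u32 dev_id)
    {
            struct iommu_hw_info_vtd vtd;
            struct iommu_hw_info cmd = {
                    .size = sizeof(cmd),
                    .dev_id = dev_id,
                    .data_len = sizeof(vtd),
                    .data_uptr = (__u64)(uintptr_t)&vtd,
            };

            if (ioctl(iommufd_fd, IOMMU_GET_HW_INFO, &cmd) == 0 &&
                cmd.out_data_type == IOMMU_HW_INFO_TYPE_INTEL_VTD)
                    printf("cap=%llx ecap=%llx\n",
                           (unsigned long long)vtd.cap_reg,
                           (unsigned long long)vtd.ecap_reg);
    }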
+
+/*
+ * enum iommufd_hwpt_set_dirty_tracking_flags - Flags for steering dirty
+ * tracking
+ * @IOMMU_HWPT_DIRTY_TRACKING_ENABLE: Enable dirty tracking
+ */
+enum iommufd_hwpt_set_dirty_tracking_flags {
+ IOMMU_HWPT_DIRTY_TRACKING_ENABLE = 1,
+};
+
+/**
+ * struct iommu_hwpt_set_dirty_tracking - ioctl(IOMMU_HWPT_SET_DIRTY_TRACKING)
+ * @size: sizeof(struct iommu_hwpt_set_dirty_tracking)
+ * @flags: Combination of enum iommufd_hwpt_set_dirty_tracking_flags
+ * @hwpt_id: HW pagetable ID that represents the IOMMU domain
+ * @__reserved: Must be 0
+ *
+ * Toggle dirty tracking on an HW pagetable.
+ */
+struct iommu_hwpt_set_dirty_tracking {
+ __u32 size;
+ __u32 flags;
+ __u32 hwpt_id;
+ __u32 __reserved;
+};
+#define IOMMU_HWPT_SET_DIRTY_TRACKING _IO(IOMMUFD_TYPE, \
+ IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING)
+
+/**
+ * enum iommufd_hwpt_get_dirty_bitmap_flags - Flags for getting dirty bits
+ * @IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR: Just read the PTEs without clearing
+ * any dirty bits metadata. This flag
+ * can be passed when the next
+ * operation on the same IOVA range
+ * is expected to be an unmap.
+ *
+ */
+enum iommufd_hwpt_get_dirty_bitmap_flags {
+ IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR = 1,
+};
+
+/**
+ * struct iommu_hwpt_get_dirty_bitmap - ioctl(IOMMU_HWPT_GET_DIRTY_BITMAP)
+ * @size: sizeof(struct iommu_hwpt_get_dirty_bitmap)
+ * @hwpt_id: HW pagetable ID that represents the IOMMU domain
+ * @flags: Combination of enum iommufd_hwpt_get_dirty_bitmap_flags
+ * @__reserved: Must be 0
+ * @iova: base IOVA of the bitmap first bit
+ * @length: IOVA range size
+ * @page_size: page size granularity of each bit in the bitmap
+ * @data: bitmap where the dirty bits are set. Each bit in the bitmap
+ * represents one page_size unit of IOVA, starting from the base @iova.
+ *
+ * To check whether a given IOVA is dirty:
+ *
+ * data[(iova / page_size) / 64] & (1ULL << ((iova / page_size) % 64))
+ *
+ * Walk the IOMMU pagetables for a given IOVA range to return a bitmap
+ * with the dirty IOVAs. In doing so it will also by default clear any
+ * dirty bit metadata set in the IOPTE.
+ */
+struct iommu_hwpt_get_dirty_bitmap {
+ __u32 size;
+ __u32 hwpt_id;
+ __u32 flags;
+ __u32 __reserved;
+ __aligned_u64 iova;
+ __aligned_u64 length;
+ __aligned_u64 page_size;
+ __aligned_u64 data;
+};
+#define IOMMU_HWPT_GET_DIRTY_BITMAP _IO(IOMMUFD_TYPE, \
+ IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP)
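A small helper matching the bit-addressing formula above, assuming iova is taken relative to the base @iova the bitmap was requested for:

    #include <linux/types.h>

    static inline int iova_is_dirty(const __u64 *data, __u64 iova, __u64 page_size)
    {
            __u64 bit = iova / page_size;  /* bit index of this IOVA's page */

            return !!(data[bit / 64] & (1ULL << (bit % 64)));
    }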
+
+/**
+ * enum iommu_hwpt_invalidate_data_type - IOMMU HWPT Cache Invalidation
+ * Data Type
+ * @IOMMU_HWPT_INVALIDATE_DATA_VTD_S1: Invalidation data for VTD_S1
+ */
+enum iommu_hwpt_invalidate_data_type {
+ IOMMU_HWPT_INVALIDATE_DATA_VTD_S1,
+};
+
+/**
+ * enum iommu_hwpt_vtd_s1_invalidate_flags - Flags for Intel VT-d
+ * stage-1 cache invalidation
+ * @IOMMU_VTD_INV_FLAGS_LEAF: Indicates whether the invalidation applies
+ * to all-levels page structure cache or just
+ * the leaf PTE cache.
+ */
+enum iommu_hwpt_vtd_s1_invalidate_flags {
+ IOMMU_VTD_INV_FLAGS_LEAF = 1 << 0,
+};
+
+/**
+ * struct iommu_hwpt_vtd_s1_invalidate - Intel VT-d cache invalidation
+ * (IOMMU_HWPT_INVALIDATE_DATA_VTD_S1)
+ * @addr: The start address of the range to be invalidated. It needs to
+ * be 4KB aligned.
+ * @npages: Number of contiguous 4K pages to be invalidated.
+ * @flags: Combination of enum iommu_hwpt_vtd_s1_invalidate_flags
+ * @__reserved: Must be 0
+ *
+ * The Intel VT-d specific invalidation data for user-managed stage-1 cache
+ * invalidation in nested translation. Userspace uses this structure to
+ * describe the impacted cache scope after modifying the stage-1 page table.
+ *
+ * To invalidate all the caches related to the page table, set @addr to 0
+ * and @npages to U64_MAX.
+ *
+ * The device TLB will be invalidated automatically if ATS is enabled.
+ */
+struct iommu_hwpt_vtd_s1_invalidate {
+ __aligned_u64 addr;
+ __aligned_u64 npages;
+ __u32 flags;
+ __u32 __reserved;
+};
+
+/**
+ * struct iommu_hwpt_invalidate - ioctl(IOMMU_HWPT_INVALIDATE)
+ * @size: sizeof(struct iommu_hwpt_invalidate)
+ * @hwpt_id: ID of a nested HWPT for cache invalidation
+ * @data_uptr: User pointer to an array of driver-specific cache invalidation
+ * data.
+ * @data_type: One of enum iommu_hwpt_invalidate_data_type, defining the data
+ * type of all the entries in the invalidation request array. It
+ * should be a type supported by the hwpt pointed by @hwpt_id.
+ * @entry_len: Length (in bytes) of a request entry in the request array
+ * @entry_num: Input the number of cache invalidation requests in the array.
+ * Output the number of requests successfully handled by kernel.
+ * @__reserved: Must be 0.
+ *
+ * Invalidate the IOMMU cache for a user-managed page table. Modifications to
+ * a user-managed page table should be followed by this operation to sync the
+ * cache. Each ioctl can support one or more cache invalidation requests in
+ * the array that has a total size of @entry_len * @entry_num.
+ *
+ * An empty invalidation request array (@entry_num == 0) is allowed, in which
+ * case @entry_len and @data_uptr are ignored. This can be used to check
+ * whether the given @data_type is supported by the kernel.
+ */
+struct iommu_hwpt_invalidate {
+ __u32 size;
+ __u32 hwpt_id;
+ __aligned_u64 data_uptr;
+ __u32 data_type;
+ __u32 entry_len;
+ __u32 entry_num;
+ __u32 __reserved;
+};
+#define IOMMU_HWPT_INVALIDATE _IO(IOMMUFD_TYPE, IOMMUFD_CMD_HWPT_INVALIDATE)
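A hedged sketch of the full-address-space invalidation described above, issued for a nested VT-d stage-1 HWPT; nested_hwpt_id is assumed to come from a prior IOMMU_HWPT_ALLOC:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/iommufd.h>

    static int invalidate_all(int iommufd_fd, __u32 nested_hwpt_id)
    {
            struct iommu_hwpt_vtd_s1_invalidate inv = {
                    .addr = 0,
                    .npages = ~0ULL,  /* U64_MAX: all caches for this table */
            };
            struct iommu_hwpt_invalidate cmd = {
                    .size = sizeof(cmd),
                    .hwpt_id = nested_hwpt_id,
                    .data_uptr = (__u64)(uintptr_t)&inv,
                    .data_type = IOMMU_HWPT_INVALIDATE_DATA_VTD_S1,
                    .entry_len = sizeof(inv),
                    .entry_num = 1,
            };

            /* on return, cmd.entry_num reports how many entries were handled */
            return ioctl(iommufd_fd, IOMMU_HWPT_INVALIDATE, &cmd);
    }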
#endif
diff --git a/include/uapi/linux/ioprio.h b/include/uapi/linux/ioprio.h
index f70f2596a6bf..bee2bdb0eedb 100644
--- a/include/uapi/linux/ioprio.h
+++ b/include/uapi/linux/ioprio.h
@@ -2,22 +2,23 @@
#ifndef _UAPI_LINUX_IOPRIO_H
#define _UAPI_LINUX_IOPRIO_H
+#include <linux/stddef.h>
+#include <linux/types.h>
+
/*
* Gives us 8 prio classes with 13-bits of data for each class
*/
#define IOPRIO_CLASS_SHIFT 13
-#define IOPRIO_CLASS_MASK 0x07
+#define IOPRIO_NR_CLASSES 8
+#define IOPRIO_CLASS_MASK (IOPRIO_NR_CLASSES - 1)
#define IOPRIO_PRIO_MASK ((1UL << IOPRIO_CLASS_SHIFT) - 1)
#define IOPRIO_PRIO_CLASS(ioprio) \
(((ioprio) >> IOPRIO_CLASS_SHIFT) & IOPRIO_CLASS_MASK)
#define IOPRIO_PRIO_DATA(ioprio) ((ioprio) & IOPRIO_PRIO_MASK)
-#define IOPRIO_PRIO_VALUE(class, data) \
- ((((class) & IOPRIO_CLASS_MASK) << IOPRIO_CLASS_SHIFT) | \
- ((data) & IOPRIO_PRIO_MASK))
/*
- * These are the io priority groups as implemented by the BFQ and mq-deadline
+ * These are the io priority classes as implemented by the BFQ and mq-deadline
* schedulers. RT is the realtime class, it always gets premium service. For
* ATA disks supporting NCQ IO priority, RT class IOs will be processed using
* high priority NCQ commands. BE is the best-effort scheduling class, the
@@ -25,18 +26,30 @@
* served when no one else is using the disk.
*/
enum {
- IOPRIO_CLASS_NONE,
- IOPRIO_CLASS_RT,
- IOPRIO_CLASS_BE,
- IOPRIO_CLASS_IDLE,
+ IOPRIO_CLASS_NONE = 0,
+ IOPRIO_CLASS_RT = 1,
+ IOPRIO_CLASS_BE = 2,
+ IOPRIO_CLASS_IDLE = 3,
+
+ /* Special class to indicate an invalid ioprio value */
+ IOPRIO_CLASS_INVALID = 7,
};
/*
- * The RT and BE priority classes both support up to 8 priority levels.
+ * The RT and BE priority classes both support up to 8 priority levels that
+ * can be specified using the lower 3-bits of the priority data.
*/
-#define IOPRIO_NR_LEVELS 8
-#define IOPRIO_BE_NR IOPRIO_NR_LEVELS
+#define IOPRIO_LEVEL_NR_BITS 3
+#define IOPRIO_NR_LEVELS (1 << IOPRIO_LEVEL_NR_BITS)
+#define IOPRIO_LEVEL_MASK (IOPRIO_NR_LEVELS - 1)
+#define IOPRIO_PRIO_LEVEL(ioprio) ((ioprio) & IOPRIO_LEVEL_MASK)
+
+#define IOPRIO_BE_NR IOPRIO_NR_LEVELS
+/*
+ * Possible values for the "which" argument of the ioprio_get() and
+ * ioprio_set() system calls (see "man ioprio_set").
+ */
enum {
IOPRIO_WHO_PROCESS = 1,
IOPRIO_WHO_PGRP,
@@ -44,9 +57,71 @@ enum {
};
/*
- * Fallback BE priority level.
+ * Fallback BE class priority level.
*/
#define IOPRIO_NORM 4
#define IOPRIO_BE_NORM IOPRIO_NORM
+/*
+ * The 10 bits between the priority class and the priority level are used to
+ * optionally define I/O hints for any combination of I/O priority class and
+ * level. Depending on the kernel configuration, I/O scheduler being used and
+ * the target I/O device being used, hints can influence how I/Os are processed
+ * without affecting the I/O scheduling ordering defined by the I/O priority
+ * class and level.
+ */
+#define IOPRIO_HINT_SHIFT IOPRIO_LEVEL_NR_BITS
+#define IOPRIO_HINT_NR_BITS 10
+#define IOPRIO_NR_HINTS (1 << IOPRIO_HINT_NR_BITS)
+#define IOPRIO_HINT_MASK (IOPRIO_NR_HINTS - 1)
+#define IOPRIO_PRIO_HINT(ioprio) \
+ (((ioprio) >> IOPRIO_HINT_SHIFT) & IOPRIO_HINT_MASK)
+
+/*
+ * I/O hints.
+ */
+enum {
+ /* No hint */
+ IOPRIO_HINT_NONE = 0,
+
+ /*
+ * Device command duration limits: indicate to the device a desired
+ * duration limit for the commands that will be used to process an I/O.
+ * These will currently only be effective for SCSI and ATA devices that
+ * support the command duration limits feature. If this feature is
+ * enabled, then the commands issued to the device to process an I/O with
+ * one of these hints set will have the duration limit index (dld field)
+ * set to the value of the hint.
+ */
+ IOPRIO_HINT_DEV_DURATION_LIMIT_1 = 1,
+ IOPRIO_HINT_DEV_DURATION_LIMIT_2 = 2,
+ IOPRIO_HINT_DEV_DURATION_LIMIT_3 = 3,
+ IOPRIO_HINT_DEV_DURATION_LIMIT_4 = 4,
+ IOPRIO_HINT_DEV_DURATION_LIMIT_5 = 5,
+ IOPRIO_HINT_DEV_DURATION_LIMIT_6 = 6,
+ IOPRIO_HINT_DEV_DURATION_LIMIT_7 = 7,
+};
+
+#define IOPRIO_BAD_VALUE(val, max) ((val) < 0 || (val) >= (max))
+
+/*
+ * Return an I/O priority value based on a class, a level and a hint.
+ */
+static __always_inline __u16 ioprio_value(int prioclass, int priolevel,
+ int priohint)
+{
+ if (IOPRIO_BAD_VALUE(prioclass, IOPRIO_NR_CLASSES) ||
+ IOPRIO_BAD_VALUE(priolevel, IOPRIO_NR_LEVELS) ||
+ IOPRIO_BAD_VALUE(priohint, IOPRIO_NR_HINTS))
+ return IOPRIO_CLASS_INVALID << IOPRIO_CLASS_SHIFT;
+
+ return (prioclass << IOPRIO_CLASS_SHIFT) |
+ (priohint << IOPRIO_HINT_SHIFT) | priolevel;
+}
+
+#define IOPRIO_PRIO_VALUE(prioclass, priolevel) \
+ ioprio_value(prioclass, priolevel, IOPRIO_HINT_NONE)
+#define IOPRIO_PRIO_VALUE_HINT(prioclass, priolevel, priohint) \
+ ioprio_value(prioclass, priolevel, priohint)
+
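A sketch of combining class, level and hint into a value for ioprio_set(2); glibc provides no wrapper, so the syscall is invoked directly:

    #include <sys/syscall.h>
    #include <unistd.h>
    #include <linux/ioprio.h>

    int main(void)
    {
            /* Best-effort class, level 0, with command duration limit hint 1. */
            int ioprio = IOPRIO_PRIO_VALUE_HINT(IOPRIO_CLASS_BE, 0,
                                                IOPRIO_HINT_DEV_DURATION_LIMIT_1);

            /* Apply to the calling process (who == 0). */
            return syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0, ioprio);
    }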
#endif /* _UAPI_LINUX_IOPRIO_H */
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index ac56605fe9bc..cf592d7b630f 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -198,6 +198,7 @@ enum {
DEVCONF_IOAM6_ID_WIDE,
DEVCONF_NDISC_EVICT_NOCARRIER,
DEVCONF_ACCEPT_UNTRACKED_NA,
+ DEVCONF_ACCEPT_RA_MIN_LFT,
DEVCONF_MAX
};
diff --git a/include/uapi/linux/kexec.h b/include/uapi/linux/kexec.h
index 981016e05cfa..c17bb096ea68 100644
--- a/include/uapi/linux/kexec.h
+++ b/include/uapi/linux/kexec.h
@@ -12,6 +12,7 @@
/* kexec flags for different usage scenarios */
#define KEXEC_ON_CRASH 0x00000001
#define KEXEC_PRESERVE_CONTEXT 0x00000002
+#define KEXEC_UPDATE_ELFCOREHDR 0x00000004
#define KEXEC_ARCH_MASK 0xffff0000
/*
@@ -24,6 +25,7 @@
#define KEXEC_FILE_UNLOAD 0x00000001
#define KEXEC_FILE_ON_CRASH 0x00000002
#define KEXEC_FILE_NO_INITRAMFS 0x00000004
+#define KEXEC_FILE_DEBUG 0x00000008
/* These values match the ELF architecture values.
* Unless there is a good reason that should continue to be the case.
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index 2da5c3ad71bd..f0ed68974c54 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -38,9 +38,11 @@
* - 1.10 - Add SMI profiler event log
* - 1.11 - Add unified memory for ctx save/restore area
* - 1.12 - Add DMA buf export ioctl
+ * - 1.13 - Add debugger API
+ * - 1.14 - Update kfd_event_data
*/
#define KFD_IOCTL_MAJOR_VERSION 1
-#define KFD_IOCTL_MINOR_VERSION 12
+#define KFD_IOCTL_MINOR_VERSION 14
struct kfd_ioctl_get_version_args {
__u32 major_version; /* from KFD */
@@ -110,6 +112,32 @@ struct kfd_ioctl_get_available_memory_args {
__u32 pad;
};
+struct kfd_dbg_device_info_entry {
+ __u64 exception_status;
+ __u64 lds_base;
+ __u64 lds_limit;
+ __u64 scratch_base;
+ __u64 scratch_limit;
+ __u64 gpuvm_base;
+ __u64 gpuvm_limit;
+ __u32 gpu_id;
+ __u32 location_id;
+ __u32 vendor_id;
+ __u32 device_id;
+ __u32 revision_id;
+ __u32 subsystem_vendor_id;
+ __u32 subsystem_device_id;
+ __u32 fw_version;
+ __u32 gfx_target_version;
+ __u32 simd_count;
+ __u32 max_waves_per_simd;
+ __u32 array_count;
+ __u32 simd_arrays_per_engine;
+ __u32 num_xcc;
+ __u32 capability;
+ __u32 debug_prop;
+};
+
/* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
#define KFD_IOC_CACHE_POLICY_COHERENT 0
#define KFD_IOC_CACHE_POLICY_NONCOHERENT 1
@@ -293,12 +321,20 @@ struct kfd_hsa_hw_exception_data {
__u32 gpu_id;
};
+/* hsa signal event data */
+struct kfd_hsa_signal_event_data {
+ __u64 last_event_age; /* to and from KFD */
+};
+
/* Event data */
struct kfd_event_data {
union {
+ /* From KFD */
struct kfd_hsa_memory_exception_data memory_exception_data;
struct kfd_hsa_hw_exception_data hw_exception_data;
- }; /* From KFD */
+ /* To and From KFD */
+ struct kfd_hsa_signal_event_data signal_event_data;
+ };
__u64 kfd_event_data_ext; /* pointer to an extension structure
for future exception types */
__u32 event_id; /* to KFD */
@@ -369,6 +405,7 @@ struct kfd_ioctl_acquire_vm_args {
#define KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM (1 << 27)
#define KFD_IOC_ALLOC_MEM_FLAGS_COHERENT (1 << 26)
#define KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED (1 << 25)
+#define KFD_IOC_ALLOC_MEM_FLAGS_EXT_COHERENT (1 << 24)
/* Allocate memory for later SVM (shared virtual memory) mapping.
*
@@ -623,6 +660,8 @@ enum kfd_mmio_remap {
#define KFD_IOCTL_SVM_FLAG_GPU_READ_MOSTLY 0x00000020
/* Keep GPU memory mapping always valid as if XNACK is disable */
#define KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED 0x00000040
+/* Fine grained coherency between all devices using device-scope atomics */
+#define KFD_IOCTL_SVM_FLAG_EXT_COHERENT 0x00000080
/**
* kfd_ioctl_svm_op - SVM ioctl operations
@@ -773,6 +812,640 @@ struct kfd_ioctl_set_xnack_mode_args {
__s32 xnack_enabled;
};
+/* Wave launch override modes */
+enum kfd_dbg_trap_override_mode {
+ KFD_DBG_TRAP_OVERRIDE_OR = 0,
+ KFD_DBG_TRAP_OVERRIDE_REPLACE = 1
+};
+
+/* Wave launch overrides */
+enum kfd_dbg_trap_mask {
+ KFD_DBG_TRAP_MASK_FP_INVALID = 1,
+ KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL = 2,
+ KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO = 4,
+ KFD_DBG_TRAP_MASK_FP_OVERFLOW = 8,
+ KFD_DBG_TRAP_MASK_FP_UNDERFLOW = 16,
+ KFD_DBG_TRAP_MASK_FP_INEXACT = 32,
+ KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO = 64,
+ KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH = 128,
+ KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION = 256,
+ KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START = (1 << 30),
+ KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END = (1 << 31)
+};
+
+/* Wave launch modes */
+enum kfd_dbg_trap_wave_launch_mode {
+ KFD_DBG_TRAP_WAVE_LAUNCH_MODE_NORMAL = 0,
+ KFD_DBG_TRAP_WAVE_LAUNCH_MODE_HALT = 1,
+ KFD_DBG_TRAP_WAVE_LAUNCH_MODE_DEBUG = 3
+};
+
+/* Address watch modes */
+enum kfd_dbg_trap_address_watch_mode {
+ KFD_DBG_TRAP_ADDRESS_WATCH_MODE_READ = 0,
+ KFD_DBG_TRAP_ADDRESS_WATCH_MODE_NONREAD = 1,
+ KFD_DBG_TRAP_ADDRESS_WATCH_MODE_ATOMIC = 2,
+ KFD_DBG_TRAP_ADDRESS_WATCH_MODE_ALL = 3
+};
+
+/* Additional wave settings */
+enum kfd_dbg_trap_flags {
+ KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP = 1,
+};
+
+/* Trap exceptions */
+enum kfd_dbg_trap_exception_code {
+ EC_NONE = 0,
+ /* per queue */
+ EC_QUEUE_WAVE_ABORT = 1,
+ EC_QUEUE_WAVE_TRAP = 2,
+ EC_QUEUE_WAVE_MATH_ERROR = 3,
+ EC_QUEUE_WAVE_ILLEGAL_INSTRUCTION = 4,
+ EC_QUEUE_WAVE_MEMORY_VIOLATION = 5,
+ EC_QUEUE_WAVE_APERTURE_VIOLATION = 6,
+ EC_QUEUE_PACKET_DISPATCH_DIM_INVALID = 16,
+ EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID = 17,
+ EC_QUEUE_PACKET_DISPATCH_CODE_INVALID = 18,
+ EC_QUEUE_PACKET_RESERVED = 19,
+ EC_QUEUE_PACKET_UNSUPPORTED = 20,
+ EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID = 21,
+ EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID = 22,
+ EC_QUEUE_PACKET_VENDOR_UNSUPPORTED = 23,
+ EC_QUEUE_PREEMPTION_ERROR = 30,
+ EC_QUEUE_NEW = 31,
+ /* per device */
+ EC_DEVICE_QUEUE_DELETE = 32,
+ EC_DEVICE_MEMORY_VIOLATION = 33,
+ EC_DEVICE_RAS_ERROR = 34,
+ EC_DEVICE_FATAL_HALT = 35,
+ EC_DEVICE_NEW = 36,
+ /* per process */
+ EC_PROCESS_RUNTIME = 48,
+ EC_PROCESS_DEVICE_REMOVE = 49,
+ EC_MAX
+};
+
+/* Mask generated by ecode in kfd_dbg_trap_exception_code */
+#define KFD_EC_MASK(ecode) (1ULL << (ecode - 1))
+
+/* Masks for exception code type checks below */
+#define KFD_EC_MASK_QUEUE (KFD_EC_MASK(EC_QUEUE_WAVE_ABORT) | \
+ KFD_EC_MASK(EC_QUEUE_WAVE_TRAP) | \
+ KFD_EC_MASK(EC_QUEUE_WAVE_MATH_ERROR) | \
+ KFD_EC_MASK(EC_QUEUE_WAVE_ILLEGAL_INSTRUCTION) | \
+ KFD_EC_MASK(EC_QUEUE_WAVE_MEMORY_VIOLATION) | \
+ KFD_EC_MASK(EC_QUEUE_WAVE_APERTURE_VIOLATION) | \
+ KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_DIM_INVALID) | \
+ KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID) | \
+ KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_CODE_INVALID) | \
+ KFD_EC_MASK(EC_QUEUE_PACKET_RESERVED) | \
+ KFD_EC_MASK(EC_QUEUE_PACKET_UNSUPPORTED) | \
+ KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID) | \
+ KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID) | \
+ KFD_EC_MASK(EC_QUEUE_PACKET_VENDOR_UNSUPPORTED) | \
+ KFD_EC_MASK(EC_QUEUE_PREEMPTION_ERROR) | \
+ KFD_EC_MASK(EC_QUEUE_NEW))
+#define KFD_EC_MASK_DEVICE (KFD_EC_MASK(EC_DEVICE_QUEUE_DELETE) | \
+ KFD_EC_MASK(EC_DEVICE_RAS_ERROR) | \
+ KFD_EC_MASK(EC_DEVICE_FATAL_HALT) | \
+ KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION) | \
+ KFD_EC_MASK(EC_DEVICE_NEW))
+#define KFD_EC_MASK_PROCESS (KFD_EC_MASK(EC_PROCESS_RUNTIME) | \
+ KFD_EC_MASK(EC_PROCESS_DEVICE_REMOVE))
+
+/* Checks for exception code types for KFD search */
+#define KFD_DBG_EC_TYPE_IS_QUEUE(ecode) \
+ (!!(KFD_EC_MASK(ecode) & KFD_EC_MASK_QUEUE))
+#define KFD_DBG_EC_TYPE_IS_DEVICE(ecode) \
+ (!!(KFD_EC_MASK(ecode) & KFD_EC_MASK_DEVICE))
+#define KFD_DBG_EC_TYPE_IS_PROCESS(ecode) \
+ (!!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PROCESS))
+
+
+/* Runtime enable states */
+enum kfd_dbg_runtime_state {
+ DEBUG_RUNTIME_STATE_DISABLED = 0,
+ DEBUG_RUNTIME_STATE_ENABLED = 1,
+ DEBUG_RUNTIME_STATE_ENABLED_BUSY = 2,
+ DEBUG_RUNTIME_STATE_ENABLED_ERROR = 3
+};
+
+/* Runtime enable status */
+struct kfd_runtime_info {
+ __u64 r_debug;
+ __u32 runtime_state;
+ __u32 ttmp_setup;
+};
+
+/* Enable modes for runtime enable */
+#define KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK 1
+#define KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK 2
+
+/**
+ * kfd_ioctl_runtime_enable_args - Arguments for runtime enable
+ *
+ * Coordinates debug exception signalling and debug device enablement with runtime.
+ *
+ * @r_debug - pointer to user struct for sharing information between ROCr and the debugger
+ * @mode_mask - mask to set mode
+ * KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK - enable runtime for debugging, otherwise disable
+ * KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK - enable trap temporary setup (ignore on disable)
+ * @capabilities_mask - mask to notify runtime on what KFD supports
+ *
+ * Return - 0 on SUCCESS.
+ * - EBUSY if runtime enable call already pending.
+ * - EEXIST if user queues already active prior to call.
+ * If process is debug enabled, runtime enable will enable debug devices and
+ * wait for debugger process to send runtime exception EC_PROCESS_RUNTIME
+ * to unblock - see kfd_ioctl_dbg_trap_args.
+ *
+ */
+struct kfd_ioctl_runtime_enable_args {
+ __u64 r_debug;
+ __u32 mode_mask;
+ __u32 capabilities_mask;
+};
+
+/* Queue information */
+struct kfd_queue_snapshot_entry {
+ __u64 exception_status;
+ __u64 ring_base_address;
+ __u64 write_pointer_address;
+ __u64 read_pointer_address;
+ __u64 ctx_save_restore_address;
+ __u32 queue_id;
+ __u32 gpu_id;
+ __u32 ring_size;
+ __u32 queue_type;
+ __u32 ctx_save_restore_area_size;
+ __u32 reserved;
+};
+
+/* Queue status return for suspend/resume */
+#define KFD_DBG_QUEUE_ERROR_BIT 30
+#define KFD_DBG_QUEUE_INVALID_BIT 31
+#define KFD_DBG_QUEUE_ERROR_MASK (1 << KFD_DBG_QUEUE_ERROR_BIT)
+#define KFD_DBG_QUEUE_INVALID_MASK (1 << KFD_DBG_QUEUE_INVALID_BIT)
+
+/* Context save area header information */
+struct kfd_context_save_area_header {
+ struct {
+ __u32 control_stack_offset;
+ __u32 control_stack_size;
+ __u32 wave_state_offset;
+ __u32 wave_state_size;
+ } wave_state;
+ __u32 debug_offset;
+ __u32 debug_size;
+ __u64 err_payload_addr;
+ __u32 err_event_id;
+ __u32 reserved1;
+};
+
+/*
+ * Debug operations
+ *
+ * For specifics on usage and return values, see documentation per operation
+ * below. Otherwise, generic error returns apply:
+ * - ESRCH if the process to debug does not exist.
+ *
+ * - EINVAL (with KFD_IOC_DBG_TRAP_ENABLE exempt) if operation
+ * KFD_IOC_DBG_TRAP_ENABLE has not succeeded prior.
+ * Also returns this error if GPU hardware scheduling is not supported.
+ *
+ * - EPERM (with KFD_IOC_DBG_TRAP_DISABLE exempt) if target process is not
+ * PTRACE_ATTACHED. KFD_IOC_DBG_TRAP_DISABLE is exempt to allow
+ * clean up of debug mode as long as process is debug enabled.
+ *
+ * - EACCES if any DBG_HW_OP (debug hardware operation) is requested when
+ * AMDKFD_IOC_RUNTIME_ENABLE has not succeeded prior.
+ *
+ * - ENODEV if any GPU does not support debugging on a DBG_HW_OP call.
+ *
+ * - Other errors may be returned when a DBG_HW_OP occurs while the GPU
+ * is in a fatal state.
+ *
+ */
+enum kfd_dbg_trap_operations {
+ KFD_IOC_DBG_TRAP_ENABLE = 0,
+ KFD_IOC_DBG_TRAP_DISABLE = 1,
+ KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT = 2,
+ KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED = 3,
+ KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE = 4, /* DBG_HW_OP */
+ KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE = 5, /* DBG_HW_OP */
+ KFD_IOC_DBG_TRAP_SUSPEND_QUEUES = 6, /* DBG_HW_OP */
+ KFD_IOC_DBG_TRAP_RESUME_QUEUES = 7, /* DBG_HW_OP */
+ KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH = 8, /* DBG_HW_OP */
+ KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH = 9, /* DBG_HW_OP */
+ KFD_IOC_DBG_TRAP_SET_FLAGS = 10,
+ KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT = 11,
+ KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO = 12,
+ KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT = 13,
+ KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT = 14
+};
+
+/**
+ * kfd_ioctl_dbg_trap_enable_args
+ *
+ * Arguments for KFD_IOC_DBG_TRAP_ENABLE.
+ *
+ * Enables debug session for target process. Call @op KFD_IOC_DBG_TRAP_DISABLE in
+ * kfd_ioctl_dbg_trap_args to disable debug session.
+ *
+ * @exception_mask (IN) - exceptions to raise to the debugger
+ * @rinfo_ptr (IN) - pointer to runtime info buffer (see kfd_runtime_info)
+ * @rinfo_size (IN/OUT) - size of runtime info buffer in bytes
+ * @dbg_fd (IN) - fd the KFD will use to notify the debugger of raised
+ * exceptions set in exception_mask.
+ *
+ * Generic errors apply (see kfd_dbg_trap_operations).
+ * Return - 0 on SUCCESS.
+ * Copies KFD saved kfd_runtime_info to @rinfo_ptr on enable.
+ * Size of kfd_runtime_info saved by the KFD returned to @rinfo_size.
+ * - EBADF if KFD cannot get a reference to dbg_fd.
+ * - EFAULT if KFD cannot copy runtime info to rinfo_ptr.
+ * - EINVAL if target process is already debug enabled.
+ *
+ */
+struct kfd_ioctl_dbg_trap_enable_args {
+ __u64 exception_mask;
+ __u64 rinfo_ptr;
+ __u32 rinfo_size;
+ __u32 dbg_fd;
+};
+
+/**
+ * kfd_ioctl_dbg_trap_send_runtime_event_args
+ *
+ * Arguments for KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT.
+ * Raises exceptions to runtime.
+ *
+ * @exception_mask (IN) - exceptions to raise to runtime
+ * @gpu_id (IN) - target device id
+ * @queue_id (IN) - target queue id
+ *
+ * Generic errors apply (see kfd_dbg_trap_operations).
+ * Return - 0 on SUCCESS.
+ * - ENODEV if gpu_id not found.
+ * If exception_mask contains EC_PROCESS_RUNTIME, unblocks pending
+ * AMDKFD_IOC_RUNTIME_ENABLE call - see kfd_ioctl_runtime_enable_args.
+ * All other exceptions are raised to runtime through err_payload_addr.
+ * See kfd_context_save_area_header.
+ */
+struct kfd_ioctl_dbg_trap_send_runtime_event_args {
+ __u64 exception_mask;
+ __u32 gpu_id;
+ __u32 queue_id;
+};
+
+/**
+ * kfd_ioctl_dbg_trap_set_exceptions_enabled_args
+ *
+ * Arguments for KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED.
+ * Set new exceptions to be raised to the debugger.
+ *
+ * @exception_mask (IN) - new exceptions to raise to the debugger
+ *
+ * Generic errors apply (see kfd_dbg_trap_operations).
+ * Return - 0 on SUCCESS.
+ */
+struct kfd_ioctl_dbg_trap_set_exceptions_enabled_args {
+ __u64 exception_mask;
+};
+
+/**
+ * kfd_ioctl_dbg_trap_set_wave_launch_override_args
+ *
+ * Arguments for KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE
+ * Enable HW exceptions to raise trap.
+ *
+ * @override_mode (IN) - see kfd_dbg_trap_override_mode
+ * @enable_mask (IN/OUT) - reference kfd_dbg_trap_mask.
+ * IN is the override modes requested to be enabled.
+ * OUT is referenced in Return below.
+ * @support_request_mask (IN/OUT) - reference kfd_dbg_trap_mask.
+ * IN is the override modes requested for support check.
+ * OUT is referenced in Return below.
+ *
+ * Generic errors apply (see kfd_dbg_trap_operations).
+ * Return - 0 on SUCCESS.
+ * Previous enablement is returned in @enable_mask.
+ * Actual override support is returned in @support_request_mask.
+ * - EINVAL if override mode is not supported.
+ * - EACCES if trap support requested is not actually supported.
+ * i.e. enable_mask (IN) is not a subset of support_request_mask (OUT).
+ * Otherwise it is considered a generic error (see kfd_dbg_trap_operations).
+ */
+struct kfd_ioctl_dbg_trap_set_wave_launch_override_args {
+ __u32 override_mode;
+ __u32 enable_mask;
+ __u32 support_request_mask;
+ __u32 pad;
+};
+
+/**
+ * kfd_ioctl_dbg_trap_set_wave_launch_mode_args
+ *
+ * Arguments for KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE
+ * Set wave launch mode.
+ *
+ * @launch_mode (IN) - see kfd_dbg_trap_wave_launch_mode
+ *
+ * Generic errors apply (see kfd_dbg_trap_operations).
+ * Return - 0 on SUCCESS.
+ */
+struct kfd_ioctl_dbg_trap_set_wave_launch_mode_args {
+ __u32 launch_mode;
+ __u32 pad;
+};
+
+/**
+ * kfd_ioctl_dbg_trap_suspend_queues_args
+ *
+ * Arguments for KFD_IOC_DBG_TRAP_SUSPEND_QUEUES
+ * Suspend queues.
+ *
+ * @exception_mask (IN) - raised exceptions to clear
+ * @queue_array_ptr (IN) - pointer to array of queue ids (u32 per queue id)
+ * to suspend
+ * @num_queues (IN) - number of queues to suspend in @queue_array_ptr
+ * @grace_period (IN) - wave time allowance before preemption
+ * per 1K GPU clock cycle unit
+ *
+ * Generic errors apply (see kfd_dbg_trap_operations).
+ * Destruction of a suspended queue is blocked until the queue is
+ * resumed. This allows the debugger to access queue information and
+ * its context save area without running into a race condition on
+ * queue destruction.
+ * Automatically copies per queue context save area header information
+ * into the save area base
+ * (see kfd_queue_snapshot_entry and kfd_context_save_area_header).
+ *
+ * Return - Number of queues suspended on SUCCESS.
+ * KFD_DBG_QUEUE_ERROR_MASK and KFD_DBG_QUEUE_INVALID_MASK masked
+ * for each queue id in @queue_array_ptr array reports unsuccessful
+ * suspend reason.
+ * KFD_DBG_QUEUE_ERROR_MASK = HW failure.
+ * KFD_DBG_QUEUE_INVALID_MASK = queue does not exist, is new or
+ * is being destroyed.
+ */
+struct kfd_ioctl_dbg_trap_suspend_queues_args {
+ __u64 exception_mask;
+ __u64 queue_array_ptr;
+ __u32 num_queues;
+ __u32 grace_period;
+};
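Illustrative only: after a suspend-queues call returns, the per-queue status bits described above are assumed to be reported back through the queue id array; the args pointer is hypothetical:

    #include <stdint.h>
    #include <linux/kfd_ioctl.h>

    static unsigned int count_suspend_failures(
            const struct kfd_ioctl_dbg_trap_suspend_queues_args *args)
    {
            __u32 *queues = (__u32 *)(uintptr_t)args->queue_array_ptr;
            unsigned int i, failed = 0;

            for (i = 0; i < args->num_queues; i++) {
                    /* INVALID: queue gone, new, or being destroyed;
                     * ERROR: HW failure while suspending. */
                    if (queues[i] & (KFD_DBG_QUEUE_INVALID_MASK |
                                     KFD_DBG_QUEUE_ERROR_MASK))
                            failed++;
            }
            return failed;
    }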
+
+/**
+ * kfd_ioctl_dbg_trap_resume_queues_args
+ *
+ * Arguments for KFD_IOC_DBG_TRAP_RESUME_QUEUES
+ * Resume queues.
+ *
+ * @queue_array_ptr (IN) - pointer to array of queue ids (u32 per queue id)
+ * to resume
+ * @num_queues (IN) - number of queues to resume in @queue_array_ptr
+ *
+ * Generic errors apply (see kfd_dbg_trap_operations).
+ * Return - Number of queues resumed on SUCCESS.
+ * KFD_DBG_QUEUE_ERROR_MASK and KFD_DBG_QUEUE_INVALID_MASK mask
+ * for each queue id in @queue_array_ptr array reports unsuccessful
+ * resume reason.
+ * KFD_DBG_QUEUE_ERROR_MASK = HW failure.
+ * KFD_DBG_QUEUE_INVALID_MASK = queue does not exist.
+ */
+struct kfd_ioctl_dbg_trap_resume_queues_args {
+ __u64 queue_array_ptr;
+ __u32 num_queues;
+ __u32 pad;
+};
+
+/**
+ * kfd_ioctl_dbg_trap_set_node_address_watch_args
+ *
+ * Arguments for KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH
+ * Sets address watch for device.
+ *
+ * @address (IN) - watch address to set
+ * @mode (IN) - see kfd_dbg_trap_address_watch_mode
+ * @mask (IN) - watch address mask
+ * @gpu_id (IN) - target gpu to set watch point
+ * @id (OUT) - watch id allocated
+ *
+ * Generic errors apply (see kfd_dbg_trap_operations).
+ * Return - 0 on SUCCESS.
+ * Allocated watch ID returned to @id.
+ * - ENODEV if gpu_id not found.
+ * - ENOMEM if watch IDs cannot be allocated.
+ */
+struct kfd_ioctl_dbg_trap_set_node_address_watch_args {
+ __u64 address;
+ __u32 mode;
+ __u32 mask;
+ __u32 gpu_id;
+ __u32 id;
+};
+
+/**
+ * kfd_ioctl_dbg_trap_clear_node_address_watch_args
+ *
+ * Arguments for KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH
+ * Clear address watch for device.
+ *
+ * @gpu_id (IN) - target device to clear watch point
+ * @id (IN) - allocated watch id to clear
+ *
+ * Generic errors apply (see kfd_dbg_trap_operations).
+ * Return - 0 on SUCCESS.
+ * - ENODEV if gpu_id not found.
+ * - EINVAL if watch ID has not been allocated.
+ */
+struct kfd_ioctl_dbg_trap_clear_node_address_watch_args {
+ __u32 gpu_id;
+ __u32 id;
+};
+
+/**
+ * kfd_ioctl_dbg_trap_set_flags_args
+ *
+ * Arguments for KFD_IOC_DBG_TRAP_SET_FLAGS
+ * Sets flags for wave behaviour.
+ *
+ * @flags (IN/OUT) - IN = flags to enable, OUT = flags previously enabled
+ *
+ * Generic errors apply (see kfd_dbg_trap_operations).
+ * Return - 0 on SUCCESS.
+ * - EACCES if any debug device does not allow flag options.
+ */
+struct kfd_ioctl_dbg_trap_set_flags_args {
+ __u32 flags;
+ __u32 pad;
+};
+
+/**
+ * kfd_ioctl_dbg_trap_query_debug_event_args
+ *
+ * Arguments for KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT
+ *
+ * Find one or more raised exceptions. This function can return multiple
+ * exceptions from a single queue or a single device with one call. To find
+ * all raised exceptions, this function must be called repeatedly until it
+ * returns -EAGAIN. Returned exceptions can optionally be cleared by
+ * setting the corresponding bit in the @exception_mask input parameter.
+ * However, clearing an exception prevents retrieving further information
+ * about it with KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO.
+ *
+ * @exception_mask (IN/OUT) - exceptions to clear (IN) and raised (OUT)
+ * @gpu_id (OUT) - gpu id of exceptions raised
+ * @queue_id (OUT) - queue id of exceptions raised
+ *
+ * Generic errors apply (see kfd_dbg_trap_operations).
+ * Return - 0 if a raised exception is found.
+ * Raised exceptions found are returned in @exception_mask
+ * with the reported source id returned in @gpu_id or @queue_id.
+ * - EAGAIN if no raised exception has been found.
+ */
+struct kfd_ioctl_dbg_trap_query_debug_event_args {
+ __u64 exception_mask;
+ __u32 gpu_id;
+ __u32 queue_id;
+};
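
The drain loop this describes might look like the sketch below; handle_exception() is a hypothetical caller-supplied callback, and the op value is assumed to come from kfd_dbg_trap_operations:

#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

extern void handle_exception(__u64 mask, __u32 gpu_id, __u32 queue_id);

static void drain_debug_events(int kfd_fd, __u32 pid, __u64 clear_mask)
{
	struct kfd_ioctl_dbg_trap_args args;

	for (;;) {
		memset(&args, 0, sizeof(args));
		args.pid = pid;
		args.op = KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT;
		args.query_debug_event.exception_mask = clear_mask;

		if (ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &args) < 0)
			break;	/* errno == EAGAIN: nothing left raised */

		handle_exception(args.query_debug_event.exception_mask,
				 args.query_debug_event.gpu_id,
				 args.query_debug_event.queue_id);
	}
}

Passing a zero @exception_mask keeps the exceptions raised so they can still be inspected afterwards with KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO.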
+
+/**
+ * kfd_ioctl_dbg_trap_query_exception_info_args
+ *
+ * Arguments for KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO
+ * Get additional info on raised exception.
+ *
+ * @info_ptr (IN) - pointer to exception info buffer to copy to
+ * @info_size (IN/OUT) - exception info buffer size (bytes)
+ * @source_id (IN) - target gpu or queue id
+ * @exception_code (IN) - target exception
+ * @clear_exception (IN) - clear raised @exception_code exception
+ * (0 = false, 1 = true)
+ *
+ * Generic errors apply (see kfd_dbg_trap_operations).
+ * Return - 0 on SUCCESS.
+ * If @exception_code is EC_DEVICE_MEMORY_VIOLATION, copy @info_size(OUT)
+ * bytes of memory exception data to @info_ptr.
+ * If @exception_code is EC_PROCESS_RUNTIME, copy saved
+ * kfd_runtime_info to @info_ptr.
+ * Actual required @info_ptr size (bytes) is returned in @info_size.
+ */
+struct kfd_ioctl_dbg_trap_query_exception_info_args {
+ __u64 info_ptr;
+ __u32 info_size;
+ __u32 source_id;
+ __u32 exception_code;
+ __u32 clear_exception;
+};
+
+/**
+ * kfd_ioctl_dbg_trap_get_queue_snapshot_args
+ *
+ * Arguments for KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT
+ * Get queue information.
+ *
+ * @exception_mask (IN) - exceptions raised to clear
+ * @snapshot_buf_ptr (IN) - queue snapshot entry buffer (see kfd_queue_snapshot_entry)
+ * @num_queues (IN/OUT) - number of queue snapshot entries
+ * The debugger specifies the size of the array allocated in @num_queues.
+ * KFD returns the number of queues that actually existed. If this is
+ * larger than the size specified by the debugger, KFD will not overflow
+ * the array allocated by the debugger.
+ *
+ * @entry_size (IN/OUT) - size per entry in bytes
+ * The debugger specifies sizeof(struct kfd_queue_snapshot_entry) in
+ * @entry_size. KFD returns the number of bytes actually populated per
+ * entry. The debugger should use KFD_IOCTL_MINOR_VERSION to determine
+ * which fields in struct kfd_queue_snapshot_entry are valid. This allows
+ * growing the ABI in a backwards compatible manner.
+ * Note that entry_size(IN) should still be used to stride the snapshot buffer in the
+ * event that it is larger than the actual kfd_queue_snapshot_entry.
+ *
+ * Generic errors apply (see kfd_dbg_trap_operations).
+ * Return - 0 on SUCCESS.
+ * Copies @num_queues(IN) queue snapshot entries of size @entry_size(IN)
+ * into @snapshot_buf_ptr if @num_queues(IN) > 0.
+ * Otherwise return @num_queues(OUT) queue snapshot entries that exist.
+ */
+struct kfd_ioctl_dbg_trap_queue_snapshot_args {
+ __u64 exception_mask;
+ __u64 snapshot_buf_ptr;
+ __u32 num_queues;
+ __u32 entry_size;
+};
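
The two-call pattern implied above (probe with @num_queues = 0, then allocate and fetch) might look like this sketch, assuming the KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT op value from kfd_dbg_trap_operations:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

static struct kfd_queue_snapshot_entry *snapshot_queues(int kfd_fd, __u32 pid,
							__u32 *count)
{
	struct kfd_ioctl_dbg_trap_args args;
	struct kfd_queue_snapshot_entry *buf;

	/* first call: num_queues == 0, only asks how many entries exist */
	memset(&args, 0, sizeof(args));
	args.pid = pid;
	args.op = KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT;
	args.queue_snapshot.entry_size = sizeof(*buf);
	if (ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &args) < 0)
		return NULL;

	*count = args.queue_snapshot.num_queues;
	buf = calloc(*count, sizeof(*buf));
	if (!buf)
		return NULL;

	/* second call: fetch up to *count entries into buf */
	args.queue_snapshot.snapshot_buf_ptr = (uintptr_t)buf;
	args.queue_snapshot.num_queues = *count;
	args.queue_snapshot.entry_size = sizeof(*buf);
	if (ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &args) < 0) {
		free(buf);
		return NULL;
	}
	return buf;
}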
+
+/**
+ * kfd_ioctl_dbg_trap_get_device_snapshot_args
+ *
+ * Arguments for KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT
+ * Get device information.
+ *
+ * @exception_mask (IN) - exceptions raised to clear
+ * @snapshot_buf_ptr (IN) - pointer to snapshot buffer (see kfd_dbg_device_info_entry)
+ * @num_devices (IN/OUT) - number of debug devices to snapshot
+ * The debugger specifies the size of the array allocated in @num_devices.
+ * KFD returns the number of devices that actually existed. If this is
+ * larger than the size specified by the debugger, KFD will not overflow
+ * the array allocated by the debugger.
+ *
+ * @entry_size (IN/OUT) - size per entry in bytes
+ * The debugger specifies sizeof(struct kfd_dbg_device_info_entry) in
+ * @entry_size. KFD returns the number of bytes actually populated. The
+ * debugger should use KFD_IOCTL_MINOR_VERSION to determine which fields
+ * in struct kfd_dbg_device_info_entry are valid. This allows growing the
+ * ABI in a backwards compatible manner.
+ * Note that entry_size(IN) should still be used to stride the snapshot buffer in the
+ * event that it is larger than the actual kfd_dbg_device_info_entry.
+ *
+ * Generic errors apply (see kfd_dbg_trap_operations).
+ * Return - 0 on SUCCESS.
+ * Copies @num_devices(IN) device snapshot entries of size @entry_size(IN)
+ * into @snapshot_buf_ptr if @num_devices(IN) > 0.
+ * Otherwise return @num_devices(OUT) device snapshot entries that exist.
+ */
+struct kfd_ioctl_dbg_trap_device_snapshot_args {
+ __u64 exception_mask;
+ __u64 snapshot_buf_ptr;
+ __u32 num_devices;
+ __u32 entry_size;
+};
+
+/**
+ * kfd_ioctl_dbg_trap_args
+ *
+ * Arguments to debug a target process.
+ *
+ * @pid - target process to debug
+ * @op - debug operation (see kfd_dbg_trap_operations)
+ *
+ * @op determines which union struct args to use.
+ * Refer to the kernel docs for each kfd_ioctl_dbg_trap_*_args struct.
+ */
+struct kfd_ioctl_dbg_trap_args {
+ __u32 pid;
+ __u32 op;
+
+ union {
+ struct kfd_ioctl_dbg_trap_enable_args enable;
+ struct kfd_ioctl_dbg_trap_send_runtime_event_args send_runtime_event;
+ struct kfd_ioctl_dbg_trap_set_exceptions_enabled_args set_exceptions_enabled;
+ struct kfd_ioctl_dbg_trap_set_wave_launch_override_args launch_override;
+ struct kfd_ioctl_dbg_trap_set_wave_launch_mode_args launch_mode;
+ struct kfd_ioctl_dbg_trap_suspend_queues_args suspend_queues;
+ struct kfd_ioctl_dbg_trap_resume_queues_args resume_queues;
+ struct kfd_ioctl_dbg_trap_set_node_address_watch_args set_node_address_watch;
+ struct kfd_ioctl_dbg_trap_clear_node_address_watch_args clear_node_address_watch;
+ struct kfd_ioctl_dbg_trap_set_flags_args set_flags;
+ struct kfd_ioctl_dbg_trap_query_debug_event_args query_debug_event;
+ struct kfd_ioctl_dbg_trap_query_exception_info_args query_exception_info;
+ struct kfd_ioctl_dbg_trap_queue_snapshot_args queue_snapshot;
+ struct kfd_ioctl_dbg_trap_device_snapshot_args device_snapshot;
+ };
+};
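
Tying the union together, a debugger might suspend a set of queues roughly as follows; KFD_IOC_DBG_TRAP_SUSPEND_QUEUES is assumed to be the matching kfd_dbg_trap_operations value, and kfd_fd is assumed to be an open /dev/kfd with debug already enabled:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

/* Returns the number of queues suspended; on partial failure the per-queue
 * reason is masked into queue_ids[] via KFD_DBG_QUEUE_ERROR_MASK and
 * KFD_DBG_QUEUE_INVALID_MASK. */
static int suspend_queues(int kfd_fd, __u32 pid, __u32 *queue_ids, __u32 n)
{
	struct kfd_ioctl_dbg_trap_args args;

	memset(&args, 0, sizeof(args));
	args.pid = pid;
	args.op = KFD_IOC_DBG_TRAP_SUSPEND_QUEUES;
	args.suspend_queues.queue_array_ptr = (uintptr_t)queue_ids;
	args.suspend_queues.num_queues = n;
	args.suspend_queues.grace_period = 0;	/* preempt immediately */

	return ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &args);
}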
+
#define AMDKFD_IOCTL_BASE 'K'
#define AMDKFD_IO(nr) _IO(AMDKFD_IOCTL_BASE, nr)
#define AMDKFD_IOR(nr, type) _IOR(AMDKFD_IOCTL_BASE, nr, type)
@@ -887,7 +1560,13 @@ struct kfd_ioctl_set_xnack_mode_args {
#define AMDKFD_IOC_EXPORT_DMABUF \
AMDKFD_IOWR(0x24, struct kfd_ioctl_export_dmabuf_args)
+#define AMDKFD_IOC_RUNTIME_ENABLE \
+ AMDKFD_IOWR(0x25, struct kfd_ioctl_runtime_enable_args)
+
+#define AMDKFD_IOC_DBG_TRAP \
+ AMDKFD_IOWR(0x26, struct kfd_ioctl_dbg_trap_args)
+
#define AMDKFD_COMMAND_START 0x01
-#define AMDKFD_COMMAND_END 0x25
+#define AMDKFD_COMMAND_END 0x27
#endif
diff --git a/include/uapi/linux/kfd_sysfs.h b/include/uapi/linux/kfd_sysfs.h
index 3e330f368917..a51b7331e0b4 100644
--- a/include/uapi/linux/kfd_sysfs.h
+++ b/include/uapi/linux/kfd_sysfs.h
@@ -43,6 +43,11 @@
#define HSA_CAP_DOORBELL_TYPE_2_0 0x2
#define HSA_CAP_AQL_QUEUE_DOUBLE_MAP 0x00004000
+#define HSA_CAP_TRAP_DEBUG_SUPPORT 0x00008000
+#define HSA_CAP_TRAP_DEBUG_WAVE_LAUNCH_TRAP_OVERRIDE_SUPPORTED 0x00010000
+#define HSA_CAP_TRAP_DEBUG_WAVE_LAUNCH_MODE_SUPPORTED 0x00020000
+#define HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED 0x00040000
+
/* Old buggy user mode depends on this being 0 */
#define HSA_CAP_RESERVED_WAS_SRAM_EDCSUPPORTED 0x00080000
@@ -53,8 +58,18 @@
#define HSA_CAP_SRAM_EDCSUPPORTED 0x04000000
#define HSA_CAP_SVMAPI_SUPPORTED 0x08000000
#define HSA_CAP_FLAGS_COHERENTHOSTACCESS 0x10000000
+#define HSA_CAP_TRAP_DEBUG_FIRMWARE_SUPPORTED 0x20000000
#define HSA_CAP_RESERVED 0xe00f8000
+/* debug_prop bits in node properties */
+#define HSA_DBG_WATCH_ADDR_MASK_LO_BIT_MASK 0x0000000f
+#define HSA_DBG_WATCH_ADDR_MASK_LO_BIT_SHIFT 0
+#define HSA_DBG_WATCH_ADDR_MASK_HI_BIT_MASK 0x000003f0
+#define HSA_DBG_WATCH_ADDR_MASK_HI_BIT_SHIFT 4
+#define HSA_DBG_DISPATCH_INFO_ALWAYS_VALID 0x00000400
+#define HSA_DBG_WATCHPOINTS_EXCLUSIVE 0x00000800
+#define HSA_DBG_RESERVED 0xfffffffffffff000ull
+
/* Heap types in memory properties */
#define HSA_MEM_HEAP_TYPE_SYSTEM 0
#define HSA_MEM_HEAP_TYPE_FB_PUBLIC 1
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 737318b1c1d9..c3308536482b 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -16,76 +16,6 @@
#define KVM_API_VERSION 12
-/* *** Deprecated interfaces *** */
-
-#define KVM_TRC_SHIFT 16
-
-#define KVM_TRC_ENTRYEXIT (1 << KVM_TRC_SHIFT)
-#define KVM_TRC_HANDLER (1 << (KVM_TRC_SHIFT + 1))
-
-#define KVM_TRC_VMENTRY (KVM_TRC_ENTRYEXIT + 0x01)
-#define KVM_TRC_VMEXIT (KVM_TRC_ENTRYEXIT + 0x02)
-#define KVM_TRC_PAGE_FAULT (KVM_TRC_HANDLER + 0x01)
-
-#define KVM_TRC_HEAD_SIZE 12
-#define KVM_TRC_CYCLE_SIZE 8
-#define KVM_TRC_EXTRA_MAX 7
-
-#define KVM_TRC_INJ_VIRQ (KVM_TRC_HANDLER + 0x02)
-#define KVM_TRC_REDELIVER_EVT (KVM_TRC_HANDLER + 0x03)
-#define KVM_TRC_PEND_INTR (KVM_TRC_HANDLER + 0x04)
-#define KVM_TRC_IO_READ (KVM_TRC_HANDLER + 0x05)
-#define KVM_TRC_IO_WRITE (KVM_TRC_HANDLER + 0x06)
-#define KVM_TRC_CR_READ (KVM_TRC_HANDLER + 0x07)
-#define KVM_TRC_CR_WRITE (KVM_TRC_HANDLER + 0x08)
-#define KVM_TRC_DR_READ (KVM_TRC_HANDLER + 0x09)
-#define KVM_TRC_DR_WRITE (KVM_TRC_HANDLER + 0x0A)
-#define KVM_TRC_MSR_READ (KVM_TRC_HANDLER + 0x0B)
-#define KVM_TRC_MSR_WRITE (KVM_TRC_HANDLER + 0x0C)
-#define KVM_TRC_CPUID (KVM_TRC_HANDLER + 0x0D)
-#define KVM_TRC_INTR (KVM_TRC_HANDLER + 0x0E)
-#define KVM_TRC_NMI (KVM_TRC_HANDLER + 0x0F)
-#define KVM_TRC_VMMCALL (KVM_TRC_HANDLER + 0x10)
-#define KVM_TRC_HLT (KVM_TRC_HANDLER + 0x11)
-#define KVM_TRC_CLTS (KVM_TRC_HANDLER + 0x12)
-#define KVM_TRC_LMSW (KVM_TRC_HANDLER + 0x13)
-#define KVM_TRC_APIC_ACCESS (KVM_TRC_HANDLER + 0x14)
-#define KVM_TRC_TDP_FAULT (KVM_TRC_HANDLER + 0x15)
-#define KVM_TRC_GTLB_WRITE (KVM_TRC_HANDLER + 0x16)
-#define KVM_TRC_STLB_WRITE (KVM_TRC_HANDLER + 0x17)
-#define KVM_TRC_STLB_INVAL (KVM_TRC_HANDLER + 0x18)
-#define KVM_TRC_PPC_INSTR (KVM_TRC_HANDLER + 0x19)
-
-struct kvm_user_trace_setup {
- __u32 buf_size;
- __u32 buf_nr;
-};
-
-#define __KVM_DEPRECATED_MAIN_W_0x06 \
- _IOW(KVMIO, 0x06, struct kvm_user_trace_setup)
-#define __KVM_DEPRECATED_MAIN_0x07 _IO(KVMIO, 0x07)
-#define __KVM_DEPRECATED_MAIN_0x08 _IO(KVMIO, 0x08)
-
-#define __KVM_DEPRECATED_VM_R_0x70 _IOR(KVMIO, 0x70, struct kvm_assigned_irq)
-
-struct kvm_breakpoint {
- __u32 enabled;
- __u32 padding;
- __u64 address;
-};
-
-struct kvm_debug_guest {
- __u32 enabled;
- __u32 pad;
- struct kvm_breakpoint breakpoints[4];
- __u32 singlestep;
-};
-
-#define __KVM_DEPRECATED_VCPU_W_0x87 _IOW(KVMIO, 0x87, struct kvm_debug_guest)
-
-/* *** End of deprecated interfaces *** */
-
-
/* for KVM_SET_USER_MEMORY_REGION */
struct kvm_userspace_memory_region {
__u32 slot;
@@ -95,6 +25,19 @@ struct kvm_userspace_memory_region {
__u64 userspace_addr; /* start of the userspace allocated memory */
};
+/* for KVM_SET_USER_MEMORY_REGION2 */
+struct kvm_userspace_memory_region2 {
+ __u32 slot;
+ __u32 flags;
+ __u64 guest_phys_addr;
+ __u64 memory_size;
+ __u64 userspace_addr;
+ __u64 guest_memfd_offset;
+ __u32 guest_memfd;
+ __u32 pad1;
+ __u64 pad2[14];
+};
+
/*
* The bit 0 ~ bit 15 of kvm_userspace_memory_region::flags are visible for
* userspace, other bits are reserved for kvm internal use which are defined
@@ -102,6 +45,7 @@ struct kvm_userspace_memory_region {
*/
#define KVM_MEM_LOG_DIRTY_PAGES (1UL << 0)
#define KVM_MEM_READONLY (1UL << 1)
+#define KVM_MEM_GUEST_MEMFD (1UL << 2)
/* for KVM_IRQ_LINE */
struct kvm_irq_level {
@@ -264,6 +208,8 @@ struct kvm_xen_exit {
#define KVM_EXIT_RISCV_SBI 35
#define KVM_EXIT_RISCV_CSR 36
#define KVM_EXIT_NOTIFY 37
+#define KVM_EXIT_LOONGARCH_IOCSR 38
+#define KVM_EXIT_MEMORY_FAULT 39
/* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */
@@ -336,6 +282,13 @@ struct kvm_run {
__u32 len;
__u8 is_write;
} mmio;
+ /* KVM_EXIT_LOONGARCH_IOCSR */
+ struct {
+ __u64 phys_addr;
+ __u8 data[8];
+ __u32 len;
+ __u8 is_write;
+ } iocsr_io;
/* KVM_EXIT_HYPERCALL */
struct {
__u64 nr;
@@ -510,6 +463,13 @@ struct kvm_run {
#define KVM_NOTIFY_CONTEXT_INVALID (1 << 0)
__u32 flags;
} notify;
+ /* KVM_EXIT_MEMORY_FAULT */
+ struct {
+#define KVM_MEMORY_EXIT_FLAG_PRIVATE (1ULL << 3)
+ __u64 flags;
+ __u64 gpa;
+ __u64 size;
+ } memory_fault;
/* Fix the size of the union. */
char padding[256];
};
@@ -937,9 +897,6 @@ struct kvm_ppc_resize_hpt {
*/
#define KVM_GET_VCPU_MMAP_SIZE _IO(KVMIO, 0x04) /* in bytes */
#define KVM_GET_SUPPORTED_CPUID _IOWR(KVMIO, 0x05, struct kvm_cpuid2)
-#define KVM_TRACE_ENABLE __KVM_DEPRECATED_MAIN_W_0x06
-#define KVM_TRACE_PAUSE __KVM_DEPRECATED_MAIN_0x07
-#define KVM_TRACE_DISABLE __KVM_DEPRECATED_MAIN_0x08
#define KVM_GET_EMULATED_CPUID _IOWR(KVMIO, 0x09, struct kvm_cpuid2)
#define KVM_GET_MSR_FEATURE_INDEX_LIST _IOWR(KVMIO, 0x0a, struct kvm_msr_list)
@@ -1190,6 +1147,14 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP 225
#define KVM_CAP_PMU_EVENT_MASKED_EVENTS 226
#define KVM_CAP_COUNTER_OFFSET 227
+#define KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE 228
+#define KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES 229
+#define KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES 230
+#define KVM_CAP_USER_MEMORY2 231
+#define KVM_CAP_MEMORY_FAULT_INFO 232
+#define KVM_CAP_MEMORY_ATTRIBUTES 233
+#define KVM_CAP_GUEST_MEMFD 234
+#define KVM_CAP_VM_TYPES 235
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1280,6 +1245,7 @@ struct kvm_x86_mce {
#define KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL (1 << 4)
#define KVM_XEN_HVM_CONFIG_EVTCHN_SEND (1 << 5)
#define KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG (1 << 6)
+#define KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE (1 << 7)
struct kvm_xen_hvm_config {
__u32 flags;
@@ -1360,6 +1326,7 @@ struct kvm_dirty_tlb {
#define KVM_REG_ARM64 0x6000000000000000ULL
#define KVM_REG_MIPS 0x7000000000000000ULL
#define KVM_REG_RISCV 0x8000000000000000ULL
+#define KVM_REG_LOONGARCH 0x9000000000000000ULL
#define KVM_REG_SIZE_SHIFT 52
#define KVM_REG_SIZE_MASK 0x00f0000000000000ULL
@@ -1416,9 +1383,16 @@ struct kvm_device_attr {
__u64 addr; /* userspace address of attr data */
};
-#define KVM_DEV_VFIO_GROUP 1
-#define KVM_DEV_VFIO_GROUP_ADD 1
-#define KVM_DEV_VFIO_GROUP_DEL 2
+#define KVM_DEV_VFIO_FILE 1
+
+#define KVM_DEV_VFIO_FILE_ADD 1
+#define KVM_DEV_VFIO_FILE_DEL 2
+
+/* KVM_DEV_VFIO_GROUP aliases are for compile time uapi compatibility */
+#define KVM_DEV_VFIO_GROUP KVM_DEV_VFIO_FILE
+
+#define KVM_DEV_VFIO_GROUP_ADD KVM_DEV_VFIO_FILE_ADD
+#define KVM_DEV_VFIO_GROUP_DEL KVM_DEV_VFIO_FILE_DEL
#define KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE 3
enum kvm_device_type {
@@ -1442,6 +1416,8 @@ enum kvm_device_type {
#define KVM_DEV_TYPE_XIVE KVM_DEV_TYPE_XIVE
KVM_DEV_TYPE_ARM_PV_TIME,
#define KVM_DEV_TYPE_ARM_PV_TIME KVM_DEV_TYPE_ARM_PV_TIME
+ KVM_DEV_TYPE_RISCV_AIA,
+#define KVM_DEV_TYPE_RISCV_AIA KVM_DEV_TYPE_RISCV_AIA
KVM_DEV_TYPE_MAX,
};
@@ -1462,6 +1438,8 @@ struct kvm_vfio_spapr_tce {
struct kvm_userspace_memory_region)
#define KVM_SET_TSS_ADDR _IO(KVMIO, 0x47)
#define KVM_SET_IDENTITY_MAP_ADDR _IOW(KVMIO, 0x48, __u64)
+#define KVM_SET_USER_MEMORY_REGION2 _IOW(KVMIO, 0x49, \
+ struct kvm_userspace_memory_region2)
/* enable ucontrol for s390 */
struct kvm_s390_ucas_mapping {
@@ -1486,20 +1464,8 @@ struct kvm_s390_ucas_mapping {
_IOW(KVMIO, 0x67, struct kvm_coalesced_mmio_zone)
#define KVM_UNREGISTER_COALESCED_MMIO \
_IOW(KVMIO, 0x68, struct kvm_coalesced_mmio_zone)
-#define KVM_ASSIGN_PCI_DEVICE _IOR(KVMIO, 0x69, \
- struct kvm_assigned_pci_dev)
#define KVM_SET_GSI_ROUTING _IOW(KVMIO, 0x6a, struct kvm_irq_routing)
-/* deprecated, replaced by KVM_ASSIGN_DEV_IRQ */
-#define KVM_ASSIGN_IRQ __KVM_DEPRECATED_VM_R_0x70
-#define KVM_ASSIGN_DEV_IRQ _IOW(KVMIO, 0x70, struct kvm_assigned_irq)
#define KVM_REINJECT_CONTROL _IO(KVMIO, 0x71)
-#define KVM_DEASSIGN_PCI_DEVICE _IOW(KVMIO, 0x72, \
- struct kvm_assigned_pci_dev)
-#define KVM_ASSIGN_SET_MSIX_NR _IOW(KVMIO, 0x73, \
- struct kvm_assigned_msix_nr)
-#define KVM_ASSIGN_SET_MSIX_ENTRY _IOW(KVMIO, 0x74, \
- struct kvm_assigned_msix_entry)
-#define KVM_DEASSIGN_DEV_IRQ _IOW(KVMIO, 0x75, struct kvm_assigned_irq)
#define KVM_IRQFD _IOW(KVMIO, 0x76, struct kvm_irqfd)
#define KVM_CREATE_PIT2 _IOW(KVMIO, 0x77, struct kvm_pit_config)
#define KVM_SET_BOOT_CPU_ID _IO(KVMIO, 0x78)
@@ -1516,9 +1482,6 @@ struct kvm_s390_ucas_mapping {
* KVM_CAP_VM_TSC_CONTROL to set defaults for a VM */
#define KVM_SET_TSC_KHZ _IO(KVMIO, 0xa2)
#define KVM_GET_TSC_KHZ _IO(KVMIO, 0xa3)
-/* Available with KVM_CAP_PCI_2_3 */
-#define KVM_ASSIGN_SET_INTX_MASK _IOW(KVMIO, 0xa4, \
- struct kvm_assigned_pci_dev)
/* Available with KVM_CAP_SIGNAL_MSI */
#define KVM_SIGNAL_MSI _IOW(KVMIO, 0xa5, struct kvm_msi)
/* Available with KVM_CAP_PPC_GET_SMMU_INFO */
@@ -1551,6 +1514,7 @@ struct kvm_s390_ucas_mapping {
#define KVM_ARM_MTE_COPY_TAGS _IOR(KVMIO, 0xb4, struct kvm_arm_copy_mte_tags)
/* Available with KVM_CAP_COUNTER_OFFSET */
#define KVM_ARM_SET_COUNTER_OFFSET _IOW(KVMIO, 0xb5, struct kvm_arm_counter_offset)
+#define KVM_ARM_GET_REG_WRITABLE_MASKS _IOR(KVMIO, 0xb6, struct reg_mask_range)
/* ioctl for vm fd */
#define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device)
@@ -1570,8 +1534,6 @@ struct kvm_s390_ucas_mapping {
#define KVM_SET_SREGS _IOW(KVMIO, 0x84, struct kvm_sregs)
#define KVM_TRANSLATE _IOWR(KVMIO, 0x85, struct kvm_translation)
#define KVM_INTERRUPT _IOW(KVMIO, 0x86, struct kvm_interrupt)
-/* KVM_DEBUG_GUEST is no longer supported, use KVM_SET_GUEST_DEBUG instead */
-#define KVM_DEBUG_GUEST __KVM_DEPRECATED_VCPU_W_0x87
#define KVM_GET_MSRS _IOWR(KVMIO, 0x88, struct kvm_msrs)
#define KVM_SET_MSRS _IOW(KVMIO, 0x89, struct kvm_msrs)
#define KVM_SET_CPUID _IOW(KVMIO, 0x8a, struct kvm_cpuid)
@@ -1613,7 +1575,7 @@ struct kvm_s390_ucas_mapping {
#define KVM_GET_DEBUGREGS _IOR(KVMIO, 0xa1, struct kvm_debugregs)
#define KVM_SET_DEBUGREGS _IOW(KVMIO, 0xa2, struct kvm_debugregs)
/*
- * vcpu version available with KVM_ENABLE_CAP
+ * vcpu version available with KVM_CAP_ENABLE_CAP
* vm version available with KVM_CAP_ENABLE_CAP_VM
*/
#define KVM_ENABLE_CAP _IOW(KVMIO, 0xa3, struct kvm_enable_cap)
@@ -2245,4 +2207,24 @@ struct kvm_s390_zpci_op {
/* flags for kvm_s390_zpci_op->u.reg_aen.flags */
#define KVM_S390_ZPCIOP_REGAEN_HOST (1 << 0)
+/* Available with KVM_CAP_MEMORY_ATTRIBUTES */
+#define KVM_SET_MEMORY_ATTRIBUTES _IOW(KVMIO, 0xd2, struct kvm_memory_attributes)
+
+struct kvm_memory_attributes {
+ __u64 address;
+ __u64 size;
+ __u64 attributes;
+ __u64 flags;
+};
+
+#define KVM_MEMORY_ATTRIBUTE_PRIVATE (1ULL << 3)
+
+#define KVM_CREATE_GUEST_MEMFD _IOWR(KVMIO, 0xd4, struct kvm_create_guest_memfd)
+
+struct kvm_create_guest_memfd {
+ __u64 size;
+ __u64 flags;
+ __u64 reserved[6];
+};
+
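Together with the kvm_userspace_memory_region2 and KVM_MEM_GUEST_MEMFD additions earlier in this patch, a VMM could wire up guest-private memory along these lines (a sketch; the slot number, sizes and the anonymous shared backing are illustrative):

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

static int add_private_slot(int vm_fd, __u64 gpa, __u64 size)
{
	struct kvm_create_guest_memfd gmem = { .size = size };
	void *shared;
	int gmem_fd;

	gmem_fd = ioctl(vm_fd, KVM_CREATE_GUEST_MEMFD, &gmem);
	if (gmem_fd < 0)
		return -1;

	/* shared (non-private) backing for the same slot */
	shared = mmap(NULL, size, PROT_READ | PROT_WRITE,
		      MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	if (shared == MAP_FAILED)
		return -1;

	struct kvm_userspace_memory_region2 region = {
		.slot = 0,			/* illustrative */
		.flags = KVM_MEM_GUEST_MEMFD,
		.guest_phys_addr = gpa,
		.memory_size = size,
		.userspace_addr = (uintptr_t)shared,
		.guest_memfd = gmem_fd,
		.guest_memfd_offset = 0,
	};
	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION2, &region);
}

Converting a range to private afterwards would then be a KVM_SET_MEMORY_ATTRIBUTES call with KVM_MEMORY_ATTRIBUTE_PRIVATE set in @attributes.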
#endif /* __LINUX_KVM_H */
diff --git a/include/uapi/linux/landlock.h b/include/uapi/linux/landlock.h
index 81d09ef9aa50..25c8d7677539 100644
--- a/include/uapi/linux/landlock.h
+++ b/include/uapi/linux/landlock.h
@@ -31,6 +31,12 @@ struct landlock_ruleset_attr {
* this access right.
*/
__u64 handled_access_fs;
+ /**
+ * @handled_access_net: Bitmask of actions (cf. `Network flags`_)
+ * that are handled by this ruleset and should then be forbidden if no
+ * rule explicitly allows them.
+ */
+ __u64 handled_access_net;
};
/*
@@ -54,6 +60,11 @@ enum landlock_rule_type {
* landlock_path_beneath_attr .
*/
LANDLOCK_RULE_PATH_BENEATH = 1,
+ /**
+ * @LANDLOCK_RULE_NET_PORT: Type of a &struct
+ * landlock_net_port_attr .
+ */
+ LANDLOCK_RULE_NET_PORT,
};
/**
@@ -80,6 +91,31 @@ struct landlock_path_beneath_attr {
} __attribute__((packed));
/**
+ * struct landlock_net_port_attr - Network port definition
+ *
+ * Argument of sys_landlock_add_rule().
+ */
+struct landlock_net_port_attr {
+ /**
+ * @allowed_access: Bitmask of allowed network actions for a port
+ * (cf. `Network flags`_).
+ */
+ __u64 allowed_access;
+ /**
+ * @port: Network port in host endianness.
+ *
+ * It should be noted that port 0 passed to :manpage:`bind(2)` will
+ * bind to an available port from a specific port range. This can be
+ * configured thanks to the ``/proc/sys/net/ipv4/ip_local_port_range``
+ * sysctl (also used for IPv6). A Landlock rule with port 0 and the
+ * ``LANDLOCK_ACCESS_NET_BIND_TCP`` right means that requesting to bind
+ * on port 0 is allowed and it will automatically translate to binding
+ * on the related port range.
+ */
+ __u64 port;
+};
+
+/**
* DOC: fs_access
*
* A set of actions on kernel objects may be defined by an attribute (e.g.
@@ -189,4 +225,23 @@ struct landlock_path_beneath_attr {
#define LANDLOCK_ACCESS_FS_TRUNCATE (1ULL << 14)
/* clang-format on */
+/**
+ * DOC: net_access
+ *
+ * Network flags
+ * ~~~~~~~~~~~~~~~~
+ *
+ * These flags make it possible to restrict a sandboxed process to a set of
+ * network actions. This is supported since Landlock ABI version 4.
+ *
+ * TCP sockets with allowed actions:
+ *
+ * - %LANDLOCK_ACCESS_NET_BIND_TCP: Bind a TCP socket to a local port.
+ * - %LANDLOCK_ACCESS_NET_CONNECT_TCP: Connect an active TCP socket to
+ * a remote port.
+ */
+/* clang-format off */
+#define LANDLOCK_ACCESS_NET_BIND_TCP (1ULL << 0)
+#define LANDLOCK_ACCESS_NET_CONNECT_TCP (1ULL << 1)
+/* clang-format on */
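
A sketch of the intended use: handle both TCP rights, then allow only outbound HTTPS. It assumes a kernel with Landlock ABI version 4 and uses raw syscall numbers since libc wrappers may be absent:

#include <unistd.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <linux/landlock.h>

static int restrict_to_https(void)
{
	struct landlock_ruleset_attr ruleset_attr = {
		.handled_access_net = LANDLOCK_ACCESS_NET_BIND_TCP |
				      LANDLOCK_ACCESS_NET_CONNECT_TCP,
	};
	struct landlock_net_port_attr https = {
		.allowed_access = LANDLOCK_ACCESS_NET_CONNECT_TCP,
		.port = 443,
	};
	int ruleset_fd, err;

	ruleset_fd = syscall(__NR_landlock_create_ruleset, &ruleset_attr,
			     sizeof(ruleset_attr), 0);
	if (ruleset_fd < 0)
		return -1;

	err = syscall(__NR_landlock_add_rule, ruleset_fd,
		      LANDLOCK_RULE_NET_PORT, &https, 0);
	if (!err)
		err = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	if (!err)
		err = syscall(__NR_landlock_restrict_self, ruleset_fd, 0);
	close(ruleset_fd);
	return err;
}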
#endif /* _UAPI_LINUX_LANDLOCK_H */
diff --git a/include/uapi/linux/lsm.h b/include/uapi/linux/lsm.h
new file mode 100644
index 000000000000..f8aef9ade549
--- /dev/null
+++ b/include/uapi/linux/lsm.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Linux Security Modules (LSM) - User space API
+ *
+ * Copyright (C) 2022 Casey Schaufler <casey@schaufler-ca.com>
+ * Copyright (C) 2022 Intel Corporation
+ */
+
+#ifndef _UAPI_LINUX_LSM_H
+#define _UAPI_LINUX_LSM_H
+
+#include <linux/stddef.h>
+#include <linux/types.h>
+#include <linux/unistd.h>
+
+/**
+ * struct lsm_ctx - LSM context information
+ * @id: the LSM id number, see LSM_ID_XXX
+ * @flags: LSM specific flags
+ * @len: length of the lsm_ctx struct, @ctx and any other data or padding
+ * @ctx_len: the size of @ctx
+ * @ctx: the LSM context value
+ *
+ * The @len field MUST be equal to the size of the lsm_ctx struct
+ * plus any additional padding and/or data placed after @ctx.
+ *
+ * In all cases @ctx_len MUST be equal to the length of @ctx.
+ * If @ctx is a string value it should be nul terminated with
+ * @ctx_len equal to `strlen(@ctx) + 1`. Binary values are
+ * supported.
+ *
+ * The @flags and @ctx fields SHOULD only be interpreted by the
+ * LSM specified by @id; they MUST be set to zero/0 when not used.
+ */
+struct lsm_ctx {
+ __u64 id;
+ __u64 flags;
+ __u64 len;
+ __u64 ctx_len;
+ __u8 ctx[] __counted_by(ctx_len);
+};
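
A sketch of walking this layout in userspace via the lsm_get_self_attr() syscall that consumes it; the availability of __NR_lsm_get_self_attr in the toolchain headers and string-valued contexts are assumptions:

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/lsm.h>

static void print_current_attrs(void)
{
	__u64 raw[512];	/* __u64-aligned backing for the variable-size entries */
	struct lsm_ctx *ctx = (struct lsm_ctx *)raw;
	uint32_t size = sizeof(raw);
	int n, i;

	n = syscall(__NR_lsm_get_self_attr, LSM_ATTR_CURRENT, ctx, &size, 0);
	for (i = 0; i < n; i++) {
		/* assumes string contexts; @ctx may also carry binary data */
		printf("lsm %llu: %s\n", (unsigned long long)ctx->id,
		       (const char *)ctx->ctx);
		/* @len spans the struct, @ctx and padding: advance by it */
		ctx = (struct lsm_ctx *)((char *)ctx + ctx->len);
	}
}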
+
+/*
+ * ID tokens to identify Linux Security Modules (LSMs)
+ *
+ * These token values are used to uniquely identify specific LSMs
+ * in the kernel as well as in the kernel's LSM userspace API.
+ *
+ * A value of zero/0 is considered undefined and should not be used
+ * outside the kernel. Values 1-99 are reserved for potential
+ * future use.
+ */
+#define LSM_ID_UNDEF 0
+#define LSM_ID_CAPABILITY 100
+#define LSM_ID_SELINUX 101
+#define LSM_ID_SMACK 102
+#define LSM_ID_TOMOYO 103
+#define LSM_ID_APPARMOR 104
+#define LSM_ID_YAMA 105
+#define LSM_ID_LOADPIN 106
+#define LSM_ID_SAFESETID 107
+#define LSM_ID_LOCKDOWN 108
+#define LSM_ID_BPF 109
+#define LSM_ID_LANDLOCK 110
+
+/*
+ * LSM_ATTR_XXX definitions identify different LSM attributes
+ * which are used in the kernel's LSM userspace API. Support
+ * for these attributes varies across the different LSMs. None
+ * are required.
+ *
+ * A value of zero/0 is considered undefined and should not be used
+ * outside the kernel. Values 1-99 are reserved for potential
+ * future use.
+ */
+#define LSM_ATTR_UNDEF 0
+#define LSM_ATTR_CURRENT 100
+#define LSM_ATTR_EXEC 101
+#define LSM_ATTR_FSCREATE 102
+#define LSM_ATTR_KEYCREATE 103
+#define LSM_ATTR_PREV 104
+#define LSM_ATTR_SOCKCREATE 105
+
+/*
+ * LSM_FLAG_XXX definitions identify special handling instructions
+ * for the API.
+ */
+#define LSM_FLAG_SINGLE 0x0001
+
+#endif /* _UAPI_LINUX_LSM_H */
diff --git a/include/uapi/linux/mdio.h b/include/uapi/linux/mdio.h
index 256b463e47a6..d03863da180e 100644
--- a/include/uapi/linux/mdio.h
+++ b/include/uapi/linux/mdio.h
@@ -82,6 +82,8 @@
#define MDIO_AN_10BT1_AN_CTRL 526 /* 10BASE-T1 AN control register */
#define MDIO_AN_10BT1_AN_STAT 527 /* 10BASE-T1 AN status register */
#define MDIO_PMA_PMD_BT1_CTRL 2100 /* BASE-T1 PMA/PMD control register */
+#define MDIO_PCS_1000BT1_CTRL 2304 /* 1000BASE-T1 PCS control register */
+#define MDIO_PCS_1000BT1_STAT 2305 /* 1000BASE-T1 PCS status register */
/* LASI (Link Alarm Status Interrupt) registers, defined by XENPAK MSA. */
#define MDIO_PMA_LASI_RXCTRL 0x9000 /* RX_ALARM control */
@@ -231,6 +233,30 @@
#define MDIO_PMA_EXTABLE_BT1 0x0800 /* BASE-T1 ability */
#define MDIO_PMA_EXTABLE_NBT 0x4000 /* 2.5/5GBASE-T ability */
+/* AN Clause 73 linkword */
+#define MDIO_AN_C73_0_S_MASK GENMASK(4, 0)
+#define MDIO_AN_C73_0_E_MASK GENMASK(9, 5)
+#define MDIO_AN_C73_0_PAUSE BIT(10)
+#define MDIO_AN_C73_0_ASM_DIR BIT(11)
+#define MDIO_AN_C73_0_C2 BIT(12)
+#define MDIO_AN_C73_0_RF BIT(13)
+#define MDIO_AN_C73_0_ACK BIT(14)
+#define MDIO_AN_C73_0_NP BIT(15)
+#define MDIO_AN_C73_1_T_MASK GENMASK(4, 0)
+#define MDIO_AN_C73_1_1000BASE_KX BIT(5)
+#define MDIO_AN_C73_1_10GBASE_KX4 BIT(6)
+#define MDIO_AN_C73_1_10GBASE_KR BIT(7)
+#define MDIO_AN_C73_1_40GBASE_KR4 BIT(8)
+#define MDIO_AN_C73_1_40GBASE_CR4 BIT(9)
+#define MDIO_AN_C73_1_100GBASE_CR10 BIT(10)
+#define MDIO_AN_C73_1_100GBASE_KP4 BIT(11)
+#define MDIO_AN_C73_1_100GBASE_KR4 BIT(12)
+#define MDIO_AN_C73_1_100GBASE_CR4 BIT(13)
+#define MDIO_AN_C73_1_25GBASE_R_S BIT(14)
+#define MDIO_AN_C73_1_25GBASE_R BIT(15)
+#define MDIO_AN_C73_2_2500BASE_KX BIT(0)
+#define MDIO_AN_C73_2_5GBASE_KR BIT(1)
+
/* PHY XGXS lane state register. */
#define MDIO_PHYXS_LNSTAT_SYNC0 0x0001
#define MDIO_PHYXS_LNSTAT_SYNC1 0x0002
@@ -308,6 +334,8 @@
#define MDIO_PCS_10T1L_CTRL_RESET 0x8000 /* PCS reset */
/* BASE-T1 PMA/PMD extended ability register. */
+#define MDIO_PMA_PMD_BT1_B100_ABLE 0x0001 /* 100BASE-T1 Ability */
+#define MDIO_PMA_PMD_BT1_B1000_ABLE 0x0002 /* 1000BASE-T1 Ability */
#define MDIO_PMA_PMD_BT1_B10L_ABLE 0x0004 /* 10BASE-T1L Ability */
/* BASE-T1 auto-negotiation advertisement register [15:0] */
@@ -349,7 +377,19 @@
#define MDIO_AN_10BT1_AN_STAT_LPA_EEE_T1L 0x4000 /* 10BASE-T1L LP EEE ability advertisement */
/* BASE-T1 PMA/PMD control register */
-#define MDIO_PMA_PMD_BT1_CTRL_CFG_MST 0x4000 /* MASTER-SLAVE config value */
+#define MDIO_PMA_PMD_BT1_CTRL_STRAP 0x000F /* Type selection (Strap) */
+#define MDIO_PMA_PMD_BT1_CTRL_STRAP_B1000 0x0001 /* Select 1000BASE-T1 */
+#define MDIO_PMA_PMD_BT1_CTRL_CFG_MST 0x4000 /* MASTER-SLAVE config value */
+
+/* 1000BASE-T1 PCS control register */
+#define MDIO_PCS_1000BT1_CTRL_LOW_POWER 0x0800 /* Low power mode */
+#define MDIO_PCS_1000BT1_CTRL_DISABLE_TX 0x4000 /* Global PMA transmit disable */
+#define MDIO_PCS_1000BT1_CTRL_RESET 0x8000 /* Software reset value */
+
+/* 1000BASE-T1 PCS status register */
+#define MDIO_PCS_1000BT1_STAT_LINK 0x0004 /* PCS Link is up */
+#define MDIO_PCS_1000BT1_STAT_FAULT 0x0080 /* There is a fault condition */
+
/* EEE Supported/Advertisement/LP Advertisement registers.
*
diff --git a/include/uapi/linux/media-bus-format.h b/include/uapi/linux/media-bus-format.h
index a03c543cb072..f05f747e444d 100644
--- a/include/uapi/linux/media-bus-format.h
+++ b/include/uapi/linux/media-bus-format.h
@@ -34,7 +34,7 @@
#define MEDIA_BUS_FMT_FIXED 0x0001
-/* RGB - next is 0x1025 */
+/* RGB - next is 0x1026 */
#define MEDIA_BUS_FMT_RGB444_1X12 0x1016
#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE 0x1001
#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE 0x1002
@@ -46,6 +46,7 @@
#define MEDIA_BUS_FMT_RGB565_2X8_BE 0x1007
#define MEDIA_BUS_FMT_RGB565_2X8_LE 0x1008
#define MEDIA_BUS_FMT_RGB666_1X18 0x1009
+#define MEDIA_BUS_FMT_RGB666_2X9_BE 0x1025
#define MEDIA_BUS_FMT_BGR666_1X18 0x1023
#define MEDIA_BUS_FMT_RBG888_1X24 0x100e
#define MEDIA_BUS_FMT_RGB666_1X24_CPADHI 0x1015
diff --git a/include/uapi/linux/media.h b/include/uapi/linux/media.h
index 3ddadaea849f..1c80b1d6bbaf 100644
--- a/include/uapi/linux/media.h
+++ b/include/uapi/linux/media.h
@@ -140,8 +140,8 @@ struct media_device_info {
#define MEDIA_ENT_F_DV_ENCODER (MEDIA_ENT_F_BASE + 0x6002)
/* Entity flags */
-#define MEDIA_ENT_FL_DEFAULT (1 << 0)
-#define MEDIA_ENT_FL_CONNECTOR (1 << 1)
+#define MEDIA_ENT_FL_DEFAULT (1U << 0)
+#define MEDIA_ENT_FL_CONNECTOR (1U << 1)
/* OR with the entity id value to find the next entity */
#define MEDIA_ENT_ID_FLAG_NEXT (1U << 31)
@@ -205,9 +205,9 @@ struct media_entity_desc {
};
};
-#define MEDIA_PAD_FL_SINK (1 << 0)
-#define MEDIA_PAD_FL_SOURCE (1 << 1)
-#define MEDIA_PAD_FL_MUST_CONNECT (1 << 2)
+#define MEDIA_PAD_FL_SINK (1U << 0)
+#define MEDIA_PAD_FL_SOURCE (1U << 1)
+#define MEDIA_PAD_FL_MUST_CONNECT (1U << 2)
struct media_pad_desc {
__u32 entity; /* entity ID */
@@ -216,14 +216,14 @@ struct media_pad_desc {
__u32 reserved[2];
};
-#define MEDIA_LNK_FL_ENABLED (1 << 0)
-#define MEDIA_LNK_FL_IMMUTABLE (1 << 1)
-#define MEDIA_LNK_FL_DYNAMIC (1 << 2)
+#define MEDIA_LNK_FL_ENABLED (1U << 0)
+#define MEDIA_LNK_FL_IMMUTABLE (1U << 1)
+#define MEDIA_LNK_FL_DYNAMIC (1U << 2)
#define MEDIA_LNK_FL_LINK_TYPE (0xf << 28)
-# define MEDIA_LNK_FL_DATA_LINK (0 << 28)
-# define MEDIA_LNK_FL_INTERFACE_LINK (1 << 28)
-# define MEDIA_LNK_FL_ANCILLARY_LINK (2 << 28)
+# define MEDIA_LNK_FL_DATA_LINK (0U << 28)
+# define MEDIA_LNK_FL_INTERFACE_LINK (1U << 28)
+# define MEDIA_LNK_FL_ANCILLARY_LINK (2U << 28)
struct media_link_desc {
struct media_pad_desc source;
@@ -293,7 +293,7 @@ struct media_links_enum {
* struct media_device_info.
*/
#define MEDIA_V2_ENTITY_HAS_FLAGS(media_version) \
- ((media_version) >= ((4 << 16) | (19 << 8) | 0))
+ ((media_version) >= ((4U << 16) | (19U << 8) | 0U))
struct media_v2_entity {
__u32 id;
@@ -328,7 +328,7 @@ struct media_v2_interface {
* struct media_device_info.
*/
#define MEDIA_V2_PAD_HAS_INDEX(media_version) \
- ((media_version) >= ((4 << 16) | (19 << 8) | 0))
+ ((media_version) >= ((4U << 16) | (19U << 8) | 0U))
struct media_v2_pad {
__u32 id;
@@ -432,7 +432,7 @@ struct media_v2_topology {
#define MEDIA_INTF_T_ALSA_TIMER (MEDIA_INTF_T_ALSA_BASE + 7)
/* Obsolete symbol for media_version, no longer used in the kernel */
-#define MEDIA_API_VERSION ((0 << 16) | (1 << 8) | 0)
+#define MEDIA_API_VERSION ((0U << 16) | (1U << 8) | 0U)
#endif
diff --git a/include/uapi/linux/mei.h b/include/uapi/linux/mei.h
index 6e57743628c0..68a0272e99b7 100644
--- a/include/uapi/linux/mei.h
+++ b/include/uapi/linux/mei.h
@@ -14,8 +14,8 @@
* FW Client (given by UUID). This opens a communication channel
* between a host client and a FW client. From this point every read and write
* will communicate with the associated FW client.
- * Only in close() (file_operation release()) the communication between
- * the clients is disconnected
+ * Only in close() (file_operation release()) is the communication between
+ * the clients disconnected.
*
* The IOCTL argument is a struct with a union that contains
* the input parameter and the output parameter for this IOCTL.
@@ -51,7 +51,7 @@ struct mei_connect_client_data {
* DOC: set and unset event notification for a connected client
*
* The IOCTL argument is 1 for enabling event notification and 0 for
- * disabling the service
+ * disabling the service.
* Return: -EOPNOTSUPP if the device doesn't support the feature
*/
#define IOCTL_MEI_NOTIFY_SET _IOW('H', 0x02, __u32)
@@ -59,8 +59,8 @@ struct mei_connect_client_data {
/**
* DOC: retrieve notification
*
- * The IOCTL output argument is 1 if an event was is pending and 0 otherwise
- * the ioctl has to be called in order to acknowledge pending event
+ * The IOCTL output argument is 1 if an event was pending and 0 otherwise.
+ * The ioctl has to be called in order to acknowledge pending event.
*
* Return: -EOPNOTSUPP if the device doesn't support the feature
*/
@@ -98,16 +98,16 @@ struct mei_connect_client_data_vtag {
* FW Client (given by UUID), and virtual tag (vtag).
* The IOCTL opens a communication channel between a host client and
* a FW client on a tagged channel. From this point on, every read
- * and write will communicate with the associated FW client with
+ * and write will communicate with the associated FW client
* on the tagged channel.
- * Upone close() the communication is terminated.
+ * Upon close() the communication is terminated.
*
* The IOCTL argument is a struct with a union that contains
* the input parameter and the output parameter for this IOCTL.
*
- * The input parameter is UUID of the FW Client, a vtag [0,255]
+ * The input parameter is the UUID of the FW Client and a vtag [0,255].
* The output parameter is the properties of the FW client
- * (FW protocool version and max message size).
+ * (FW protocol version and max message size).
*
* Clients that do not support tagged connection
* will respond with -EOPNOTSUPP.
diff --git a/include/uapi/linux/mempolicy.h b/include/uapi/linux/mempolicy.h
index 046d0ccba4cd..a8963f7ef4c2 100644
--- a/include/uapi/linux/mempolicy.h
+++ b/include/uapi/linux/mempolicy.h
@@ -48,7 +48,7 @@ enum {
#define MPOL_MF_MOVE (1<<1) /* Move pages owned by this process to conform
to policy */
#define MPOL_MF_MOVE_ALL (1<<2) /* Move every page to conform to policy */
-#define MPOL_MF_LAZY (1<<3) /* Modifies '_MOVE: lazy migrate on fault */
+#define MPOL_MF_LAZY (1<<3) /* UNSUPPORTED FLAG: Lazy migrate on fault */
#define MPOL_MF_INTERNAL (1<<4) /* Internal flags start here */
#define MPOL_MF_VALID (MPOL_MF_STRICT | \
diff --git a/include/uapi/linux/mman.h b/include/uapi/linux/mman.h
index f55bc680b5b0..a246e11988d5 100644
--- a/include/uapi/linux/mman.h
+++ b/include/uapi/linux/mman.h
@@ -4,6 +4,7 @@
#include <asm/mman.h>
#include <asm-generic/hugetlb_encode.h>
+#include <linux/types.h>
#define MREMAP_MAYMOVE 1
#define MREMAP_FIXED 2
@@ -41,4 +42,17 @@
#define MAP_HUGE_2GB HUGETLB_FLAG_ENCODE_2GB
#define MAP_HUGE_16GB HUGETLB_FLAG_ENCODE_16GB
+struct cachestat_range {
+ __u64 off;
+ __u64 len;
+};
+
+struct cachestat {
+ __u64 nr_cache;
+ __u64 nr_dirty;
+ __u64 nr_writeback;
+ __u64 nr_evicted;
+ __u64 nr_recently_evicted;
+};
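
These are the argument and result structures of cachestat(2); a minimal sketch, assuming __NR_cachestat is exposed by the toolchain headers (Linux 6.5+):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/mman.h>

int main(void)
{
	struct cachestat_range range = { .off = 0, .len = 16 << 20 };
	struct cachestat cs;
	int fd = open("/etc/passwd", O_RDONLY);	/* illustrative file */

	if (fd < 0 || syscall(__NR_cachestat, fd, &range, &cs, 0))
		return 1;
	printf("cached %llu dirty %llu writeback %llu\n",
	       (unsigned long long)cs.nr_cache,
	       (unsigned long long)cs.nr_dirty,
	       (unsigned long long)cs.nr_writeback);
	return 0;
}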
+
#endif /* _UAPI_LINUX_MMAN_H */
diff --git a/include/uapi/linux/mount.h b/include/uapi/linux/mount.h
index 4d93967f8aea..ad5478dbad00 100644
--- a/include/uapi/linux/mount.h
+++ b/include/uapi/linux/mount.h
@@ -74,7 +74,8 @@
#define MOVE_MOUNT_T_AUTOMOUNTS 0x00000020 /* Follow automounts on to path */
#define MOVE_MOUNT_T_EMPTY_PATH 0x00000040 /* Empty to path permitted */
#define MOVE_MOUNT_SET_GROUP 0x00000100 /* Set sharing group instead */
-#define MOVE_MOUNT__MASK 0x00000177
+#define MOVE_MOUNT_BENEATH 0x00000200 /* Mount beneath top mount */
+#define MOVE_MOUNT__MASK 0x00000377
/*
* fsopen() flags.
@@ -99,8 +100,9 @@ enum fsconfig_command {
FSCONFIG_SET_PATH = 3, /* Set parameter, supplying an object by path */
FSCONFIG_SET_PATH_EMPTY = 4, /* Set parameter, supplying an object by (empty) path */
FSCONFIG_SET_FD = 5, /* Set parameter, supplying an object by fd */
- FSCONFIG_CMD_CREATE = 6, /* Invoke superblock creation */
+ FSCONFIG_CMD_CREATE = 6, /* Create new or reuse existing superblock */
FSCONFIG_CMD_RECONFIGURE = 7, /* Invoke superblock reconfiguration */
+ FSCONFIG_CMD_CREATE_EXCL = 8, /* Create new superblock, fail if reusing existing superblock */
};
/*
@@ -136,4 +138,74 @@ struct mount_attr {
/* List of all mount_attr versions. */
#define MOUNT_ATTR_SIZE_VER0 32 /* sizeof first published struct */
+
+/*
+ * Structure for getting mount/superblock/filesystem info with statmount(2).
+ *
+ * The interface is similar to statx(2): individual fields or groups can be
+ * selected with the @mask argument of statmount(). The kernel will set the @mask
+ * field according to the supported fields.
+ *
+ * If string fields are selected, then the caller needs to pass a buffer that
+ * has space after the fixed part of the structure. Nul terminated strings are
+ * copied there and offsets relative to @str are stored in the relevant fields.
+ * If the buffer is too small, then EOVERFLOW is returned. The size actually
+ * used is returned in @size.
+ */
+struct statmount {
+ __u32 size; /* Total size, including strings */
+ __u32 __spare1;
+ __u64 mask; /* What results were written */
+ __u32 sb_dev_major; /* Device ID */
+ __u32 sb_dev_minor;
+ __u64 sb_magic; /* ..._SUPER_MAGIC */
+ __u32 sb_flags; /* SB_{RDONLY,SYNCHRONOUS,DIRSYNC,LAZYTIME} */
+ __u32 fs_type; /* [str] Filesystem type */
+ __u64 mnt_id; /* Unique ID of mount */
+ __u64 mnt_parent_id; /* Unique ID of parent (for root == mnt_id) */
+ __u32 mnt_id_old; /* Reused IDs used in proc/.../mountinfo */
+ __u32 mnt_parent_id_old;
+ __u64 mnt_attr; /* MOUNT_ATTR_... */
+ __u64 mnt_propagation; /* MS_{SHARED,SLAVE,PRIVATE,UNBINDABLE} */
+ __u64 mnt_peer_group; /* ID of shared peer group */
+ __u64 mnt_master; /* Mount receives propagation from this ID */
+ __u64 propagate_from; /* Propagation from in current namespace */
+ __u32 mnt_root; /* [str] Root of mount relative to root of fs */
+ __u32 mnt_point; /* [str] Mountpoint relative to current root */
+ __u64 __spare2[50];
+ char str[]; /* Variable size part containing strings */
+};
+
+/*
+ * Structure for passing mount ID and miscellaneous parameters to statmount(2)
+ * and listmount(2).
+ *
+ * For statmount(2) @param represents the request mask.
+ * For listmount(2) @param represents the last listed mount id (or zero).
+ */
+struct mnt_id_req {
+ __u32 size;
+ __u32 spare;
+ __u64 mnt_id;
+ __u64 param;
+};
+
+/* List of all mnt_id_req versions. */
+#define MNT_ID_REQ_SIZE_VER0 24 /* sizeof first published struct */
+
+/*
+ * @mask bits for statmount(2)
+ */
+#define STATMOUNT_SB_BASIC 0x00000001U /* Want/got sb_... */
+#define STATMOUNT_MNT_BASIC 0x00000002U /* Want/got mnt_... */
+#define STATMOUNT_PROPAGATE_FROM 0x00000004U /* Want/got propagate_from */
+#define STATMOUNT_MNT_ROOT 0x00000008U /* Want/got mnt_root */
+#define STATMOUNT_MNT_POINT 0x00000010U /* Want/got mnt_point */
+#define STATMOUNT_FS_TYPE 0x00000020U /* Want/got fs_type */
+
+/*
+ * Special @mnt_id values that can be passed to listmount
+ */
+#define LSMT_ROOT 0xffffffffffffffff /* root mount */
+
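A sketch of the request/reply flow for statmount(2); __NR_statmount and a mount ID obtained elsewhere (e.g. from listmount(2) starting at LSMT_ROOT, or from statx(2)) are assumptions:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/mount.h>
#include <linux/types.h>

static void show_mount(__u64 mnt_id)
{
	__u64 raw[512];	/* aligned backing; strings land after the fixed part */
	struct statmount *sm = (struct statmount *)raw;
	struct mnt_id_req req = {
		.size = MNT_ID_REQ_SIZE_VER0,
		.mnt_id = mnt_id,
		.param = STATMOUNT_FS_TYPE | STATMOUNT_MNT_POINT,
	};

	if (syscall(__NR_statmount, &req, sm, sizeof(raw), 0))
		return;		/* EOVERFLOW would mean raw[] is too small */
	if (sm->mask & STATMOUNT_FS_TYPE)
		printf("type: %s\n", sm->str + sm->fs_type);
	if (sm->mask & STATMOUNT_MNT_POINT)
		printf("on:   %s\n", sm->str + sm->mnt_point);
}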
#endif /* _UAPI_LINUX_MOUNT_H */
diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h
index 32af2d278cb4..74cfe496891e 100644
--- a/include/uapi/linux/mptcp.h
+++ b/include/uapi/linux/mptcp.h
@@ -23,91 +23,20 @@
#define MPTCP_SUBFLOW_FLAG_CONNECTED _BITUL(7)
#define MPTCP_SUBFLOW_FLAG_MAPVALID _BITUL(8)
-enum {
- MPTCP_SUBFLOW_ATTR_UNSPEC,
- MPTCP_SUBFLOW_ATTR_TOKEN_REM,
- MPTCP_SUBFLOW_ATTR_TOKEN_LOC,
- MPTCP_SUBFLOW_ATTR_RELWRITE_SEQ,
- MPTCP_SUBFLOW_ATTR_MAP_SEQ,
- MPTCP_SUBFLOW_ATTR_MAP_SFSEQ,
- MPTCP_SUBFLOW_ATTR_SSN_OFFSET,
- MPTCP_SUBFLOW_ATTR_MAP_DATALEN,
- MPTCP_SUBFLOW_ATTR_FLAGS,
- MPTCP_SUBFLOW_ATTR_ID_REM,
- MPTCP_SUBFLOW_ATTR_ID_LOC,
- MPTCP_SUBFLOW_ATTR_PAD,
- __MPTCP_SUBFLOW_ATTR_MAX
-};
-
-#define MPTCP_SUBFLOW_ATTR_MAX (__MPTCP_SUBFLOW_ATTR_MAX - 1)
-
-/* netlink interface */
-#define MPTCP_PM_NAME "mptcp_pm"
#define MPTCP_PM_CMD_GRP_NAME "mptcp_pm_cmds"
#define MPTCP_PM_EV_GRP_NAME "mptcp_pm_events"
-#define MPTCP_PM_VER 0x1
-
-/*
- * ATTR types defined for MPTCP
- */
-enum {
- MPTCP_PM_ATTR_UNSPEC,
-
- MPTCP_PM_ATTR_ADDR, /* nested address */
- MPTCP_PM_ATTR_RCV_ADD_ADDRS, /* u32 */
- MPTCP_PM_ATTR_SUBFLOWS, /* u32 */
- MPTCP_PM_ATTR_TOKEN, /* u32 */
- MPTCP_PM_ATTR_LOC_ID, /* u8 */
- MPTCP_PM_ATTR_ADDR_REMOTE, /* nested address */
-
- __MPTCP_PM_ATTR_MAX
-};
-
-#define MPTCP_PM_ATTR_MAX (__MPTCP_PM_ATTR_MAX - 1)
-
-enum {
- MPTCP_PM_ADDR_ATTR_UNSPEC,
- MPTCP_PM_ADDR_ATTR_FAMILY, /* u16 */
- MPTCP_PM_ADDR_ATTR_ID, /* u8 */
- MPTCP_PM_ADDR_ATTR_ADDR4, /* struct in_addr */
- MPTCP_PM_ADDR_ATTR_ADDR6, /* struct in6_addr */
- MPTCP_PM_ADDR_ATTR_PORT, /* u16 */
- MPTCP_PM_ADDR_ATTR_FLAGS, /* u32 */
- MPTCP_PM_ADDR_ATTR_IF_IDX, /* s32 */
-
- __MPTCP_PM_ADDR_ATTR_MAX
-};
-
-#define MPTCP_PM_ADDR_ATTR_MAX (__MPTCP_PM_ADDR_ATTR_MAX - 1)
-
-#define MPTCP_PM_ADDR_FLAG_SIGNAL (1 << 0)
-#define MPTCP_PM_ADDR_FLAG_SUBFLOW (1 << 1)
-#define MPTCP_PM_ADDR_FLAG_BACKUP (1 << 2)
-#define MPTCP_PM_ADDR_FLAG_FULLMESH (1 << 3)
-#define MPTCP_PM_ADDR_FLAG_IMPLICIT (1 << 4)
-
-enum {
- MPTCP_PM_CMD_UNSPEC,
-
- MPTCP_PM_CMD_ADD_ADDR,
- MPTCP_PM_CMD_DEL_ADDR,
- MPTCP_PM_CMD_GET_ADDR,
- MPTCP_PM_CMD_FLUSH_ADDRS,
- MPTCP_PM_CMD_SET_LIMITS,
- MPTCP_PM_CMD_GET_LIMITS,
- MPTCP_PM_CMD_SET_FLAGS,
- MPTCP_PM_CMD_ANNOUNCE,
- MPTCP_PM_CMD_REMOVE,
- MPTCP_PM_CMD_SUBFLOW_CREATE,
- MPTCP_PM_CMD_SUBFLOW_DESTROY,
-
- __MPTCP_PM_CMD_AFTER_LAST
-};
+#include <linux/mptcp_pm.h>
#define MPTCP_INFO_FLAG_FALLBACK _BITUL(0)
#define MPTCP_INFO_FLAG_REMOTE_KEY_RECEIVED _BITUL(1)
+#define MPTCP_PM_ADDR_FLAG_SIGNAL (1 << 0)
+#define MPTCP_PM_ADDR_FLAG_SUBFLOW (1 << 1)
+#define MPTCP_PM_ADDR_FLAG_BACKUP (1 << 2)
+#define MPTCP_PM_ADDR_FLAG_FULLMESH (1 << 3)
+#define MPTCP_PM_ADDR_FLAG_IMPLICIT (1 << 4)
+
struct mptcp_info {
__u8 mptcpi_subflows;
__u8 mptcpi_add_addr_signal;
@@ -123,95 +52,14 @@ struct mptcp_info {
__u8 mptcpi_local_addr_used;
__u8 mptcpi_local_addr_max;
__u8 mptcpi_csum_enabled;
+ __u32 mptcpi_retransmits;
+ __u64 mptcpi_bytes_retrans;
+ __u64 mptcpi_bytes_sent;
+ __u64 mptcpi_bytes_received;
+ __u64 mptcpi_bytes_acked;
+ __u8 mptcpi_subflows_total;
};
-/*
- * MPTCP_EVENT_CREATED: token, family, saddr4 | saddr6, daddr4 | daddr6,
- * sport, dport
- * A new MPTCP connection has been created. It is the good time to allocate
- * memory and send ADD_ADDR if needed. Depending on the traffic-patterns
- * it can take a long time until the MPTCP_EVENT_ESTABLISHED is sent.
- *
- * MPTCP_EVENT_ESTABLISHED: token, family, saddr4 | saddr6, daddr4 | daddr6,
- * sport, dport
- * A MPTCP connection is established (can start new subflows).
- *
- * MPTCP_EVENT_CLOSED: token
- * A MPTCP connection has stopped.
- *
- * MPTCP_EVENT_ANNOUNCED: token, rem_id, family, daddr4 | daddr6 [, dport]
- * A new address has been announced by the peer.
- *
- * MPTCP_EVENT_REMOVED: token, rem_id
- * An address has been lost by the peer.
- *
- * MPTCP_EVENT_SUB_ESTABLISHED: token, family, loc_id, rem_id,
- * saddr4 | saddr6, daddr4 | daddr6, sport,
- * dport, backup, if_idx [, error]
- * A new subflow has been established. 'error' should not be set.
- *
- * MPTCP_EVENT_SUB_CLOSED: token, family, loc_id, rem_id, saddr4 | saddr6,
- * daddr4 | daddr6, sport, dport, backup, if_idx
- * [, error]
- * A subflow has been closed. An error (copy of sk_err) could be set if an
- * error has been detected for this subflow.
- *
- * MPTCP_EVENT_SUB_PRIORITY: token, family, loc_id, rem_id, saddr4 | saddr6,
- * daddr4 | daddr6, sport, dport, backup, if_idx
- * [, error]
- * The priority of a subflow has changed. 'error' should not be set.
- *
- * MPTCP_EVENT_LISTENER_CREATED: family, sport, saddr4 | saddr6
- * A new PM listener is created.
- *
- * MPTCP_EVENT_LISTENER_CLOSED: family, sport, saddr4 | saddr6
- * A PM listener is closed.
- */
-enum mptcp_event_type {
- MPTCP_EVENT_UNSPEC = 0,
- MPTCP_EVENT_CREATED = 1,
- MPTCP_EVENT_ESTABLISHED = 2,
- MPTCP_EVENT_CLOSED = 3,
-
- MPTCP_EVENT_ANNOUNCED = 6,
- MPTCP_EVENT_REMOVED = 7,
-
- MPTCP_EVENT_SUB_ESTABLISHED = 10,
- MPTCP_EVENT_SUB_CLOSED = 11,
-
- MPTCP_EVENT_SUB_PRIORITY = 13,
-
- MPTCP_EVENT_LISTENER_CREATED = 15,
- MPTCP_EVENT_LISTENER_CLOSED = 16,
-};
-
-enum mptcp_event_attr {
- MPTCP_ATTR_UNSPEC = 0,
-
- MPTCP_ATTR_TOKEN, /* u32 */
- MPTCP_ATTR_FAMILY, /* u16 */
- MPTCP_ATTR_LOC_ID, /* u8 */
- MPTCP_ATTR_REM_ID, /* u8 */
- MPTCP_ATTR_SADDR4, /* be32 */
- MPTCP_ATTR_SADDR6, /* struct in6_addr */
- MPTCP_ATTR_DADDR4, /* be32 */
- MPTCP_ATTR_DADDR6, /* struct in6_addr */
- MPTCP_ATTR_SPORT, /* be16 */
- MPTCP_ATTR_DPORT, /* be16 */
- MPTCP_ATTR_BACKUP, /* u8 */
- MPTCP_ATTR_ERROR, /* u8 */
- MPTCP_ATTR_FLAGS, /* u16 */
- MPTCP_ATTR_TIMEOUT, /* u32 */
- MPTCP_ATTR_IF_IDX, /* s32 */
- MPTCP_ATTR_RESET_REASON,/* u32 */
- MPTCP_ATTR_RESET_FLAGS, /* u32 */
- MPTCP_ATTR_SERVER_SIDE, /* u8 */
-
- __MPTCP_ATTR_AFTER_LAST
-};
-
-#define MPTCP_ATTR_MAX (__MPTCP_ATTR_AFTER_LAST - 1)
-
/* MPTCP Reset reason codes, rfc8684 */
#define MPTCP_RST_EUNSPEC 0
#define MPTCP_RST_EMPTCP 1
@@ -244,9 +92,33 @@ struct mptcp_subflow_addrs {
};
};
+struct mptcp_subflow_info {
+ __u32 id;
+ struct mptcp_subflow_addrs addrs;
+};
+
+struct mptcp_full_info {
+ __u32 size_tcpinfo_kernel; /* must be 0, set by kernel */
+ __u32 size_tcpinfo_user;
+ __u32 size_sfinfo_kernel; /* must be 0, set by kernel */
+ __u32 size_sfinfo_user;
+ __u32 num_subflows; /* must be 0, set by kernel (real subflow count) */
+ __u32 size_arrays_user; /* max subflows that userspace is interested in;
+ * the buffers at subflow_info/tcp_info
+ * are respectively at least:
+ * size_arrays * size_sfinfo_user
+ * size_arrays * size_tcpinfo_user
+ * bytes wide
+ */
+ __aligned_u64 subflow_info;
+ __aligned_u64 tcp_info;
+ struct mptcp_info mptcp_info;
+};
+
/* MPTCP socket options */
#define MPTCP_INFO 1
#define MPTCP_TCPINFO 2
#define MPTCP_SUBFLOW_ADDRS 3
+#define MPTCP_FULL_INFO 4
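
A sketch of one MPTCP_FULL_INFO call gathering mptcp_info plus per-subflow tcp_info and address data in a single getsockopt(); SOL_MPTCP coming from the socket headers and the cap of 8 subflows are assumptions:

#include <stdint.h>
#include <stdio.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>
#include <linux/mptcp.h>

#define MAX_SF 8	/* illustrative cap on subflows of interest */

static void dump_full_info(int mptcp_fd)
{
	struct mptcp_subflow_info sf[MAX_SF];
	struct tcp_info ti[MAX_SF];
	struct mptcp_full_info fi = {
		.size_tcpinfo_user = sizeof(ti[0]),
		.size_sfinfo_user = sizeof(sf[0]),
		.size_arrays_user = MAX_SF,
		.subflow_info = (uintptr_t)sf,
		.tcp_info = (uintptr_t)ti,
	};
	socklen_t olen = sizeof(fi);

	if (getsockopt(mptcp_fd, SOL_MPTCP, MPTCP_FULL_INFO, &fi, &olen))
		return;
	printf("%u subflows, %llu bytes sent\n", fi.num_subflows,
	       (unsigned long long)fi.mptcp_info.mptcpi_bytes_sent);
}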
#endif /* _UAPI_MPTCP_H */
diff --git a/include/uapi/linux/mptcp_pm.h b/include/uapi/linux/mptcp_pm.h
new file mode 100644
index 000000000000..50589e5dd6a3
--- /dev/null
+++ b/include/uapi/linux/mptcp_pm.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/mptcp_pm.yaml */
+/* YNL-GEN uapi header */
+
+#ifndef _UAPI_LINUX_MPTCP_PM_H
+#define _UAPI_LINUX_MPTCP_PM_H
+
+#define MPTCP_PM_NAME "mptcp_pm"
+#define MPTCP_PM_VER 1
+
+/**
+ * enum mptcp_event_type
+ * @MPTCP_EVENT_UNSPEC: unused event
+ * @MPTCP_EVENT_CREATED: token, family, saddr4 | saddr6, daddr4 | daddr6,
+ * sport, dport A new MPTCP connection has been created. It is a good time
+ * to allocate memory and send ADD_ADDR if needed. Depending on the
+ * traffic-patterns it can take a long time until the MPTCP_EVENT_ESTABLISHED
+ * is sent.
+ * @MPTCP_EVENT_ESTABLISHED: token, family, saddr4 | saddr6, daddr4 | daddr6,
+ * sport, dport An MPTCP connection is established (can start new subflows).
+ * @MPTCP_EVENT_CLOSED: token An MPTCP connection has stopped.
+ * @MPTCP_EVENT_ANNOUNCED: token, rem_id, family, daddr4 | daddr6 [, dport] A
+ * new address has been announced by the peer.
+ * @MPTCP_EVENT_REMOVED: token, rem_id An address has been lost by the peer.
+ * @MPTCP_EVENT_SUB_ESTABLISHED: token, family, loc_id, rem_id, saddr4 |
+ * saddr6, daddr4 | daddr6, sport, dport, backup, if_idx [, error] A new
+ * subflow has been established. 'error' should not be set.
+ * @MPTCP_EVENT_SUB_CLOSED: token, family, loc_id, rem_id, saddr4 | saddr6,
+ * daddr4 | daddr6, sport, dport, backup, if_idx [, error] A subflow has been
+ * closed. An error (copy of sk_err) could be set if an error has been
+ * detected for this subflow.
+ * @MPTCP_EVENT_SUB_PRIORITY: token, family, loc_id, rem_id, saddr4 | saddr6,
+ * daddr4 | daddr6, sport, dport, backup, if_idx [, error] The priority of a
+ * subflow has changed. 'error' should not be set.
+ * @MPTCP_EVENT_LISTENER_CREATED: family, sport, saddr4 | saddr6 A new PM
+ * listener is created.
+ * @MPTCP_EVENT_LISTENER_CLOSED: family, sport, saddr4 | saddr6 A PM listener
+ * is closed.
+ */
+enum mptcp_event_type {
+ MPTCP_EVENT_UNSPEC,
+ MPTCP_EVENT_CREATED,
+ MPTCP_EVENT_ESTABLISHED,
+ MPTCP_EVENT_CLOSED,
+ MPTCP_EVENT_ANNOUNCED = 6,
+ MPTCP_EVENT_REMOVED,
+ MPTCP_EVENT_SUB_ESTABLISHED = 10,
+ MPTCP_EVENT_SUB_CLOSED,
+ MPTCP_EVENT_SUB_PRIORITY = 13,
+ MPTCP_EVENT_LISTENER_CREATED = 15,
+ MPTCP_EVENT_LISTENER_CLOSED,
+};
+
+enum {
+ MPTCP_PM_ADDR_ATTR_UNSPEC,
+ MPTCP_PM_ADDR_ATTR_FAMILY,
+ MPTCP_PM_ADDR_ATTR_ID,
+ MPTCP_PM_ADDR_ATTR_ADDR4,
+ MPTCP_PM_ADDR_ATTR_ADDR6,
+ MPTCP_PM_ADDR_ATTR_PORT,
+ MPTCP_PM_ADDR_ATTR_FLAGS,
+ MPTCP_PM_ADDR_ATTR_IF_IDX,
+
+ __MPTCP_PM_ADDR_ATTR_MAX
+};
+#define MPTCP_PM_ADDR_ATTR_MAX (__MPTCP_PM_ADDR_ATTR_MAX - 1)
+
+enum {
+ MPTCP_SUBFLOW_ATTR_UNSPEC,
+ MPTCP_SUBFLOW_ATTR_TOKEN_REM,
+ MPTCP_SUBFLOW_ATTR_TOKEN_LOC,
+ MPTCP_SUBFLOW_ATTR_RELWRITE_SEQ,
+ MPTCP_SUBFLOW_ATTR_MAP_SEQ,
+ MPTCP_SUBFLOW_ATTR_MAP_SFSEQ,
+ MPTCP_SUBFLOW_ATTR_SSN_OFFSET,
+ MPTCP_SUBFLOW_ATTR_MAP_DATALEN,
+ MPTCP_SUBFLOW_ATTR_FLAGS,
+ MPTCP_SUBFLOW_ATTR_ID_REM,
+ MPTCP_SUBFLOW_ATTR_ID_LOC,
+ MPTCP_SUBFLOW_ATTR_PAD,
+
+ __MPTCP_SUBFLOW_ATTR_MAX
+};
+#define MPTCP_SUBFLOW_ATTR_MAX (__MPTCP_SUBFLOW_ATTR_MAX - 1)
+
+enum {
+ MPTCP_PM_ENDPOINT_ADDR = 1,
+
+ __MPTCP_PM_ENDPOINT_MAX
+};
+#define MPTCP_PM_ENDPOINT_MAX (__MPTCP_PM_ENDPOINT_MAX - 1)
+
+enum {
+ MPTCP_PM_ATTR_UNSPEC,
+ MPTCP_PM_ATTR_ADDR,
+ MPTCP_PM_ATTR_RCV_ADD_ADDRS,
+ MPTCP_PM_ATTR_SUBFLOWS,
+ MPTCP_PM_ATTR_TOKEN,
+ MPTCP_PM_ATTR_LOC_ID,
+ MPTCP_PM_ATTR_ADDR_REMOTE,
+
+ __MPTCP_ATTR_AFTER_LAST
+};
+#define MPTCP_PM_ATTR_MAX (__MPTCP_ATTR_AFTER_LAST - 1)
+
+enum mptcp_event_attr {
+ MPTCP_ATTR_UNSPEC,
+ MPTCP_ATTR_TOKEN,
+ MPTCP_ATTR_FAMILY,
+ MPTCP_ATTR_LOC_ID,
+ MPTCP_ATTR_REM_ID,
+ MPTCP_ATTR_SADDR4,
+ MPTCP_ATTR_SADDR6,
+ MPTCP_ATTR_DADDR4,
+ MPTCP_ATTR_DADDR6,
+ MPTCP_ATTR_SPORT,
+ MPTCP_ATTR_DPORT,
+ MPTCP_ATTR_BACKUP,
+ MPTCP_ATTR_ERROR,
+ MPTCP_ATTR_FLAGS,
+ MPTCP_ATTR_TIMEOUT,
+ MPTCP_ATTR_IF_IDX,
+ MPTCP_ATTR_RESET_REASON,
+ MPTCP_ATTR_RESET_FLAGS,
+ MPTCP_ATTR_SERVER_SIDE,
+
+ __MPTCP_ATTR_MAX
+};
+#define MPTCP_ATTR_MAX (__MPTCP_ATTR_MAX - 1)
+
+enum {
+ MPTCP_PM_CMD_UNSPEC,
+ MPTCP_PM_CMD_ADD_ADDR,
+ MPTCP_PM_CMD_DEL_ADDR,
+ MPTCP_PM_CMD_GET_ADDR,
+ MPTCP_PM_CMD_FLUSH_ADDRS,
+ MPTCP_PM_CMD_SET_LIMITS,
+ MPTCP_PM_CMD_GET_LIMITS,
+ MPTCP_PM_CMD_SET_FLAGS,
+ MPTCP_PM_CMD_ANNOUNCE,
+ MPTCP_PM_CMD_REMOVE,
+ MPTCP_PM_CMD_SUBFLOW_CREATE,
+ MPTCP_PM_CMD_SUBFLOW_DESTROY,
+
+ __MPTCP_PM_CMD_AFTER_LAST
+};
+#define MPTCP_PM_CMD_MAX (__MPTCP_PM_CMD_AFTER_LAST - 1)
+
+#endif /* _UAPI_LINUX_MPTCP_PM_H */
diff --git a/include/uapi/linux/netdev.h b/include/uapi/linux/netdev.h
index 639524b59930..93cb411adf72 100644
--- a/include/uapi/linux/netdev.h
+++ b/include/uapi/linux/netdev.h
@@ -11,7 +11,7 @@
/**
* enum netdev_xdp_act
- * @NETDEV_XDP_ACT_BASIC: XDP feautues set supported by all drivers
+ * @NETDEV_XDP_ACT_BASIC: XDP features set supported by all drivers
* (XDP_ABORTED, XDP_DROP, XDP_PASS, XDP_TX)
* @NETDEV_XDP_ACT_REDIRECT: The netdev supports XDP_REDIRECT
* @NETDEV_XDP_ACT_NDO_XMIT: This feature informs if netdev implements
@@ -34,28 +34,122 @@ enum netdev_xdp_act {
NETDEV_XDP_ACT_RX_SG = 32,
NETDEV_XDP_ACT_NDO_XMIT_SG = 64,
+ /* private: */
NETDEV_XDP_ACT_MASK = 127,
};
+/**
+ * enum netdev_xdp_rx_metadata
+ * @NETDEV_XDP_RX_METADATA_TIMESTAMP: Device is capable of exposing receive HW
+ * timestamp via bpf_xdp_metadata_rx_timestamp().
+ * @NETDEV_XDP_RX_METADATA_HASH: Device is capable of exposing receive packet
+ * hash via bpf_xdp_metadata_rx_hash().
+ * @NETDEV_XDP_RX_METADATA_VLAN_TAG: Device is capable of exposing receive
+ * packet VLAN tag via bpf_xdp_metadata_rx_vlan_tag().
+ */
+enum netdev_xdp_rx_metadata {
+ NETDEV_XDP_RX_METADATA_TIMESTAMP = 1,
+ NETDEV_XDP_RX_METADATA_HASH = 2,
+ NETDEV_XDP_RX_METADATA_VLAN_TAG = 4,
+};
+
+/**
+ * enum netdev_xsk_flags
+ * @NETDEV_XSK_FLAGS_TX_TIMESTAMP: HW timestamping egress packets is supported
+ * by the driver.
+ * @NETDEV_XSK_FLAGS_TX_CHECKSUM: L3 checksum HW offload is supported by the
+ * driver.
+ */
+enum netdev_xsk_flags {
+ NETDEV_XSK_FLAGS_TX_TIMESTAMP = 1,
+ NETDEV_XSK_FLAGS_TX_CHECKSUM = 2,
+};
+
+enum netdev_queue_type {
+ NETDEV_QUEUE_TYPE_RX,
+ NETDEV_QUEUE_TYPE_TX,
+};
+
enum {
NETDEV_A_DEV_IFINDEX = 1,
NETDEV_A_DEV_PAD,
NETDEV_A_DEV_XDP_FEATURES,
+ NETDEV_A_DEV_XDP_ZC_MAX_SEGS,
+ NETDEV_A_DEV_XDP_RX_METADATA_FEATURES,
+ NETDEV_A_DEV_XSK_FEATURES,
__NETDEV_A_DEV_MAX,
NETDEV_A_DEV_MAX = (__NETDEV_A_DEV_MAX - 1)
};
enum {
+ NETDEV_A_PAGE_POOL_ID = 1,
+ NETDEV_A_PAGE_POOL_IFINDEX,
+ NETDEV_A_PAGE_POOL_NAPI_ID,
+ NETDEV_A_PAGE_POOL_INFLIGHT,
+ NETDEV_A_PAGE_POOL_INFLIGHT_MEM,
+ NETDEV_A_PAGE_POOL_DETACH_TIME,
+
+ __NETDEV_A_PAGE_POOL_MAX,
+ NETDEV_A_PAGE_POOL_MAX = (__NETDEV_A_PAGE_POOL_MAX - 1)
+};
+
+enum {
+ NETDEV_A_PAGE_POOL_STATS_INFO = 1,
+ NETDEV_A_PAGE_POOL_STATS_ALLOC_FAST = 8,
+ NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW,
+ NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW_HIGH_ORDER,
+ NETDEV_A_PAGE_POOL_STATS_ALLOC_EMPTY,
+ NETDEV_A_PAGE_POOL_STATS_ALLOC_REFILL,
+ NETDEV_A_PAGE_POOL_STATS_ALLOC_WAIVE,
+ NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHED,
+ NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHE_FULL,
+ NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING,
+ NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING_FULL,
+ NETDEV_A_PAGE_POOL_STATS_RECYCLE_RELEASED_REFCNT,
+
+ __NETDEV_A_PAGE_POOL_STATS_MAX,
+ NETDEV_A_PAGE_POOL_STATS_MAX = (__NETDEV_A_PAGE_POOL_STATS_MAX - 1)
+};
+
+enum {
+ NETDEV_A_NAPI_IFINDEX = 1,
+ NETDEV_A_NAPI_ID,
+ NETDEV_A_NAPI_IRQ,
+ NETDEV_A_NAPI_PID,
+
+ __NETDEV_A_NAPI_MAX,
+ NETDEV_A_NAPI_MAX = (__NETDEV_A_NAPI_MAX - 1)
+};
+
+enum {
+ NETDEV_A_QUEUE_ID = 1,
+ NETDEV_A_QUEUE_IFINDEX,
+ NETDEV_A_QUEUE_TYPE,
+ NETDEV_A_QUEUE_NAPI_ID,
+
+ __NETDEV_A_QUEUE_MAX,
+ NETDEV_A_QUEUE_MAX = (__NETDEV_A_QUEUE_MAX - 1)
+};
+
+enum {
NETDEV_CMD_DEV_GET = 1,
NETDEV_CMD_DEV_ADD_NTF,
NETDEV_CMD_DEV_DEL_NTF,
NETDEV_CMD_DEV_CHANGE_NTF,
+ NETDEV_CMD_PAGE_POOL_GET,
+ NETDEV_CMD_PAGE_POOL_ADD_NTF,
+ NETDEV_CMD_PAGE_POOL_DEL_NTF,
+ NETDEV_CMD_PAGE_POOL_CHANGE_NTF,
+ NETDEV_CMD_PAGE_POOL_STATS_GET,
+ NETDEV_CMD_QUEUE_GET,
+ NETDEV_CMD_NAPI_GET,
__NETDEV_CMD_MAX,
NETDEV_CMD_MAX = (__NETDEV_CMD_MAX - 1)
};
#define NETDEV_MCGRP_MGMT "mgmt"
+#define NETDEV_MCGRP_PAGE_POOL "page-pool"
#endif /* _UAPI_LINUX_NETDEV_H */
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index c4d4d8e42dc8..117c6a9b845b 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -105,6 +105,7 @@ enum nft_verdicts {
* @NFT_MSG_DESTROYSETELEM: destroy a set element (enum nft_set_elem_attributes)
* @NFT_MSG_DESTROYOBJ: destroy a stateful object (enum nft_object_attributes)
* @NFT_MSG_DESTROYFLOWTABLE: destroy flow table (enum nft_flowtable_attributes)
+ * @NFT_MSG_GETSETELEM_RESET: get set elements and reset attached stateful expressions (enum nft_set_elem_attributes)
*/
enum nf_tables_msg_types {
NFT_MSG_NEWTABLE,
@@ -140,6 +141,7 @@ enum nf_tables_msg_types {
NFT_MSG_DESTROYSETELEM,
NFT_MSG_DESTROYOBJ,
NFT_MSG_DESTROYFLOWTABLE,
+ NFT_MSG_GETSETELEM_RESET,
NFT_MSG_MAX,
};
@@ -261,6 +263,7 @@ enum nft_chain_attributes {
* @NFTA_RULE_USERDATA: user data (NLA_BINARY, NFT_USERDATA_MAXLEN)
* @NFTA_RULE_ID: uniquely identifies a rule in a transaction (NLA_U32)
* @NFTA_RULE_POSITION_ID: transaction unique identifier of the previous rule (NLA_U32)
+ * @NFTA_RULE_CHAIN_ID: add the rule to chain by ID, alternative to @NFTA_RULE_CHAIN (NLA_U32)
*/
enum nft_rule_attributes {
NFTA_RULE_UNSPEC,
@@ -282,9 +285,11 @@ enum nft_rule_attributes {
/**
* enum nft_rule_compat_flags - nf_tables rule compat flags
*
+ * @NFT_RULE_COMPAT_F_UNUSED: unused
* @NFT_RULE_COMPAT_F_INV: invert the check result
*/
enum nft_rule_compat_flags {
+ NFT_RULE_COMPAT_F_UNUSED = (1 << 0),
NFT_RULE_COMPAT_F_INV = (1 << 1),
NFT_RULE_COMPAT_F_MASK = NFT_RULE_COMPAT_F_INV,
};
@@ -859,12 +864,14 @@ enum nft_exthdr_flags {
* @NFT_EXTHDR_OP_TCP: match against tcp options
* @NFT_EXTHDR_OP_IPV4: match against ipv4 options
* @NFT_EXTHDR_OP_SCTP: match against sctp chunks
+ * @NFT_EXTHDR_OP_DCCP: match against dccp options
*/
enum nft_exthdr_op {
NFT_EXTHDR_OP_IPV6,
NFT_EXTHDR_OP_TCPOPT,
NFT_EXTHDR_OP_IPV4,
NFT_EXTHDR_OP_SCTP,
+ NFT_EXTHDR_OP_DCCP,
__NFT_EXTHDR_OP_MAX
};
#define NFT_EXTHDR_OP_MAX (__NFT_EXTHDR_OP_MAX - 1)
diff --git a/include/uapi/linux/netfilter_bridge/ebtables.h b/include/uapi/linux/netfilter_bridge/ebtables.h
index a494cf43a755..4ff328f3d339 100644
--- a/include/uapi/linux/netfilter_bridge/ebtables.h
+++ b/include/uapi/linux/netfilter_bridge/ebtables.h
@@ -87,7 +87,7 @@ struct ebt_entries {
/* nr. of entries */
unsigned int nentries;
/* entry list */
- char data[0] __attribute__ ((aligned (__alignof__(struct ebt_replace))));
+ char data[] __attribute__ ((aligned (__alignof__(struct ebt_replace))));
};
/* used for the bitmask of struct ebt_entry */
@@ -129,7 +129,7 @@ struct ebt_entry_match {
} u;
/* size of data */
unsigned int match_size;
- unsigned char data[0] __attribute__ ((aligned (__alignof__(struct ebt_replace))));
+ unsigned char data[] __attribute__ ((aligned (__alignof__(struct ebt_replace))));
};
struct ebt_entry_watcher {
@@ -142,7 +142,7 @@ struct ebt_entry_watcher {
} u;
/* size of data */
unsigned int watcher_size;
- unsigned char data[0] __attribute__ ((aligned (__alignof__(struct ebt_replace))));
+ unsigned char data[] __attribute__ ((aligned (__alignof__(struct ebt_replace))));
};
struct ebt_entry_target {
@@ -182,13 +182,15 @@ struct ebt_entry {
unsigned char sourcemsk[ETH_ALEN];
unsigned char destmac[ETH_ALEN];
unsigned char destmsk[ETH_ALEN];
- /* sizeof ebt_entry + matches */
- unsigned int watchers_offset;
- /* sizeof ebt_entry + matches + watchers */
- unsigned int target_offset;
- /* sizeof ebt_entry + matches + watchers + target */
- unsigned int next_offset;
- unsigned char elems[0] __attribute__ ((aligned (__alignof__(struct ebt_replace))));
+ __struct_group(/* no tag */, offsets, /* no attrs */,
+ /* sizeof ebt_entry + matches */
+ unsigned int watchers_offset;
+ /* sizeof ebt_entry + matches + watchers */
+ unsigned int target_offset;
+ /* sizeof ebt_entry + matches + watchers + target */
+ unsigned int next_offset;
+ );
+ unsigned char elems[] __attribute__ ((aligned (__alignof__(struct ebt_replace))));
};
static __inline__ struct ebt_entry_target *
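The [0]-to-[] changes here convert the old GNU zero-length arrays into C99 flexible array members, and __struct_group() wraps the three offset fields so they can be addressed as one unit for copying and bounds checking. A hedged sketch of what the grouping permits (the helper and its callers are hypothetical):

#include <string.h>
#include <linux/netfilter_bridge/ebtables.h>

/* Copy watchers_offset, target_offset and next_offset in one go;
 * sizeof(dst->offsets) covers exactly the grouped fields, a size the
 * compiler can verify. */
static void copy_entry_offsets(struct ebt_entry *dst,
			       const struct ebt_entry *src)
{
	memcpy(&dst->offsets, &src->offsets, sizeof(dst->offsets));
}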
diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h
index e2ae82e3f9f7..f87aaf28a649 100644
--- a/include/uapi/linux/netlink.h
+++ b/include/uapi/linux/netlink.h
@@ -298,6 +298,8 @@ struct nla_bitfield32 {
* entry has attributes again, the policy for those inner ones
* and the corresponding maxtype may be specified.
* @NL_ATTR_TYPE_BITFIELD32: &struct nla_bitfield32 attribute
+ * @NL_ATTR_TYPE_SINT: 32-bit or 64-bit signed attribute, aligned to 4B
+ * @NL_ATTR_TYPE_UINT: 32-bit or 64-bit unsigned attribute, aligned to 4B
*/
enum netlink_attribute_type {
NL_ATTR_TYPE_INVALID,
@@ -322,6 +324,9 @@ enum netlink_attribute_type {
NL_ATTR_TYPE_NESTED_ARRAY,
NL_ATTR_TYPE_BITFIELD32,
+
+ NL_ATTR_TYPE_SINT,
+ NL_ATTR_TYPE_UINT,
};
/**
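NL_ATTR_TYPE_SINT and NL_ATTR_TYPE_UINT describe integers whose payload may be either 4 or 8 bytes, so consumers must branch on the attribute length. A hedged userspace sketch using libnl (the helper name is made up):

#include <stdint.h>
#include <netlink/attr.h>

/* Read an NL_ATTR_TYPE_UINT-style attribute that may be carried as
 * either a u32 or a u64, branching on the payload length. */
static uint64_t get_uint_attr(struct nlattr *nla)
{
	if (nla_len(nla) == sizeof(uint32_t))
		return nla_get_u32(nla);
	return nla_get_u64(nla);
}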
diff --git a/include/uapi/linux/nfsd_netlink.h b/include/uapi/linux/nfsd_netlink.h
new file mode 100644
index 000000000000..3cd044edee5d
--- /dev/null
+++ b/include/uapi/linux/nfsd_netlink.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/nfsd.yaml */
+/* YNL-GEN uapi header */
+
+#ifndef _UAPI_LINUX_NFSD_NETLINK_H
+#define _UAPI_LINUX_NFSD_NETLINK_H
+
+#define NFSD_FAMILY_NAME "nfsd"
+#define NFSD_FAMILY_VERSION 1
+
+enum {
+ NFSD_A_RPC_STATUS_XID = 1,
+ NFSD_A_RPC_STATUS_FLAGS,
+ NFSD_A_RPC_STATUS_PROG,
+ NFSD_A_RPC_STATUS_VERSION,
+ NFSD_A_RPC_STATUS_PROC,
+ NFSD_A_RPC_STATUS_SERVICE_TIME,
+ NFSD_A_RPC_STATUS_PAD,
+ NFSD_A_RPC_STATUS_SADDR4,
+ NFSD_A_RPC_STATUS_DADDR4,
+ NFSD_A_RPC_STATUS_SADDR6,
+ NFSD_A_RPC_STATUS_DADDR6,
+ NFSD_A_RPC_STATUS_SPORT,
+ NFSD_A_RPC_STATUS_DPORT,
+ NFSD_A_RPC_STATUS_COMPOUND_OPS,
+
+ __NFSD_A_RPC_STATUS_MAX,
+ NFSD_A_RPC_STATUS_MAX = (__NFSD_A_RPC_STATUS_MAX - 1)
+};
+
+enum {
+ NFSD_CMD_RPC_STATUS_GET = 1,
+
+ __NFSD_CMD_MAX,
+ NFSD_CMD_MAX = (__NFSD_CMD_MAX - 1)
+};
+
+#endif /* _UAPI_LINUX_NFSD_NETLINK_H */
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index c59fec406da5..1ccdcae24372 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -11,7 +11,7 @@
* Copyright 2008 Jouni Malinen <jouni.malinen@atheros.com>
* Copyright 2008 Colin McCabe <colin@cozybit.com>
* Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2018-2023 Intel Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -72,7 +72,7 @@
* For drivers supporting TDLS with external setup (WIPHY_FLAG_SUPPORTS_TDLS
* and WIPHY_FLAG_TDLS_EXTERNAL_SETUP), the station lifetime is as follows:
* - a setup station entry is added, not yet authorized, without any rate
- * or capability information, this just exists to avoid race conditions
+ * or capability information; this just exists to avoid race conditions
* - when the TDLS setup is done, a single NL80211_CMD_SET_STATION is valid
* to add rate and capability information to the station and at the same
* time mark it authorized.
@@ -87,7 +87,7 @@
* DOC: Frame transmission/registration support
*
* Frame transmission and registration support exists to allow userspace
- * management entities such as wpa_supplicant react to management frames
+ * management entities such as wpa_supplicant to react to management frames
* that are not being handled by the kernel. This includes, for example,
* certain classes of action frames that cannot be handled in the kernel
* for various reasons.
@@ -113,7 +113,7 @@
*
* Frame transmission allows userspace to send for example the required
* responses to action frames. It is subject to some sanity checking,
- * but many frames can be transmitted. When a frame was transmitted, its
+ * but many frames can be transmitted. When a frame is transmitted, its
* status is indicated to the sending socket.
*
* For more technical details, see the corresponding command descriptions
@@ -123,7 +123,7 @@
/**
* DOC: Virtual interface / concurrency capabilities
*
- * Some devices are able to operate with virtual MACs, they can have
+ * Some devices are able to operate with virtual MACs; they can have
* more than one virtual interface. The capability handling for this
* is a bit complex though, as there may be a number of restrictions
* on the types of concurrency that are supported.
@@ -135,7 +135,7 @@
* Once concurrency is desired, more attributes must be observed:
* To start with, since some interface types are purely managed in
* software, like the AP-VLAN type in mac80211 for example, there's
- * an additional list of these, they can be added at any time and
+ * an additional list of these; they can be added at any time and
* are only restricted by some semantic restrictions (e.g. AP-VLAN
* cannot be added without a corresponding AP interface). This list
* is exported in the %NL80211_ATTR_SOFTWARE_IFTYPES attribute.
@@ -164,17 +164,17 @@
* Packet coalesce feature helps to reduce number of received interrupts
* to host by buffering these packets in firmware/hardware for some
* predefined time. Received interrupt will be generated when one of the
- * following events occur.
+ * following events occurs.
* a) Expiration of hardware timer whose expiration time is set to maximum
* coalescing delay of matching coalesce rule.
- * b) Coalescing buffer in hardware reaches it's limit.
+ * b) Coalescing buffer in hardware reaches its limit.
* c) Packet doesn't match any of the configured coalesce rules.
*
* User needs to configure following parameters for creating a coalesce
* rule.
* a) Maximum coalescing delay
* b) List of packet patterns which needs to be matched
- * c) Condition for coalescence. pattern 'match' or 'no match'
+ * c) Condition for coalescence: pattern 'match' or 'no match'
* Multiple such rules can be created.
*/
@@ -213,7 +213,7 @@
/**
* DOC: FILS shared key authentication offload
*
- * FILS shared key authentication offload can be advertized by drivers by
+ * FILS shared key authentication offload can be advertised by drivers by
* setting @NL80211_EXT_FEATURE_FILS_SK_OFFLOAD flag. The drivers that support
* FILS shared key authentication offload should be able to construct the
* authentication and association frames for FILS shared key authentication and
@@ -239,7 +239,7 @@
* The PMKSA can be maintained in userspace persistently so that it can be used
* later after reboots or wifi turn off/on also.
*
- * %NL80211_ATTR_FILS_CACHE_ID is the cache identifier advertized by a FILS
+ * %NL80211_ATTR_FILS_CACHE_ID is the cache identifier advertised by a FILS
* capable AP supporting PMK caching. It specifies the scope within which the
* PMKSAs are cached in an ESS. %NL80211_CMD_SET_PMKSA and
* %NL80211_CMD_DEL_PMKSA are enhanced to allow support for PMKSA caching based
@@ -290,12 +290,12 @@
* If the configuration needs to be applied for specific peer then the MAC
* address of the peer needs to be passed in %NL80211_ATTR_MAC, otherwise the
* configuration will be applied for all the connected peers in the vif except
- * any peers that have peer specific configuration for the TID by default; if
- * the %NL80211_TID_CONFIG_ATTR_OVERRIDE flag is set, peer specific values
+ * any peers that have peer-specific configuration for the TID by default; if
+ * the %NL80211_TID_CONFIG_ATTR_OVERRIDE flag is set, peer-specific values
* will be overwritten.
*
- * All this configuration is valid only for STA's current connection
- * i.e. the configuration will be reset to default when the STA connects back
+ * All this configuration is valid only for STA's current connection,
+ * i.e., the configuration will be reset to default when the STA connects back
* after disconnection/roaming, and this configuration will be cleared when
* the interface goes down.
*/
@@ -326,7 +326,7 @@
/**
* DOC: Multi-Link Operation
*
- * In Multi-Link Operation, a connection between to MLDs utilizes multiple
+ * In Multi-Link Operation, a connection between two MLDs utilizes multiple
* links. To use this in nl80211, various commands and responses now need
* to or will include the new %NL80211_ATTR_MLO_LINKS attribute.
* Additionally, various commands that need to operate on a specific link
@@ -335,6 +335,15 @@
*/
/**
+ * DOC: OWE DH IE handling offload
+ *
+ * By setting the @NL80211_EXT_FEATURE_OWE_OFFLOAD flag, drivers can indicate
+ * to kernel/application space that DH IE handling is not required there.
+ * When this flag is advertised, the driver/device will take care of DH IE
+ * inclusion and of processing the peer DH IE to generate the PMK.
+ */
+
+/**
* enum nl80211_commands - supported nl80211 commands
*
* @NL80211_CMD_UNSPEC: unspecified command to catch errors
@@ -512,7 +521,7 @@
* %NL80211_ATTR_SCHED_SCAN_PLANS. If %NL80211_ATTR_SCHED_SCAN_PLANS is
* not specified and only %NL80211_ATTR_SCHED_SCAN_INTERVAL is specified,
* scheduled scan will run in an infinite loop with the specified interval.
- * These attributes are mutually exculsive,
+ * These attributes are mutually exclusive,
* i.e. NL80211_ATTR_SCHED_SCAN_INTERVAL must not be passed if
* NL80211_ATTR_SCHED_SCAN_PLANS is defined.
* If for some reason scheduled scan is aborted by the driver, all scan
@@ -543,7 +552,7 @@
* %NL80211_CMD_STOP_SCHED_SCAN command is received or when the interface
* is brought down while a scheduled scan was running.
*
- * @NL80211_CMD_GET_SURVEY: get survey resuls, e.g. channel occupation
+ * @NL80211_CMD_GET_SURVEY: get survey results, e.g. channel occupation
* or noise level
* @NL80211_CMD_NEW_SURVEY_RESULTS: survey data notification (as a reply to
* NL80211_CMD_GET_SURVEY and on the "scan" multicast group)
@@ -554,12 +563,13 @@
* using %NL80211_ATTR_SSID, %NL80211_ATTR_FILS_CACHE_ID,
* %NL80211_ATTR_PMKID, and %NL80211_ATTR_PMK in case of FILS
* authentication where %NL80211_ATTR_FILS_CACHE_ID is the identifier
- * advertized by a FILS capable AP identifying the scope of PMKSA in an
+ * advertised by a FILS capable AP identifying the scope of PMKSA in an
* ESS.
* @NL80211_CMD_DEL_PMKSA: Delete a PMKSA cache entry, using %NL80211_ATTR_MAC
* (for the BSSID) and %NL80211_ATTR_PMKID or using %NL80211_ATTR_SSID,
* %NL80211_ATTR_FILS_CACHE_ID, and %NL80211_ATTR_PMKID in case of FILS
- * authentication.
+ * authentication. Additionally, in case of SAE and OWE offloads, the
+ * PMKSA entry can be deleted using %NL80211_ATTR_SSID.
* @NL80211_CMD_FLUSH_PMKSA: Flush all PMKSA cache entries.
*
* @NL80211_CMD_REG_CHANGE: indicates to userspace the regulatory domain
@@ -598,7 +608,7 @@
* BSSID in case of station mode). %NL80211_ATTR_SSID is used to specify
* the SSID (mainly for association, but is included in authentication
 * request, too, to help BSS selection). %NL80211_ATTR_WIPHY_FREQ +
- * %NL80211_ATTR_WIPHY_FREQ_OFFSET is used to specify the frequence of the
+ * %NL80211_ATTR_WIPHY_FREQ_OFFSET is used to specify the frequency of the
* channel in MHz. %NL80211_ATTR_AUTH_TYPE is used to specify the
* authentication type. %NL80211_ATTR_IE is used to define IEs
* (VendorSpecificInfo, but also including RSN IE and FT IEs) to be added
@@ -807,7 +817,7 @@
* reached.
* @NL80211_CMD_SET_CHANNEL: Set the channel (using %NL80211_ATTR_WIPHY_FREQ
* and the attributes determining channel width) the given interface
- * (identifed by %NL80211_ATTR_IFINDEX) shall operate on.
+ * (identified by %NL80211_ATTR_IFINDEX) shall operate on.
* In case multiple channels are supported by the device, the mechanism
* with which it switches channels is implementation-defined.
* When a monitor interface is given, it can only switch channel while
@@ -879,7 +889,7 @@
* inform userspace of the new replay counter.
*
* @NL80211_CMD_PMKSA_CANDIDATE: This is used as an event to inform userspace
- * of PMKSA caching dandidates.
+ * of PMKSA caching candidates.
*
* @NL80211_CMD_TDLS_OPER: Perform a high-level TDLS command (e.g. link setup).
* In addition, this can be used as an event to request userspace to take
@@ -915,7 +925,7 @@
*
* @NL80211_CMD_PROBE_CLIENT: Probe an associated station on an AP interface
* by sending a null data frame to it and reporting when the frame is
- * acknowleged. This is used to allow timing out inactive clients. Uses
+ * acknowledged. This is used to allow timing out inactive clients. Uses
* %NL80211_ATTR_IFINDEX and %NL80211_ATTR_MAC. The command returns a
* direct reply with an %NL80211_ATTR_COOKIE that is later used to match
* up the event with the request. The event includes the same data and
@@ -1126,11 +1136,15 @@
* @NL80211_CMD_DEL_PMK: For offloaded 4-Way handshake, delete the previously
* configured PMK for the authenticator address identified by
* %NL80211_ATTR_MAC.
- * @NL80211_CMD_PORT_AUTHORIZED: An event that indicates an 802.1X FT roam was
- * completed successfully. Drivers that support 4 way handshake offload
- * should send this event after indicating 802.1X FT assocation with
- * %NL80211_CMD_ROAM. If the 4 way handshake failed %NL80211_CMD_DISCONNECT
- * should be indicated instead.
+ * @NL80211_CMD_PORT_AUTHORIZED: An event that indicates port is authorized and
+ * open for regular data traffic. For STA/P2P-client, this event is sent
+ * with AP MAC address and for AP/P2P-GO, the event carries the STA/P2P-
+ * client MAC address.
+ * Drivers that support 4 way handshake offload should send this event for
+ * STA/P2P-client after successful 4-way HS or after 802.1X FT following
+ * NL80211_CMD_CONNECT or NL80211_CMD_ROAM. Drivers using AP/P2P-GO 4-way
+ * handshake offload should send this event on successful completion of
+ * 4-way handshake with the peer (STA/P2P-client).
* @NL80211_CMD_CONTROL_PORT_FRAME: Control Port (e.g. PAE) frame TX request
* and RX notification. This command is used both as a request to transmit
* a control port frame and as a notification that a control port frame
@@ -1309,6 +1323,16 @@
* The number of peers that HW timestamping can be enabled for concurrently
* is indicated by %NL80211_ATTR_MAX_HW_TIMESTAMP_PEERS.
*
+ * @NL80211_CMD_LINKS_REMOVED: Notify userspace about the removal of STA MLD
+ * setup links due to AP MLD removing the corresponding affiliated APs with
+ * Multi-Link reconfiguration. %NL80211_ATTR_MLO_LINKS is used to provide
+ * information about the removed STA MLD setup links.
+ *
+ * @NL80211_CMD_SET_TID_TO_LINK_MAPPING: Set the TID to Link Mapping for a
+ * non-AP MLD station. The %NL80211_ATTR_MLO_TTLM_DLINK and
+ * %NL80211_ATTR_MLO_TTLM_ULINK attributes are used to specify the
+ * TID to Link mapping for downlink/uplink traffic.
+ *
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
*/
@@ -1562,6 +1586,10 @@ enum nl80211_commands {
NL80211_CMD_SET_HW_TIMESTAMP,
+ NL80211_CMD_LINKS_REMOVED,
+
+ NL80211_CMD_SET_TID_TO_LINK_MAPPING,
+
/* add new commands above here */
/* used to define NL80211_CMD_MAX below */
@@ -1819,7 +1847,7 @@ enum nl80211_commands {
* using %CMD_CONTROL_PORT_FRAME. If control port routing over NL80211 is
* to be used then userspace must also use the %NL80211_ATTR_SOCKET_OWNER
* flag. When used with %NL80211_ATTR_CONTROL_PORT_NO_PREAUTH, pre-auth
- * frames are not forwared over the control port.
+ * frames are not forwarded over the control port.
*
* @NL80211_ATTR_TESTDATA: Testmode data blob, passed through to the driver.
* We recommend using nested, driver-specific attributes within this.
@@ -1956,10 +1984,10 @@ enum nl80211_commands {
* bit. Depending on which antennas are selected in the bitmap, 802.11n
* drivers can derive which chainmasks to use (if all antennas belonging to
* a particular chain are disabled this chain should be disabled) and if
- * a chain has diversity antennas wether diversity should be used or not.
+ * a chain has diversity antennas whether diversity should be used or not.
* HT capabilities (STBC, TX Beamforming, Antenna selection) can be
* derived from the available chains after applying the antenna mask.
- * Non-802.11n drivers can derive wether to use diversity or not.
+ * Non-802.11n drivers can derive whether to use diversity or not.
* Drivers may reject configurations or RX/TX mask combinations they cannot
* support by returning -EINVAL.
*
@@ -2529,7 +2557,7 @@ enum nl80211_commands {
* from successful FILS authentication and is used with
* %NL80211_CMD_CONNECT.
*
- * @NL80211_ATTR_FILS_CACHE_ID: A 2-octet identifier advertized by a FILS AP
+ * @NL80211_ATTR_FILS_CACHE_ID: A 2-octet identifier advertised by a FILS AP
* identifying the scope of PMKSAs. This is used with
* @NL80211_CMD_SET_PMKSA and @NL80211_CMD_DEL_PMKSA.
*
@@ -2683,11 +2711,13 @@ enum nl80211_commands {
*
* @NL80211_ATTR_FILS_DISCOVERY: Optional parameter to configure FILS
* discovery. It is a nested attribute, see
- * &enum nl80211_fils_discovery_attributes.
+ * &enum nl80211_fils_discovery_attributes. Userspace should pass an empty
+ * nested attribute to disable this feature and delete the templates.
*
* @NL80211_ATTR_UNSOL_BCAST_PROBE_RESP: Optional parameter to configure
* unsolicited broadcast probe response. It is a nested attribute, see
- * &enum nl80211_unsol_bcast_probe_resp_attributes.
+ * &enum nl80211_unsol_bcast_probe_resp_attributes. Userspace should pass an empty
+ * nested attribute to disable this feature and delete the templates.
*
* @NL80211_ATTR_S1G_CAPABILITY: S1G Capability information element (from
* association request when used with NL80211_CMD_NEW_STATION)
@@ -2805,6 +2835,22 @@ enum nl80211_commands {
* index. If the userspace includes more RNR elements than number of
* MBSSID elements then these will be added in every EMA beacon.
*
+ * @NL80211_ATTR_MLO_LINK_DISABLED: Flag attribute indicating that the link is
+ * disabled.
+ *
+ * @NL80211_ATTR_BSS_DUMP_INCLUDE_USE_DATA: Include BSS usage data, i.e.
+ * include BSSes that can only be used in restricted scenarios and/or
+ * cannot be used at all.
+ *
+ * @NL80211_ATTR_MLO_TTLM_DLINK: Binary attribute specifying the downlink TID to
+ * link mapping. The length is 8 * sizeof(u16). For each TID the link
+ * mapping is as defined in section 9.4.2.314 (TID-To-Link Mapping element)
+ * in Draft P802.11be_D4.0.
+ * @NL80211_ATTR_MLO_TTLM_ULINK: Binary attribute specifying the uplink TID to
+ * link mapping. The length is 8 * sizeof(u16). For each TID the link
+ * mapping is as defined in section 9.4.2.314 (TID-To-Link Mapping element)
+ * in Draft P802.11be_D4.0.
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -3341,6 +3387,13 @@ enum nl80211_attrs {
NL80211_ATTR_EMA_RNR_ELEMS,
+ NL80211_ATTR_MLO_LINK_DISABLED,
+
+ NL80211_ATTR_BSS_DUMP_INCLUDE_USE_DATA,
+
+ NL80211_ATTR_MLO_TTLM_DLINK,
+ NL80211_ATTR_MLO_TTLM_ULINK,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -3667,6 +3720,13 @@ enum nl80211_eht_ru_alloc {
* (u8, see &enum nl80211_eht_gi)
* @NL80211_RATE_INFO_EHT_RU_ALLOC: EHT RU allocation, if not present then
* non-OFDMA was used (u8, see &enum nl80211_eht_ru_alloc)
+ * @NL80211_RATE_INFO_S1G_MCS: S1G MCS index (u8, 0-10)
+ * @NL80211_RATE_INFO_S1G_NSS: S1G NSS value (u8, 1-4)
+ * @NL80211_RATE_INFO_1_MHZ_WIDTH: 1 MHz S1G rate
+ * @NL80211_RATE_INFO_2_MHZ_WIDTH: 2 MHz S1G rate
+ * @NL80211_RATE_INFO_4_MHZ_WIDTH: 4 MHz S1G rate
+ * @NL80211_RATE_INFO_8_MHZ_WIDTH: 8 MHz S1G rate
+ * @NL80211_RATE_INFO_16_MHZ_WIDTH: 16 MHz S1G rate
* @__NL80211_RATE_INFO_AFTER_LAST: internal use
*/
enum nl80211_rate_info {
@@ -3693,6 +3753,13 @@ enum nl80211_rate_info {
NL80211_RATE_INFO_EHT_NSS,
NL80211_RATE_INFO_EHT_GI,
NL80211_RATE_INFO_EHT_RU_ALLOC,
+ NL80211_RATE_INFO_S1G_MCS,
+ NL80211_RATE_INFO_S1G_NSS,
+ NL80211_RATE_INFO_1_MHZ_WIDTH,
+ NL80211_RATE_INFO_2_MHZ_WIDTH,
+ NL80211_RATE_INFO_4_MHZ_WIDTH,
+ NL80211_RATE_INFO_8_MHZ_WIDTH,
+ NL80211_RATE_INFO_16_MHZ_WIDTH,
/* keep last */
__NL80211_RATE_INFO_AFTER_LAST,
@@ -4133,7 +4200,7 @@ enum nl80211_wmm_rule {
* (100 * dBm).
* @NL80211_FREQUENCY_ATTR_DFS_STATE: current state for DFS
* (enum nl80211_dfs_state)
- * @NL80211_FREQUENCY_ATTR_DFS_TIME: time in miliseconds for how long
+ * @NL80211_FREQUENCY_ATTR_DFS_TIME: time in milliseconds for how long
* this channel is in this DFS state.
* @NL80211_FREQUENCY_ATTR_NO_HT40_MINUS: HT40- isn't possible with this
* channel as the control channel
@@ -4187,6 +4254,16 @@ enum nl80211_wmm_rule {
* as the primary or any of the secondary channels isn't possible
* @NL80211_FREQUENCY_ATTR_NO_EHT: EHT operation is not allowed on this channel
* in current regulatory domain.
+ * @NL80211_FREQUENCY_ATTR_PSD: Power spectral density (in dBm) that
+ * is allowed on this channel in current regulatory domain.
+ * @NL80211_FREQUENCY_ATTR_DFS_CONCURRENT: Operation on this channel is
+ * allowed for peer-to-peer or ad hoc communication under the control
+ * of a DFS master which operates on the same channel (FCC-594280 D01
+ * Section B.3). Should be used together with %NL80211_RRF_DFS only.
+ * @NL80211_FREQUENCY_ATTR_NO_UHB_VLP_CLIENT: Client connection to VLP AP
+ * not allowed using this channel
+ * @NL80211_FREQUENCY_ATTR_NO_UHB_AFC_CLIENT: Client connection to AFC AP
+ * not allowed using this channel
* @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number
* currently defined
* @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use
@@ -4225,6 +4302,10 @@ enum nl80211_frequency_attr {
NL80211_FREQUENCY_ATTR_16MHZ,
NL80211_FREQUENCY_ATTR_NO_320MHZ,
NL80211_FREQUENCY_ATTR_NO_EHT,
+ NL80211_FREQUENCY_ATTR_PSD,
+ NL80211_FREQUENCY_ATTR_DFS_CONCURRENT,
+ NL80211_FREQUENCY_ATTR_NO_UHB_VLP_CLIENT,
+ NL80211_FREQUENCY_ATTR_NO_UHB_AFC_CLIENT,
/* keep last */
__NL80211_FREQUENCY_ATTR_AFTER_LAST,
@@ -4325,6 +4406,8 @@ enum nl80211_reg_type {
* a given frequency range. The value is in mBm (100 * dBm).
* @NL80211_ATTR_DFS_CAC_TIME: DFS CAC time in milliseconds.
* If not present or 0 default CAC time will be used.
+ * @NL80211_ATTR_POWER_RULE_PSD: power spectral density (in dBm).
+ * This could be negative.
* @NL80211_REG_RULE_ATTR_MAX: highest regulatory rule attribute number
* currently defined
* @__NL80211_REG_RULE_ATTR_AFTER_LAST: internal use
@@ -4342,6 +4425,8 @@ enum nl80211_reg_rule_attr {
NL80211_ATTR_DFS_CAC_TIME,
+ NL80211_ATTR_POWER_RULE_PSD,
+
/* keep last */
__NL80211_REG_RULE_ATTR_AFTER_LAST,
NL80211_REG_RULE_ATTR_MAX = __NL80211_REG_RULE_ATTR_AFTER_LAST - 1
@@ -4424,6 +4509,14 @@ enum nl80211_sched_scan_match_attr {
* @NL80211_RRF_NO_160MHZ: 160MHz operation not allowed
* @NL80211_RRF_NO_HE: HE operation not allowed
* @NL80211_RRF_NO_320MHZ: 320MHz operation not allowed
+ * @NL80211_RRF_NO_EHT: EHT operation not allowed
+ * @NL80211_RRF_PSD: Ruleset has power spectral density value
+ * @NL80211_RRF_DFS_CONCURRENT: Operation on this channel is allowed for
+ * peer-to-peer or ad hoc communication under the control of a DFS master
+ * which operates on the same channel (FCC-594280 D01 Section B.3).
+ * Should be used together with %NL80211_RRF_DFS only.
+ * @NL80211_RRF_NO_UHB_VLP_CLIENT: Client connection to VLP AP not allowed
+ * @NL80211_RRF_NO_UHB_AFC_CLIENT: Client connection to AFC AP not allowed
*/
enum nl80211_reg_rule_flags {
NL80211_RRF_NO_OFDM = 1<<0,
@@ -4443,6 +4536,11 @@ enum nl80211_reg_rule_flags {
NL80211_RRF_NO_160MHZ = 1<<16,
NL80211_RRF_NO_HE = 1<<17,
NL80211_RRF_NO_320MHZ = 1<<18,
+ NL80211_RRF_NO_EHT = 1<<19,
+ NL80211_RRF_PSD = 1<<20,
+ NL80211_RRF_DFS_CONCURRENT = 1<<21,
+ NL80211_RRF_NO_UHB_VLP_CLIENT = 1<<22,
+ NL80211_RRF_NO_UHB_AFC_CLIENT = 1<<23,
};
#define NL80211_RRF_PASSIVE_SCAN NL80211_RRF_NO_IR
@@ -4980,6 +5078,33 @@ enum nl80211_bss_scan_width {
};
/**
+ * enum nl80211_bss_use_for - bitmap indicating possible BSS use
+ * @NL80211_BSS_USE_FOR_NORMAL: Use this BSS for normal "connection",
+ * including IBSS/MBSS depending on the type.
+ * @NL80211_BSS_USE_FOR_MLD_LINK: This BSS can be used as a link in an
+ * MLO connection. Note that for an MLO connection, all links including
+ * the assoc link must have this flag set, and the assoc link must
+ * additionally have %NL80211_BSS_USE_FOR_NORMAL set.
+ */
+enum nl80211_bss_use_for {
+ NL80211_BSS_USE_FOR_NORMAL = 1 << 0,
+ NL80211_BSS_USE_FOR_MLD_LINK = 1 << 1,
+};
+
+/**
+ * enum nl80211_bss_cannot_use_reasons - reason(s) connection to a
+ * BSS isn't possible
+ * @NL80211_BSS_CANNOT_USE_NSTR_NONPRIMARY: NSTR nonprimary links aren't
+ * supported by the device, and this BSS entry represents one.
+ * @NL80211_BSS_CANNOT_USE_UHB_PWR_MISMATCH: the STA does not support
+ * the AP power type (SP, VLP, AP) that the AP uses.
+ */
+enum nl80211_bss_cannot_use_reasons {
+ NL80211_BSS_CANNOT_USE_NSTR_NONPRIMARY = 1 << 0,
+ NL80211_BSS_CANNOT_USE_UHB_PWR_MISMATCH = 1 << 1,
+};
+
+/**
* enum nl80211_bss - netlink attributes for a BSS
*
* @__NL80211_BSS_INVALID: invalid
@@ -5010,7 +5135,7 @@ enum nl80211_bss_scan_width {
* elements from a Beacon frame (bin); not present if no Beacon frame has
* yet been received
* @NL80211_BSS_CHAN_WIDTH: channel width of the control channel
- * (u32, enum nl80211_bss_scan_width)
+ * (u32, enum nl80211_bss_scan_width) - No longer used!
* @NL80211_BSS_BEACON_TSF: TSF of the last received beacon (u64)
* (not present if no beacon frame has been received yet)
* @NL80211_BSS_PRESP_DATA: the data in @NL80211_BSS_INFORMATION_ELEMENTS and
@@ -5031,6 +5156,14 @@ enum nl80211_bss_scan_width {
* @NL80211_BSS_FREQUENCY_OFFSET: frequency offset in KHz
* @NL80211_BSS_MLO_LINK_ID: MLO link ID of the BSS (u8).
* @NL80211_BSS_MLD_ADDR: MLD address of this BSS if connected to it.
+ * @NL80211_BSS_USE_FOR: u32 bitmap attribute indicating what the BSS can be
+ * used for, see &enum nl80211_bss_use_for.
+ * @NL80211_BSS_CANNOT_USE_REASONS: Indicates the reason that this BSS cannot
+ * be used for all or some of the possible uses by the device reporting it,
+ * even though its presence was detected.
+ * This is a u64 attribute containing a bitmap of values from
+ * &enum nl80211_bss_cannot_use_reasons; note that the attribute may be missing
+ * if no reasons are specified.
* @__NL80211_BSS_AFTER_LAST: internal
* @NL80211_BSS_MAX: highest BSS attribute
*/
@@ -5058,6 +5191,8 @@ enum nl80211_bss {
NL80211_BSS_FREQUENCY_OFFSET,
NL80211_BSS_MLO_LINK_ID,
NL80211_BSS_MLD_ADDR,
+ NL80211_BSS_USE_FOR,
+ NL80211_BSS_CANNOT_USE_REASONS,
/* keep last */
__NL80211_BSS_AFTER_LAST,
@@ -5406,7 +5541,7 @@ enum nl80211_tx_rate_setting {
* (%NL80211_TID_CONFIG_ATTR_TIDS, %NL80211_TID_CONFIG_ATTR_OVERRIDE).
* @NL80211_TID_CONFIG_ATTR_PEER_SUPP: same as the previous per-vif one, but
* per peer instead.
- * @NL80211_TID_CONFIG_ATTR_OVERRIDE: flag attribue, if set indicates
+ * @NL80211_TID_CONFIG_ATTR_OVERRIDE: flag attribute, if set indicates
* that the new configuration overrides all previous peer
* configurations, otherwise previous peer specific configurations
* should be left untouched.
@@ -5789,7 +5924,7 @@ enum nl80211_attr_coalesce_rule {
/**
* enum nl80211_coalesce_condition - coalesce rule conditions
- * @NL80211_COALESCE_CONDITION_MATCH: coalaesce Rx packets when patterns
+ * @NL80211_COALESCE_CONDITION_MATCH: coalesce Rx packets when patterns
* in a rule are matched.
* @NL80211_COALESCE_CONDITION_NO_MATCH: coalesce Rx packets when patterns
* in a rule are not matched.
@@ -5888,7 +6023,7 @@ enum nl80211_if_combination_attrs {
* enum nl80211_plink_state - state of a mesh peer link finite state machine
*
* @NL80211_PLINK_LISTEN: initial state, considered the implicit
- * state of non existent mesh peer links
+ * state of non-existent mesh peer links
* @NL80211_PLINK_OPN_SNT: mesh plink open frame has been sent to
* this mesh peer
* @NL80211_PLINK_OPN_RCVD: mesh plink open frame has been received
@@ -6181,7 +6316,7 @@ enum nl80211_feature_flags {
* request to use RRM (see %NL80211_ATTR_USE_RRM) with
* %NL80211_CMD_ASSOCIATE and %NL80211_CMD_CONNECT requests, which will set
* the ASSOC_REQ_USE_RRM flag in the association request even if
- * NL80211_FEATURE_QUIET is not advertized.
+ * NL80211_FEATURE_QUIET is not advertised.
* @NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER: This device supports MU-MIMO air
* sniffer which means that it can be configured to hear packets from
* certain groups which can be configured by the
@@ -6193,13 +6328,15 @@ enum nl80211_feature_flags {
* the BSS that the interface that requested the scan is connected to
* (if available).
* @NL80211_EXT_FEATURE_BSS_PARENT_TSF: Per BSS, this driver reports the
- * time the last beacon/probe was received. The time is the TSF of the
- * BSS that the interface that requested the scan is connected to
- * (if available).
+ * time the last beacon/probe was received. For a non-MLO connection, the
+ * time is the TSF of the BSS that the interface that requested the scan is
+ * connected to (if available). For an MLO connection, the time is the TSF
+ * of the BSS corresponding with link ID specified in the scan request (if
+ * specified).
* @NL80211_EXT_FEATURE_SET_SCAN_DWELL: This driver supports configuration of
* channel dwell time.
* @NL80211_EXT_FEATURE_BEACON_RATE_LEGACY: Driver supports beacon rate
- * configuration (AP/mesh), supporting a legacy (non HT/VHT) rate.
+ * configuration (AP/mesh), supporting a legacy (non-HT/VHT) rate.
* @NL80211_EXT_FEATURE_BEACON_RATE_HT: Driver supports beacon rate
* configuration (AP/mesh) with HT rates.
* @NL80211_EXT_FEATURE_BEACON_RATE_VHT: Driver supports beacon rate
@@ -6372,6 +6509,17 @@ enum nl80211_feature_flags {
* in authentication and deauthentication frames sent to unassociated peer
* using @NL80211_CMD_FRAME.
*
+ * @NL80211_EXT_FEATURE_OWE_OFFLOAD: Driver/Device wants to do OWE DH IE
+ * handling in station mode.
+ *
+ * @NL80211_EXT_FEATURE_OWE_OFFLOAD_AP: Driver/Device wants to do OWE DH IE
+ * handling in AP mode.
+ *
+ * @NL80211_EXT_FEATURE_DFS_CONCURRENT: The device supports peer-to-peer or
+ * ad hoc operation on DFS channels under the control of a concurrent
+ * DFS master on the same channel as described in FCC-594280 D01
+ * (Section B.3). This, for example, allows P2P GO and P2P clients to
+ * operate on DFS channels as long as there's a concurrent BSS connection.
+ *
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
*/
@@ -6443,6 +6591,9 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_PUNCT,
NL80211_EXT_FEATURE_SECURE_NAN,
NL80211_EXT_FEATURE_AUTH_AND_DEAUTH_RANDOM_TA,
+ NL80211_EXT_FEATURE_OWE_OFFLOAD,
+ NL80211_EXT_FEATURE_OWE_OFFLOAD_AP,
+ NL80211_EXT_FEATURE_DFS_CONCURRENT,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
@@ -6527,7 +6678,7 @@ enum nl80211_timeout_reason {
* request parameters IE in the probe request
* @NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP: accept broadcast probe responses
* @NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE: send probe request frames at
- * rate of at least 5.5M. In case non OCE AP is discovered in the channel,
+ * rate of at least 5.5M. In case non-OCE AP is discovered in the channel,
* only the first probe req in the channel will be sent in high rate.
* @NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION: allow probe request
* tx deferral (dot11FILSProbeDelay shall be set to 15ms)
@@ -6563,7 +6714,7 @@ enum nl80211_timeout_reason {
* received on the 2.4/5 GHz channels to actively scan only the 6GHz
* channels on which APs are expected to be found. Note that when not set,
* the scan logic would scan all 6GHz channels, but since transmission of
- * probe requests on non PSC channels is limited, it is highly likely that
+ * probe requests on non-PSC channels is limited, it is highly likely that
* these channels would passively be scanned. Also note that when the flag
* is set, in addition to the colocated APs, PSC channels would also be
* scanned if the user space has asked for it.
@@ -6895,7 +7046,7 @@ enum nl80211_nan_func_term_reason {
* The instance ID for the follow up Service Discovery Frame. This is u8.
* @NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID: relevant if the function's type
* is follow up. This is a u8.
- * The requestor instance ID for the follow up Service Discovery Frame.
+ * The requester instance ID for the follow up Service Discovery Frame.
* @NL80211_NAN_FUNC_FOLLOW_UP_DEST: the MAC address of the recipient of the
* follow up Service Discovery Frame. This is a binary attribute.
* @NL80211_NAN_FUNC_CLOSE_RANGE: is this function limited for devices in a
@@ -7285,7 +7436,7 @@ enum nl80211_peer_measurement_attrs {
* @NL80211_PMSR_FTM_CAPA_ATTR_TRIGGER_BASED: flag attribute indicating if
* trigger based ranging measurement is supported
* @NL80211_PMSR_FTM_CAPA_ATTR_NON_TRIGGER_BASED: flag attribute indicating
- * if non trigger based ranging measurement is supported
+ * if non-trigger-based ranging measurement is supported
*
* @NUM_NL80211_PMSR_FTM_CAPA_ATTR: internal
* @NL80211_PMSR_FTM_CAPA_ATTR_MAX: highest attribute number
@@ -7339,7 +7490,7 @@ enum nl80211_peer_measurement_ftm_capa {
* if neither %NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED nor
* %NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED is set, EDCA based
* ranging will be used.
- * @NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED: request non trigger based
+ * @NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED: request non-trigger-based
* ranging measurement (flag)
* This attribute and %NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED are
* mutually exclusive.
@@ -7417,7 +7568,7 @@ enum nl80211_peer_measurement_ftm_failure_reasons {
* @NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_ATTEMPTS: number of FTM Request frames
* transmitted (u32, optional)
* @NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_SUCCESSES: number of FTM Request frames
- * that were acknowleged (u32, optional)
+ * that were acknowledged (u32, optional)
* @NL80211_PMSR_FTM_RESP_ATTR_BUSY_RETRY_TIME: retry time received from the
* busy peer (u32, seconds)
* @NL80211_PMSR_FTM_RESP_ATTR_NUM_BURSTS_EXP: actual number of bursts exponent
@@ -7578,7 +7729,7 @@ enum nl80211_iftype_akm_attributes {
* @NL80211_FILS_DISCOVERY_ATTR_INT_MIN: Minimum packet interval (u32, TU).
* Allowed range: 0..10000 (TU = Time Unit)
* @NL80211_FILS_DISCOVERY_ATTR_INT_MAX: Maximum packet interval (u32, TU).
- * Allowed range: 0..10000 (TU = Time Unit)
+ * Allowed range: 0..10000 (TU = Time Unit). If set to 0, the feature is disabled.
* @NL80211_FILS_DISCOVERY_ATTR_TMPL: Template data for FILS discovery action
* frame including the headers.
*
@@ -7611,7 +7762,8 @@ enum nl80211_fils_discovery_attributes {
*
* @NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_INT: Maximum packet interval (u32, TU).
* Allowed range: 0..20 (TU = Time Unit). IEEE P802.11ax/D6.0
- * 26.17.2.3.2 (AP behavior for fast passive scanning).
+ * 26.17.2.3.2 (AP behavior for fast passive scanning). If set to 0, the feature is
+ * disabled.
* @NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_TMPL: Unsolicited broadcast probe response
* frame template (binary).
*
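As a worked example of the new TID-to-link mapping attributes: the payload is documented above as 8 * sizeof(u16), one link bitmap per TID, so a receiver can unpack it as below. This is a sketch; the 8-TID constant and the buffer handling are assumptions.

#include <stdint.h>
#include <string.h>

#define TTLM_NUM_TIDS 8	/* assumption: 8 TIDs, matching 8 * sizeof(u16) */

/* Unpack a TTLM payload: ttlm[tid] becomes the bitmap of link IDs that
 * TID is mapped to. */
static void parse_ttlm(const void *payload, uint16_t ttlm[TTLM_NUM_TIDS])
{
	memcpy(ttlm, payload, TTLM_NUM_TIDS * sizeof(uint16_t));
}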
diff --git a/include/uapi/linux/npcm-video.h b/include/uapi/linux/npcm-video.h
new file mode 100644
index 000000000000..1d39f6f38c96
--- /dev/null
+++ b/include/uapi/linux/npcm-video.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * Controls header for NPCM video driver
+ *
+ * Copyright (C) 2022 Nuvoton Technologies
+ */
+
+#ifndef _UAPI_LINUX_NPCM_VIDEO_H
+#define _UAPI_LINUX_NPCM_VIDEO_H
+
+#include <linux/v4l2-controls.h>
+
+/*
+ * Check Documentation/userspace-api/media/drivers/npcm-video.rst for control
+ * details.
+ */
+
+/*
+ * This control is meant to set the mode of NPCM Video Capture/Differentiation
+ * (VCD) engine.
+ *
+ * The VCD engine supports two modes:
+ * COMPLETE - Capture the next complete frame into memory.
+ * DIFF - Compare the incoming frame with the frame stored in memory, and
+ * update the differentiated frame in memory.
+ */
+#define V4L2_CID_NPCM_CAPTURE_MODE (V4L2_CID_USER_NPCM_BASE + 0)
+
+enum v4l2_npcm_capture_mode {
+ V4L2_NPCM_CAPTURE_MODE_COMPLETE = 0, /* COMPLETE mode */
+ V4L2_NPCM_CAPTURE_MODE_DIFF = 1, /* DIFF mode */
+};
+
+/*
+ * This control is meant to get the count of compressed HEXTILE rectangles,
+ * which corresponds to the number of differentiated frames when VCD is in
+ * DIFF mode. The count is always 1 when VCD is in COMPLETE mode.
+ */
+#define V4L2_CID_NPCM_RECT_COUNT (V4L2_CID_USER_NPCM_BASE + 1)
+
+#endif /* _UAPI_LINUX_NPCM_VIDEO_H */
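Both controls plug into the standard V4L2 control API. A hedged sketch of selecting DIFF mode (the device path is a hypothetical argument):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include <linux/npcm-video.h>

/* Switch the VCD engine to DIFF mode via VIDIOC_S_CTRL. */
static int npcm_set_diff_mode(const char *dev)
{
	struct v4l2_control ctrl = {
		.id	= V4L2_CID_NPCM_CAPTURE_MODE,
		.value	= V4L2_NPCM_CAPTURE_MODE_DIFF,
	};
	int fd = open(dev, O_RDWR);
	int ret;

	if (fd < 0)
		return -1;
	ret = ioctl(fd, VIDIOC_S_CTRL, &ctrl);
	close(fd);
	return ret;
}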
diff --git a/include/uapi/linux/nsm.h b/include/uapi/linux/nsm.h
new file mode 100644
index 000000000000..e529f232f6c0
--- /dev/null
+++ b/include/uapi/linux/nsm.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ */
+
+#ifndef __UAPI_LINUX_NSM_H
+#define __UAPI_LINUX_NSM_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define NSM_MAGIC 0x0A
+
+#define NSM_REQUEST_MAX_SIZE 0x1000
+#define NSM_RESPONSE_MAX_SIZE 0x3000
+
+struct nsm_iovec {
+ __u64 addr; /* Virtual address of target buffer */
+ __u64 len; /* Length of target buffer */
+};
+
+/* Raw NSM message. Only available with CAP_SYS_ADMIN. */
+struct nsm_raw {
+ /* Request from user */
+ struct nsm_iovec request;
+ /* Response to user */
+ struct nsm_iovec response;
+};
+#define NSM_IOCTL_RAW _IOWR(NSM_MAGIC, 0x0, struct nsm_raw)
+
+#endif /* __UAPI_LINUX_NSM_H */
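The raw interface passes request and response buffers by address/length pairs. A hedged usage sketch (the /dev/nsm node name is an assumption):

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/nsm.h>

/* Send req_len request bytes to the NSM and receive up to rsp_len
 * response bytes; returns the ioctl() result. */
static int nsm_raw_roundtrip(const void *req, uint64_t req_len,
			     void *rsp, uint64_t rsp_len)
{
	struct nsm_raw raw = {
		.request  = { .addr = (uint64_t)(uintptr_t)req, .len = req_len },
		.response = { .addr = (uint64_t)(uintptr_t)rsp, .len = rsp_len },
	};
	int fd = open("/dev/nsm", O_RDWR);	/* hypothetical node */
	int ret;

	if (fd < 0)
		return -1;
	ret = ioctl(fd, NSM_IOCTL_RAW, &raw);
	close(fd);
	return ret;
}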
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index c5d62ee82567..efc82c318fa2 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -765,6 +765,7 @@ struct ovs_action_push_vlan {
*/
enum ovs_hash_alg {
OVS_HASH_ALG_L4,
+ OVS_HASH_ALG_SYM_L4,
};
/*
@@ -964,6 +965,7 @@ struct check_pkt_len_arg {
* start of the packet or at the start of the l3 header depending on the value
* of l3 tunnel flag in the tun_flags field of OVS_ACTION_ATTR_ADD_MPLS
* argument.
+ * @OVS_ACTION_ATTR_DROP: Explicit drop action.
*
* Only a single header can be set with a single %OVS_ACTION_ATTR_SET. Not all
* fields within a header are modifiable, e.g. the IPv4 protocol and fragment
@@ -1001,6 +1003,7 @@ enum ovs_action_attr {
OVS_ACTION_ATTR_CHECK_PKT_LEN, /* Nested OVS_CHECK_PKT_LEN_ATTR_*. */
OVS_ACTION_ATTR_ADD_MPLS, /* struct ovs_action_add_mpls. */
OVS_ACTION_ATTR_DEC_TTL, /* Nested OVS_DEC_TTL_ATTR_*. */
+ OVS_ACTION_ATTR_DROP, /* u32 error code. */
__OVS_ACTION_ATTR_MAX, /* Nothing past this will be accepted
* from userspace. */
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index dc2000e0fe3a..a39193213ff2 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -80,6 +80,7 @@
#define PCI_HEADER_TYPE_NORMAL 0
#define PCI_HEADER_TYPE_BRIDGE 1
#define PCI_HEADER_TYPE_CARDBUS 2
+#define PCI_HEADER_TYPE_MFD 0x80 /* Multi-Function Device (possible) */
#define PCI_BIST 0x0f /* 8 bits */
#define PCI_BIST_CODE_MASK 0x0f /* Return result */
@@ -637,6 +638,7 @@
#define PCI_EXP_RTCAP 0x1e /* Root Capabilities */
#define PCI_EXP_RTCAP_CRSVIS 0x0001 /* CRS Software Visibility capability */
#define PCI_EXP_RTSTA 0x20 /* Root Status */
+#define PCI_EXP_RTSTA_PME_RQ_ID 0x0000ffff /* PME Requester ID */
#define PCI_EXP_RTSTA_PME 0x00010000 /* PME status */
#define PCI_EXP_RTSTA_PENDING 0x00020000 /* PME pending */
/*
@@ -738,6 +740,7 @@
#define PCI_EXT_CAP_ID_DVSEC 0x23 /* Designated Vendor-Specific */
#define PCI_EXT_CAP_ID_DLF 0x25 /* Data Link Feature */
#define PCI_EXT_CAP_ID_PL_16GT 0x26 /* Physical Layer 16.0 GT/s */
+#define PCI_EXT_CAP_ID_PL_32GT 0x2A /* Physical Layer 32.0 GT/s */
#define PCI_EXT_CAP_ID_DOE 0x2E /* Data Object Exchange */
#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_DOE
@@ -929,12 +932,13 @@
/* Process Address Space ID */
#define PCI_PASID_CAP 0x04 /* PASID feature register */
-#define PCI_PASID_CAP_EXEC 0x02 /* Exec permissions Supported */
-#define PCI_PASID_CAP_PRIV 0x04 /* Privilege Mode Supported */
+#define PCI_PASID_CAP_EXEC 0x0002 /* Exec permissions Supported */
+#define PCI_PASID_CAP_PRIV 0x0004 /* Privilege Mode Supported */
+#define PCI_PASID_CAP_WIDTH 0x1f00
#define PCI_PASID_CTRL 0x06 /* PASID control register */
-#define PCI_PASID_CTRL_ENABLE 0x01 /* Enable bit */
-#define PCI_PASID_CTRL_EXEC 0x02 /* Exec permissions Enable */
-#define PCI_PASID_CTRL_PRIV 0x04 /* Privilege Mode Enable */
+#define PCI_PASID_CTRL_ENABLE 0x0001 /* Enable bit */
+#define PCI_PASID_CTRL_EXEC 0x0002 /* Exec permissions Enable */
+#define PCI_PASID_CTRL_PRIV 0x0004 /* Privilege Mode Enable */
#define PCI_EXT_CAP_PASID_SIZEOF 8
/* Single Root I/O Virtualization */
@@ -974,6 +978,8 @@
#define PCI_LTR_VALUE_MASK 0x000003ff
#define PCI_LTR_SCALE_MASK 0x00001c00
#define PCI_LTR_SCALE_SHIFT 10
+#define PCI_LTR_NOSNOOP_VALUE 0x03ff0000 /* Max No-Snoop Latency Value */
+#define PCI_LTR_NOSNOOP_SCALE 0x1c000000 /* Scale for Max Value */
#define PCI_EXT_CAP_LTR_SIZEOF 8
/* Access Control Service */
@@ -1041,9 +1047,16 @@
#define PCI_EXP_DPC_STATUS 0x08 /* DPC Status */
#define PCI_EXP_DPC_STATUS_TRIGGER 0x0001 /* Trigger Status */
#define PCI_EXP_DPC_STATUS_TRIGGER_RSN 0x0006 /* Trigger Reason */
+#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_UNCOR 0x0000 /* Uncorrectable error */
+#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_NFE 0x0002 /* Rcvd ERR_NONFATAL */
+#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_FE 0x0004 /* Rcvd ERR_FATAL */
+#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_IN_EXT 0x0006 /* Reason in Trig Reason Extension field */
#define PCI_EXP_DPC_STATUS_INTERRUPT 0x0008 /* Interrupt Status */
#define PCI_EXP_DPC_RP_BUSY 0x0010 /* Root Port Busy */
#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT 0x0060 /* Trig Reason Extension */
+#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_RP_PIO 0x0000 /* RP PIO error */
+#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_SW_TRIGGER 0x0020 /* DPC SW Trigger bit */
+#define PCI_EXP_DPC_RP_PIO_FEP 0x1f00 /* RP PIO First Err Ptr */
#define PCI_EXP_DPC_SOURCE_ID 0x0A /* DPC Source Identifier */
@@ -1087,6 +1100,8 @@
#define PCI_L1SS_CTL1_LTR_L12_TH_VALUE 0x03ff0000 /* LTR_L1.2_THRESHOLD_Value */
#define PCI_L1SS_CTL1_LTR_L12_TH_SCALE 0xe0000000 /* LTR_L1.2_THRESHOLD_Scale */
#define PCI_L1SS_CTL2 0x0c /* Control 2 Register */
+#define PCI_L1SS_CTL2_T_PWR_ON_SCALE 0x00000003 /* T_POWER_ON Scale */
+#define PCI_L1SS_CTL2_T_PWR_ON_VALUE 0x000000f8 /* T_POWER_ON Value */
/* Designated Vendor-Specific (DVSEC, PCI_EXT_CAP_ID_DVSEC) */
#define PCI_DVSEC_HEADER1 0x4 /* Designated Vendor-Specific Header1 */
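The new PCI_PASID_CAP_WIDTH mask covers the Max PASID Width field of the PASID capability register. A hedged kernel-style sketch of extracting it (the capability offset lookup is assumed done elsewhere, e.g. via pci_find_ext_capability()):

#include <linux/bitfield.h>
#include <linux/pci.h>

/* Return the Max PASID Width field from the PASID capability at
 * offset 'pasid' in config space. */
static u8 pasid_max_width(struct pci_dev *pdev, int pasid)
{
	u16 cap;

	pci_read_config_word(pdev, pasid + PCI_PASID_CAP, &cap);
	return FIELD_GET(PCI_PASID_CAP_WIDTH, cap);
}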
diff --git a/include/uapi/linux/pcitest.h b/include/uapi/linux/pcitest.h
index f9c1af8d141b..94b46b043b53 100644
--- a/include/uapi/linux/pcitest.h
+++ b/include/uapi/linux/pcitest.h
@@ -11,7 +11,8 @@
#define __UAPI_LINUX_PCITEST_H
#define PCITEST_BAR _IO('P', 0x1)
-#define PCITEST_LEGACY_IRQ _IO('P', 0x2)
+#define PCITEST_INTX_IRQ _IO('P', 0x2)
+#define PCITEST_LEGACY_IRQ PCITEST_INTX_IRQ
#define PCITEST_MSI _IOW('P', 0x3, int)
#define PCITEST_WRITE _IOW('P', 0x4, unsigned long)
#define PCITEST_READ _IOW('P', 0x5, unsigned long)
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 37675437b768..3a64499b0f5d 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -204,6 +204,8 @@ enum perf_branch_sample_type_shift {
PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT = 18, /* save privilege mode */
+ PERF_SAMPLE_BRANCH_COUNTERS_SHIFT = 19, /* save occurrences of events on a branch */
+
PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */
};
@@ -235,6 +237,8 @@ enum perf_branch_sample_type {
PERF_SAMPLE_BRANCH_PRIV_SAVE = 1U << PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT,
+ PERF_SAMPLE_BRANCH_COUNTERS = 1U << PERF_SAMPLE_BRANCH_COUNTERS_SHIFT,
+
PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
};
@@ -982,6 +986,12 @@ enum perf_event_type {
* { u64 nr;
* { u64 hw_idx; } && PERF_SAMPLE_BRANCH_HW_INDEX
* { u64 from, to, flags } lbr[nr];
+ * #
+ * # The format of the counters is decided by the
+ * # "branch_counter_nr" and "branch_counter_width",
+ * # which are defined in the ABI.
+ * #
+ * { u64 counters; } cntr[nr] && PERF_SAMPLE_BRANCH_COUNTERS
* } && PERF_SAMPLE_BRANCH_STACK
*
* { u64 abi; # enum perf_sample_regs_abi
@@ -1339,7 +1349,8 @@ union perf_mem_data_src {
#define PERF_MEM_LVLNUM_L2 0x02 /* L2 */
#define PERF_MEM_LVLNUM_L3 0x03 /* L3 */
#define PERF_MEM_LVLNUM_L4 0x04 /* L4 */
-/* 5-0x8 available */
+/* 5-0x7 available */
+#define PERF_MEM_LVLNUM_UNC 0x08 /* Uncached */
#define PERF_MEM_LVLNUM_CXL 0x09 /* CXL */
#define PERF_MEM_LVLNUM_IO 0x0a /* I/O */
#define PERF_MEM_LVLNUM_ANY_CACHE 0x0b /* Any cache */
@@ -1426,6 +1437,9 @@ struct perf_branch_entry {
reserved:31;
};
+/* Size of used info bits in struct perf_branch_entry */
+#define PERF_BRANCH_ENTRY_INFO_BITS_MAX 33
+
union perf_sample_weight {
__u64 full;
#if defined(__LITTLE_ENDIAN_BITFIELD)
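Per the sample layout documented in the hunk above, the optional branch counters array directly follows the lbr[nr] records. A hedged pointer-arithmetic sketch for a sample consumer ('nr' and 'lbr' are assumed already decoded from the record):

#include <stdint.h>
#include <linux/perf_event.h>

/* When PERF_SAMPLE_BRANCH_COUNTERS was requested, cntr[nr] sits
 * immediately after lbr[nr] in the sample record. */
static const uint64_t *branch_counters(const struct perf_branch_entry *lbr,
				       uint64_t nr)
{
	return (const uint64_t *)(lbr + nr);
}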
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index 648a82f32666..ea277039f89d 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -99,7 +99,7 @@ enum {
* versions.
*/
#define TCA_ACT_GACT 5
-#define TCA_ACT_IPT 6
+#define TCA_ACT_IPT 6 /* obsoleted, can be reused */
#define TCA_ACT_PEDIT 7
#define TCA_ACT_MIRRED 8
#define TCA_ACT_NAT 9
@@ -120,7 +120,7 @@ enum tca_id {
TCA_ID_UNSPEC = 0,
TCA_ID_POLICE = 1,
TCA_ID_GACT = TCA_ACT_GACT,
- TCA_ID_IPT = TCA_ACT_IPT,
+ TCA_ID_IPT = TCA_ACT_IPT, /* Obsoleted, can be reused */
TCA_ID_PEDIT = TCA_ACT_PEDIT,
TCA_ID_MIRRED = TCA_ACT_MIRRED,
TCA_ID_NAT = TCA_ACT_NAT,
@@ -280,37 +280,6 @@ struct tc_u32_pcnt {
#define TC_U32_MAXDEPTH 8
-
-/* RSVP filter */
-
-enum {
- TCA_RSVP_UNSPEC,
- TCA_RSVP_CLASSID,
- TCA_RSVP_DST,
- TCA_RSVP_SRC,
- TCA_RSVP_PINFO,
- TCA_RSVP_POLICE,
- TCA_RSVP_ACT,
- __TCA_RSVP_MAX
-};
-
-#define TCA_RSVP_MAX (__TCA_RSVP_MAX - 1 )
-
-struct tc_rsvp_gpi {
- __u32 key;
- __u32 mask;
- int offset;
-};
-
-struct tc_rsvp_pinfo {
- struct tc_rsvp_gpi dpi;
- struct tc_rsvp_gpi spi;
- __u8 protocol;
- __u8 tunnelid;
- __u8 tunnelhdr;
- __u8 pad;
-};
-
/* ROUTE filter */
enum {
@@ -341,22 +310,6 @@ enum {
#define TCA_FW_MAX (__TCA_FW_MAX - 1)
-/* TC index filter */
-
-enum {
- TCA_TCINDEX_UNSPEC,
- TCA_TCINDEX_HASH,
- TCA_TCINDEX_MASK,
- TCA_TCINDEX_SHIFT,
- TCA_TCINDEX_FALL_THROUGH,
- TCA_TCINDEX_CLASSID,
- TCA_TCINDEX_POLICE,
- TCA_TCINDEX_ACT,
- __TCA_TCINDEX_MAX
-};
-
-#define TCA_TCINDEX_MAX (__TCA_TCINDEX_MAX - 1)
-
/* Flow filter */
enum {
@@ -594,6 +547,13 @@ enum {
TCA_FLOWER_KEY_L2TPV3_SID, /* be32 */
+ TCA_FLOWER_L2_MISS, /* u8 */
+
+ TCA_FLOWER_KEY_CFM, /* nested */
+
+ TCA_FLOWER_KEY_SPI, /* be32 */
+ TCA_FLOWER_KEY_SPI_MASK, /* be32 */
+
__TCA_FLOWER_MAX,
};
@@ -702,6 +662,15 @@ enum {
TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST = (1 << 1),
};
+enum {
+ TCA_FLOWER_KEY_CFM_OPT_UNSPEC,
+ TCA_FLOWER_KEY_CFM_MD_LEVEL,
+ TCA_FLOWER_KEY_CFM_OPCODE,
+ __TCA_FLOWER_KEY_CFM_OPT_MAX,
+};
+
+#define TCA_FLOWER_KEY_CFM_OPT_MAX (__TCA_FLOWER_KEY_CFM_OPT_MAX - 1)
+
#define TCA_FLOWER_MASK_FLAGS_RANGE (1 << 0) /* Range-based match */
/* Match-all classifier */
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 51a7addc56c6..a3cd0c2dc995 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -477,115 +477,6 @@ enum {
#define TCA_HFSC_MAX (__TCA_HFSC_MAX - 1)
-
-/* CBQ section */
-
-#define TC_CBQ_MAXPRIO 8
-#define TC_CBQ_MAXLEVEL 8
-#define TC_CBQ_DEF_EWMA 5
-
-struct tc_cbq_lssopt {
- unsigned char change;
- unsigned char flags;
-#define TCF_CBQ_LSS_BOUNDED 1
-#define TCF_CBQ_LSS_ISOLATED 2
- unsigned char ewma_log;
- unsigned char level;
-#define TCF_CBQ_LSS_FLAGS 1
-#define TCF_CBQ_LSS_EWMA 2
-#define TCF_CBQ_LSS_MAXIDLE 4
-#define TCF_CBQ_LSS_MINIDLE 8
-#define TCF_CBQ_LSS_OFFTIME 0x10
-#define TCF_CBQ_LSS_AVPKT 0x20
- __u32 maxidle;
- __u32 minidle;
- __u32 offtime;
- __u32 avpkt;
-};
-
-struct tc_cbq_wrropt {
- unsigned char flags;
- unsigned char priority;
- unsigned char cpriority;
- unsigned char __reserved;
- __u32 allot;
- __u32 weight;
-};
-
-struct tc_cbq_ovl {
- unsigned char strategy;
-#define TC_CBQ_OVL_CLASSIC 0
-#define TC_CBQ_OVL_DELAY 1
-#define TC_CBQ_OVL_LOWPRIO 2
-#define TC_CBQ_OVL_DROP 3
-#define TC_CBQ_OVL_RCLASSIC 4
- unsigned char priority2;
- __u16 pad;
- __u32 penalty;
-};
-
-struct tc_cbq_police {
- unsigned char police;
- unsigned char __res1;
- unsigned short __res2;
-};
-
-struct tc_cbq_fopt {
- __u32 split;
- __u32 defmap;
- __u32 defchange;
-};
-
-struct tc_cbq_xstats {
- __u32 borrows;
- __u32 overactions;
- __s32 avgidle;
- __s32 undertime;
-};
-
-enum {
- TCA_CBQ_UNSPEC,
- TCA_CBQ_LSSOPT,
- TCA_CBQ_WRROPT,
- TCA_CBQ_FOPT,
- TCA_CBQ_OVL_STRATEGY,
- TCA_CBQ_RATE,
- TCA_CBQ_RTAB,
- TCA_CBQ_POLICE,
- __TCA_CBQ_MAX,
-};
-
-#define TCA_CBQ_MAX (__TCA_CBQ_MAX - 1)
-
-/* dsmark section */
-
-enum {
- TCA_DSMARK_UNSPEC,
- TCA_DSMARK_INDICES,
- TCA_DSMARK_DEFAULT_INDEX,
- TCA_DSMARK_SET_TC_INDEX,
- TCA_DSMARK_MASK,
- TCA_DSMARK_VALUE,
- __TCA_DSMARK_MAX,
-};
-
-#define TCA_DSMARK_MAX (__TCA_DSMARK_MAX - 1)
-
-/* ATM section */
-
-enum {
- TCA_ATM_UNSPEC,
- TCA_ATM_FD, /* file/socket descriptor */
- TCA_ATM_PTR, /* pointer to descriptor - later */
- TCA_ATM_HDR, /* LL header */
- TCA_ATM_EXCESS, /* excess traffic class (0 for CLP) */
- TCA_ATM_ADDR, /* PVC address (for output only) */
- TCA_ATM_STATE, /* VC state (ATM_VS_*; for output only) */
- __TCA_ATM_MAX,
-};
-
-#define TCA_ATM_MAX (__TCA_ATM_MAX - 1)
-
/* Network emulator */
enum {
@@ -603,6 +494,7 @@ enum {
TCA_NETEM_JITTER64,
TCA_NETEM_SLOT,
TCA_NETEM_SLOT_DIST,
+ TCA_NETEM_PRNG_SEED,
__TCA_NETEM_MAX,
};
@@ -940,15 +832,22 @@ enum {
TCA_FQ_HORIZON_DROP, /* drop packets beyond horizon, or cap their EDT */
+ TCA_FQ_PRIOMAP, /* prio2band */
+
+ TCA_FQ_WEIGHTS, /* Weights for each band */
+
__TCA_FQ_MAX
};
#define TCA_FQ_MAX (__TCA_FQ_MAX - 1)
+#define FQ_BANDS 3
+#define FQ_MIN_WEIGHT 16384
+
struct tc_fq_qd_stats {
__u64 gc_flows;
- __u64 highprio_packets;
- __u64 tcp_retrans;
+ __u64 highprio_packets; /* obsolete */
+ __u64 tcp_retrans; /* obsolete */
__u64 throttled;
__u64 flows_plimit;
__u64 pkts_too_long;
@@ -961,6 +860,10 @@ struct tc_fq_qd_stats {
__u64 ce_mark; /* packets above ce_threshold */
__u64 horizon_drops;
__u64 horizon_caps;
+ __u64 fastpath_packets;
+ __u64 band_drops[FQ_BANDS];
+ __u32 band_pkt_count[FQ_BANDS];
+ __u32 pad;
};
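TCA_FQ_WEIGHTS pairs with the FQ_BANDS and FQ_MIN_WEIGHT constants above. Assuming it carries one signed 32-bit weight per band (an assumption, not stated in this hunk), a libnl sketch of filling it could look like:

#include <stdint.h>
#include <linux/pkt_sched.h>
#include <netlink/attr.h>
#include <netlink/msg.h>

/* Illustrative weights only; each value must be >= FQ_MIN_WEIGHT. */
static int fq_put_weights(struct nl_msg *msg)
{
	int32_t weights[FQ_BANDS] = {
		8 * FQ_MIN_WEIGHT,	/* band 0, highest priority */
		4 * FQ_MIN_WEIGHT,	/* band 1 */
		1 * FQ_MIN_WEIGHT,	/* band 2 */
	};

	return nla_put(msg, TCA_FQ_WEIGHTS, sizeof(weights), weights);
}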
/* Heavy-Hitter Filter */
@@ -1260,6 +1163,16 @@ enum {
};
enum {
+ TCA_TAPRIO_OFFLOAD_STATS_PAD = 1, /* u64 */
+ TCA_TAPRIO_OFFLOAD_STATS_WINDOW_DROPS, /* u64 */
+ TCA_TAPRIO_OFFLOAD_STATS_TX_OVERRUNS, /* u64 */
+
+ /* add new constants above here */
+ __TCA_TAPRIO_OFFLOAD_STATS_CNT,
+ TCA_TAPRIO_OFFLOAD_STATS_MAX = (__TCA_TAPRIO_OFFLOAD_STATS_CNT - 1)
+};
+
+enum {
TCA_TAPRIO_ATTR_UNSPEC,
TCA_TAPRIO_ATTR_PRIOMAP, /* struct tc_mqprio_qopt */
TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST, /* nested of entry */
diff --git a/include/uapi/linux/pktcdvd.h b/include/uapi/linux/pktcdvd.h
index 6a5552dfd6af..987a3022dc5f 100644
--- a/include/uapi/linux/pktcdvd.h
+++ b/include/uapi/linux/pktcdvd.h
@@ -16,6 +16,7 @@
#include <linux/types.h>
/*
+ * UNUSED:
* 1 for normal debug messages, 2 is very verbose. 0 to turn it off.
*/
#define PACKET_DEBUG 1
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index f23d9a16507f..370ed14b1ae0 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -283,7 +283,8 @@ struct prctl_mm_map {
/* Memory deny write / execute */
#define PR_SET_MDWE 65
-# define PR_MDWE_REFUSE_EXEC_GAIN 1
+# define PR_MDWE_REFUSE_EXEC_GAIN (1UL << 0)
+# define PR_MDWE_NO_INHERIT (1UL << 1)
#define PR_GET_MDWE 66
@@ -294,4 +295,15 @@ struct prctl_mm_map {
#define PR_SET_MEMORY_MERGE 67
#define PR_GET_MEMORY_MERGE 68
+
+#define PR_RISCV_V_SET_CONTROL 69
+#define PR_RISCV_V_GET_CONTROL 70
+# define PR_RISCV_V_VSTATE_CTRL_DEFAULT 0
+# define PR_RISCV_V_VSTATE_CTRL_OFF 1
+# define PR_RISCV_V_VSTATE_CTRL_ON 2
+# define PR_RISCV_V_VSTATE_CTRL_INHERIT (1 << 4)
+# define PR_RISCV_V_VSTATE_CTRL_CUR_MASK 0x3
+# define PR_RISCV_V_VSTATE_CTRL_NEXT_MASK 0xc
+# define PR_RISCV_V_VSTATE_CTRL_MASK 0x1f
+
#endif /* _LINUX_PRCTL_H */
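PR_SET_MDWE's argument is now explicitly a flag word, and the new PR_MDWE_NO_INHERIT bit keeps the policy from propagating across fork(). A hedged sketch (assuming libc headers new enough to carry these definitions; at the time of this change the kernel rejects NO_INHERIT without REFUSE_EXEC_GAIN):

#include <sys/prctl.h>
#include <linux/prctl.h>

/* Refuse W^X-violating mappings in this process only, without
 * passing the policy on to children. */
static int enable_mdwe_no_inherit(void)
{
	return prctl(PR_SET_MDWE,
		     PR_MDWE_REFUSE_EXEC_GAIN | PR_MDWE_NO_INHERIT,
		     0L, 0L, 0L);
}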
diff --git a/include/uapi/linux/psp-dbc.h b/include/uapi/linux/psp-dbc.h
new file mode 100644
index 000000000000..b3845a9ff5fd
--- /dev/null
+++ b/include/uapi/linux/psp-dbc.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Userspace interface for AMD Dynamic Boost Control (DBC)
+ *
+ * Copyright (C) 2023 Advanced Micro Devices, Inc.
+ *
+ * Author: Mario Limonciello <mario.limonciello@amd.com>
+ */
+
+#ifndef __PSP_DBC_USER_H__
+#define __PSP_DBC_USER_H__
+
+#include <linux/types.h>
+
+/**
+ * DOC: AMD Dynamic Boost Control (DBC) interface
+ */
+
+#define DBC_NONCE_SIZE 16
+#define DBC_SIG_SIZE 32
+#define DBC_UID_SIZE 16
+
+/**
+ * struct dbc_user_nonce - Nonce exchange structure (input/output).
+ * @auth_needed: Whether the PSP should authenticate this request (input).
+ * 0: no authentication, PSP will return a single-use nonce.
+ * 1: authentication; PSP will return a multi-use nonce.
+ * @nonce: 16 byte value used for future authentication (output).
+ * @signature: Optional 32 byte signature created by software using a
+ * previous nonce (input).
+ */
+struct dbc_user_nonce {
+ __u32 auth_needed;
+ __u8 nonce[DBC_NONCE_SIZE];
+ __u8 signature[DBC_SIG_SIZE];
+} __packed;
+
+/**
+ * struct dbc_user_setuid - UID exchange structure (input).
+ * @uid: 16 byte value representing software identity
+ * @signature: 32 byte signature created by software using a previous nonce
+ */
+struct dbc_user_setuid {
+ __u8 uid[DBC_UID_SIZE];
+ __u8 signature[DBC_SIG_SIZE];
+} __packed;
+
+/**
+ * struct dbc_user_param - Parameter exchange structure (input/output).
+ * @msg_index: Message indicating what parameter to set or get (input)
+ * @param: 4 byte parameter, units are message specific. (input/output)
+ * @signature: 32 byte signature.
+ * - When sending a message this is to be created by software
+ * using a previous nonce (input)
+ * - For interpreting results, this signature is updated by the
+ * PSP to allow software to validate the authenticity of the
+ * results.
+ */
+struct dbc_user_param {
+ __u32 msg_index;
+ __u32 param;
+ __u8 signature[DBC_SIG_SIZE];
+} __packed;
+
+/**
+ * Dynamic Boost Control (DBC) IOC
+ *
+ * possible return codes for all DBC IOCTLs:
+ * 0: success
+ * -EINVAL: invalid input
+ * -E2BIG: excess data passed
+ * -EFAULT: failed to copy to/from userspace
+ * -EBUSY: mailbox in recovery or in use
+ * -ENODEV: driver not bound with PSP device
+ * -EACCES: request isn't authorized
+ * -EINVAL: invalid parameter
+ * -ETIMEDOUT: request timed out
+ * -EAGAIN: invalid request for state machine
+ * -ENOENT: not implemented
+ * -ENFILE: overflow
+ * -EPERM: invalid signature
+ * -EIO: unknown error
+ */
+#define DBC_IOC_TYPE 'D'
+
+/**
+ * DBCIOCNONCE - Fetch a nonce from the PSP for authenticating commands.
+ * If a nonce is fetched without authentication it can only
+ * be utilized for one command.
+ * If a nonce is fetched with authentication it can be used
+ * for multiple requests.
+ */
+#define DBCIOCNONCE _IOWR(DBC_IOC_TYPE, 0x1, struct dbc_user_nonce)
+
+/**
+ * DBCIOCUID - Set the user ID (UID) of a calling process.
+ * The user ID is 16 bytes long. It must be programmed using a
+ * 32 byte signature built using the nonce fetched from
+ * DBCIOCNONCE.
+ * The UID can only be set once until the system is rebooted.
+ */
+#define DBCIOCUID _IOW(DBC_IOC_TYPE, 0x2, struct dbc_user_setuid)
+
+/**
+ * DBCIOCPARAM - Set or get a parameter from the PSP.
+ * This request will only work after DBCIOCUID has successfully
+ * set the UID of the calling process.
+ * Whether the parameter is set or get is controlled by the
+ * message ID in the request.
+ * This command must be sent using a 32 byte signature built
+ * using the nonce fetched from DBCIOCNONCE.
+ * When the command succeeds, the 32 byte signature will be
+ * updated by the PSP for software to authenticate the results.
+ */
+#define DBCIOCPARAM _IOWR(DBC_IOC_TYPE, 0x3, struct dbc_user_param)
+
+/**
+ * enum dbc_cmd_msg - Messages utilized by DBCIOCPARAM
+ * @PARAM_GET_FMAX_CAP: Get frequency cap (MHz)
+ * @PARAM_SET_FMAX_CAP: Set frequency cap (MHz)
+ * @PARAM_GET_PWR_CAP: Get socket power cap (mW)
+ * @PARAM_SET_PWR_CAP: Set socket power cap (mW)
+ * @PARAM_GET_GFX_MODE: Get graphics mode (0/1)
+ * @PARAM_SET_GFX_MODE: Set graphics mode (0/1)
+ * @PARAM_GET_CURR_TEMP: Get current temperature (degrees C)
+ * @PARAM_GET_FMAX_MAX: Get maximum allowed value for frequency (MHz)
+ * @PARAM_GET_FMAX_MIN: Get minimum allowed value for frequency (MHz)
+ * @PARAM_GET_SOC_PWR_MAX: Get maximum allowed value for SoC power (mW)
+ * @PARAM_GET_SOC_PWR_MIN: Get minimum allowed value for SoC power (mW)
+ * @PARAM_GET_SOC_PWR_CUR: Get current value for SoC Power (mW)
+ */
+enum dbc_cmd_msg {
+ PARAM_GET_FMAX_CAP = 0x3,
+ PARAM_SET_FMAX_CAP = 0x4,
+ PARAM_GET_PWR_CAP = 0x5,
+ PARAM_SET_PWR_CAP = 0x6,
+ PARAM_GET_GFX_MODE = 0x7,
+ PARAM_SET_GFX_MODE = 0x8,
+ PARAM_GET_CURR_TEMP = 0x9,
+ PARAM_GET_FMAX_MAX = 0xA,
+ PARAM_GET_FMAX_MIN = 0xB,
+ PARAM_GET_SOC_PWR_MAX = 0xC,
+ PARAM_GET_SOC_PWR_MIN = 0xD,
+ PARAM_GET_SOC_PWR_CUR = 0xE,
+};
+
+#endif /* __PSP_DBC_USER_H__ */
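A minimal userspace sketch of the flow these ioctls imply: fetch a nonce, program the UID, then query a parameter. The /dev/dbc node name and the out-of-band signing step are assumptions here, not part of this header.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/psp-dbc.h>

int main(void)
{
	int fd = open("/dev/dbc", O_RDWR);	/* assumed node name */
	struct dbc_user_nonce nonce = { .auth_needed = 0 };

	if (fd < 0 || ioctl(fd, DBCIOCNONCE, &nonce))
		return 1;

	/* Single-use nonce obtained; a real client now signs it with its
	 * own key material before programming the UID. */
	struct dbc_user_setuid uid = { 0 };
	memcpy(uid.uid, "0123456789abcdef", DBC_UID_SIZE);
	/* fill uid.signature from the out-of-band signing step */
	if (ioctl(fd, DBCIOCUID, &uid))
		return 1;

	/* Read the current frequency cap; param.signature must again be a
	 * valid signature over the request. */
	struct dbc_user_param param = { .msg_index = PARAM_GET_FMAX_CAP };
	if (ioctl(fd, DBCIOCPARAM, &param))
		return 1;
	printf("fmax cap: %u MHz\n", param.param);
	return 0;
}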
diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h
index 1c9da485318f..b44ba7dcdefc 100644
--- a/include/uapi/linux/psp-sev.h
+++ b/include/uapi/linux/psp-sev.h
@@ -68,6 +68,7 @@ typedef enum {
SEV_RET_INVALID_PARAM,
SEV_RET_RESOURCE_LIMIT,
SEV_RET_SECURE_DATA_INVALID,
+ SEV_RET_INVALID_KEY = 0x27,
SEV_RET_MAX,
} sev_ret_code;
diff --git a/include/uapi/linux/ptp_clock.h b/include/uapi/linux/ptp_clock.h
index 1d108d597f66..da700999cad4 100644
--- a/include/uapi/linux/ptp_clock.h
+++ b/include/uapi/linux/ptp_clock.h
@@ -95,7 +95,8 @@ struct ptp_clock_caps {
int cross_timestamping;
/* Whether the clock supports adjust phase */
int adjust_phase;
- int rsv[12]; /* Reserved for future use. */
+ int max_phase_adj; /* Maximum phase adjustment in nanoseconds. */
+ int rsv[11]; /* Reserved for future use. */
};
struct ptp_extts_request {
@@ -223,6 +224,8 @@ struct ptp_pin_desc {
_IOWR(PTP_CLK_MAGIC, 17, struct ptp_sys_offset_precise)
#define PTP_SYS_OFFSET_EXTENDED2 \
_IOWR(PTP_CLK_MAGIC, 18, struct ptp_sys_offset_extended)
+#define PTP_MASK_CLEAR_ALL _IO(PTP_CLK_MAGIC, 19)
+#define PTP_MASK_EN_SINGLE _IOW(PTP_CLK_MAGIC, 20, unsigned int)
struct ptp_extts_event {
struct ptp_clock_time t; /* Time event occurred. */
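The new max_phase_adj field slots into the existing reserved space, so userspace can probe it through the unchanged PTP_CLOCK_GETCAPS ioctl; a short sketch, assuming a /dev/ptp0 clock node (a zero value simply means the driver reported no limit):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	int fd = open("/dev/ptp0", O_RDWR);	/* assumed clock node */
	struct ptp_clock_caps caps;

	if (fd < 0 || ioctl(fd, PTP_CLOCK_GETCAPS, &caps))
		return 1;
	/* Older kernels leave this reserved field as 0. */
	printf("max phase adjustment: %d ns\n", caps.max_phase_adj);
	return 0;
}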
diff --git a/include/uapi/linux/quota.h b/include/uapi/linux/quota.h
index f17c9636a859..52090105b828 100644
--- a/include/uapi/linux/quota.h
+++ b/include/uapi/linux/quota.h
@@ -77,6 +77,7 @@
#define QFMT_VFS_V0 2
#define QFMT_OCFS2 3
#define QFMT_VFS_V1 4
+#define QFMT_SHMEM 5
/* Size of block in which space limits are passed through the quota
* interface */
diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h
index 6c0aa577730f..5a43c23f53bf 100644
--- a/include/uapi/linux/raid/md_p.h
+++ b/include/uapi/linux/raid/md_p.h
@@ -2,15 +2,11 @@
/*
md_p.h : physical layout of Linux RAID devices
Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman
-
+
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
-
- You should have received a copy of the GNU General Public License
- (for example /usr/src/linux/COPYING); if not, write to the Free
- Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _MD_P_H
@@ -237,7 +233,7 @@ struct mdp_superblock_1 {
char set_name[32]; /* set and interpreted by user-space */
__le64 ctime; /* lo 40 bits are seconds, top 24 are microseconds or 0*/
- __le32 level; /* -4 (multipath), -1 (linear), 0,1,4,5 */
+ __le32 level; /* 0,1,4,5 */
__le32 layout; /* only for raid5 and raid10 currently */
__le64 size; /* used size of component devices, in 512byte sectors */
diff --git a/include/uapi/linux/raid/md_u.h b/include/uapi/linux/raid/md_u.h
index 105307244961..7be89a4906e7 100644
--- a/include/uapi/linux/raid/md_u.h
+++ b/include/uapi/linux/raid/md_u.h
@@ -2,15 +2,11 @@
/*
md_u.h : user <=> kernel API between Linux raidtools and RAID drivers
Copyright (C) 1998 Ingo Molnar
-
+
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
-
- You should have received a copy of the GNU General Public License
- (for example /usr/src/linux/COPYING); if not, write to the Free
- Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _UAPI_MD_U_H
@@ -107,11 +103,6 @@ typedef struct mdu_array_info_s {
} mdu_array_info_t;
-/* non-obvious values for 'level' */
-#define LEVEL_MULTIPATH (-4)
-#define LEVEL_LINEAR (-1)
-#define LEVEL_FAULTY (-5)
-
/* we need a value for 'no level specified' and 0
* means 'raid0', so we need something else. This is
* for internal use only
diff --git a/include/uapi/linux/resource.h b/include/uapi/linux/resource.h
index ac5d6a3031db..4fc22908bc09 100644
--- a/include/uapi/linux/resource.h
+++ b/include/uapi/linux/resource.h
@@ -2,7 +2,7 @@
#ifndef _UAPI_LINUX_RESOURCE_H
#define _UAPI_LINUX_RESOURCE_H
-#include <linux/time.h>
+#include <linux/time_types.h>
#include <linux/types.h>
/*
diff --git a/include/uapi/linux/rpmsg.h b/include/uapi/linux/rpmsg.h
index 1637e68177d9..f0c8da2b185b 100644
--- a/include/uapi/linux/rpmsg.h
+++ b/include/uapi/linux/rpmsg.h
@@ -43,4 +43,14 @@ struct rpmsg_endpoint_info {
*/
#define RPMSG_RELEASE_DEV_IOCTL _IOW(0xb5, 0x4, struct rpmsg_endpoint_info)
+/**
+ * Get the flow control state of the remote rpmsg char device.
+ */
+#define RPMSG_GET_OUTGOING_FLOWCONTROL _IOR(0xb5, 0x5, int)
+
+/**
+ * Set the flow control state of the local rpmsg char device.
+ */
+#define RPMSG_SET_INCOMING_FLOWCONTROL _IOR(0xb5, 0x6, int)
+
#endif
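A hedged sketch of how an rpmsg endpoint client might use the two new ioctls; the /dev/rpmsg0 node name is an assumption, and passing the state directly as the argument of the set ioctl follows the rpmsg char driver's handling:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/rpmsg.h>

int main(void)
{
	int fd = open("/dev/rpmsg0", O_RDWR);	/* assumed endpoint node */
	int remote_state;

	if (fd < 0)
		return 1;
	/* Query whether the remote side currently accepts data. */
	if (ioctl(fd, RPMSG_GET_OUTGOING_FLOWCONTROL, &remote_state) == 0)
		printf("remote flow control: %d\n", remote_state);
	/* Ask the remote side to pause transmissions towards us. */
	return ioctl(fd, RPMSG_SET_INCOMING_FLOWCONTROL, 1);
}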
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 51c13cf9c5ae..3b687d20c9ed 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -502,13 +502,17 @@ enum {
#define RTAX_MAX (__RTAX_MAX - 1)
-#define RTAX_FEATURE_ECN (1 << 0)
-#define RTAX_FEATURE_SACK (1 << 1)
-#define RTAX_FEATURE_TIMESTAMP (1 << 2)
-#define RTAX_FEATURE_ALLFRAG (1 << 3)
-
-#define RTAX_FEATURE_MASK (RTAX_FEATURE_ECN | RTAX_FEATURE_SACK | \
- RTAX_FEATURE_TIMESTAMP | RTAX_FEATURE_ALLFRAG)
+#define RTAX_FEATURE_ECN (1 << 0)
+#define RTAX_FEATURE_SACK (1 << 1) /* unused */
+#define RTAX_FEATURE_TIMESTAMP (1 << 2) /* unused */
+#define RTAX_FEATURE_ALLFRAG (1 << 3) /* unused */
+#define RTAX_FEATURE_TCP_USEC_TS (1 << 4)
+
+#define RTAX_FEATURE_MASK (RTAX_FEATURE_ECN | \
+ RTAX_FEATURE_SACK | \
+ RTAX_FEATURE_TIMESTAMP | \
+ RTAX_FEATURE_ALLFRAG | \
+ RTAX_FEATURE_TCP_USEC_TS)
struct rta_session {
__u8 proto;
diff --git a/include/uapi/linux/sched/types.h b/include/uapi/linux/sched/types.h
index f2c4589d4dbf..90662385689b 100644
--- a/include/uapi/linux/sched/types.h
+++ b/include/uapi/linux/sched/types.h
@@ -4,10 +4,6 @@
#include <linux/types.h>
-struct sched_param {
- int sched_priority;
-};
-
#define SCHED_ATTR_SIZE_VER0 48 /* sizeof first published struct */
#define SCHED_ATTR_SIZE_VER1 56 /* add: util_{min,max} */
diff --git a/include/uapi/linux/seccomp.h b/include/uapi/linux/seccomp.h
index 0fdc6ef02b94..dbfc9b37fcae 100644
--- a/include/uapi/linux/seccomp.h
+++ b/include/uapi/linux/seccomp.h
@@ -115,6 +115,8 @@ struct seccomp_notif_resp {
__u32 flags;
};
+#define SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP (1UL << 0)
+
/* valid flags for seccomp_notif_addfd */
#define SECCOMP_ADDFD_FLAG_SETFD (1UL << 0) /* Specify remote fd */
#define SECCOMP_ADDFD_FLAG_SEND (1UL << 1) /* Addfd and return it, atomically */
@@ -150,4 +152,6 @@ struct seccomp_notif_addfd {
#define SECCOMP_IOCTL_NOTIF_ADDFD SECCOMP_IOW(3, \
struct seccomp_notif_addfd)
+#define SECCOMP_IOCTL_NOTIF_SET_FLAGS SECCOMP_IOW(4, __u64)
+
#endif /* _UAPI_LINUX_SECCOMP_H */
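Enabling the new synchronous wake-up behaviour on an existing notification fd is a one-ioctl sketch; the flag value is passed directly as the ioctl argument, mirroring how the seccomp selftests invoke it (an assumption worth verifying against your kernel):

#include <sys/ioctl.h>
#include <linux/seccomp.h>

/* notify_fd comes from seccomp(SECCOMP_SET_MODE_FILTER,
 * SECCOMP_FILTER_FLAG_NEW_LISTENER, ...) elsewhere. */
static int enable_sync_wakeup(int notify_fd)
{
	return ioctl(notify_fd, SECCOMP_IOCTL_NOTIF_SET_FLAGS,
		     SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP);
}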
diff --git a/include/uapi/linux/sed-opal.h b/include/uapi/linux/sed-opal.h
index dc2efd345133..d3994b7716bc 100644
--- a/include/uapi/linux/sed-opal.h
+++ b/include/uapi/linux/sed-opal.h
@@ -49,13 +49,23 @@ enum opal_lock_flags {
OPAL_SAVE_FOR_LOCK = 0x01,
};
+enum opal_key_type {
+ OPAL_INCLUDED = 0, /* key[] is the key */
+ OPAL_KEYRING, /* key is in keyring */
+};
+
struct opal_key {
__u8 lr;
__u8 key_len;
- __u8 __align[6];
+ __u8 key_type;
+ __u8 __align[5];
__u8 key[OPAL_KEY_MAX];
};
+enum opal_revert_lsp_opts {
+ OPAL_PRESERVE = 0x01,
+};
+
struct opal_lr_act {
struct opal_key key;
__u32 sum;
@@ -173,6 +183,17 @@ struct opal_geometry {
__u8 __align[3];
};
+struct opal_discovery {
+ __u64 data;
+ __u64 size;
+};
+
+struct opal_revert_lsp {
+ struct opal_key key;
+ __u32 options;
+ __u32 __pad;
+};
+
#define IOC_OPAL_SAVE _IOW('p', 220, struct opal_lock_unlock)
#define IOC_OPAL_LOCK_UNLOCK _IOW('p', 221, struct opal_lock_unlock)
#define IOC_OPAL_TAKE_OWNERSHIP _IOW('p', 222, struct opal_key)
@@ -192,5 +213,7 @@ struct opal_geometry {
#define IOC_OPAL_GET_STATUS _IOR('p', 236, struct opal_status)
#define IOC_OPAL_GET_LR_STATUS _IOW('p', 237, struct opal_lr_status)
#define IOC_OPAL_GET_GEOMETRY _IOR('p', 238, struct opal_geometry)
+#define IOC_OPAL_DISCOVERY _IOW('p', 239, struct opal_discovery)
+#define IOC_OPAL_REVERT_LSP _IOW('p', 240, struct opal_revert_lsp)
#endif /* _UAPI_SED_OPAL_H */
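struct opal_discovery carries a user pointer and buffer size, so a level-0 discovery dump looks roughly like the sketch below; the assumption that the ioctl returns the number of valid bytes comes from the block-layer implementation and should be treated as such:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/sed-opal.h>

static int opal_dump_discovery(int blkdev_fd, void *buf, uint64_t len)
{
	struct opal_discovery d = {
		.data = (uintptr_t)buf,	/* userspace destination buffer */
		.size = len,
	};

	/* A return value >= 0 is assumed to be the payload length. */
	return ioctl(blkdev_fd, IOC_OPAL_DISCOVERY, &d);
}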
diff --git a/include/uapi/linux/serial.h b/include/uapi/linux/serial.h
index 53bc1af67a41..de9b4733607e 100644
--- a/include/uapi/linux/serial.h
+++ b/include/uapi/linux/serial.h
@@ -11,6 +11,7 @@
#ifndef _UAPI_LINUX_SERIAL_H
#define _UAPI_LINUX_SERIAL_H
+#include <linux/const.h>
#include <linux/types.h>
#include <linux/tty_flags.h>
@@ -137,17 +138,20 @@ struct serial_icounter_struct {
* * %SER_RS485_ADDRB - Enable RS485 addressing mode.
* * %SER_RS485_ADDR_RECV - Receive address filter (enables @addr_recv). Requires %SER_RS485_ADDRB.
* * %SER_RS485_ADDR_DEST - Destination address (enables @addr_dest). Requires %SER_RS485_ADDRB.
+ * * %SER_RS485_MODE_RS422 - Enable RS422. Requires %SER_RS485_ENABLED.
*/
struct serial_rs485 {
__u32 flags;
-#define SER_RS485_ENABLED (1 << 0)
-#define SER_RS485_RTS_ON_SEND (1 << 1)
-#define SER_RS485_RTS_AFTER_SEND (1 << 2)
-#define SER_RS485_RX_DURING_TX (1 << 4)
-#define SER_RS485_TERMINATE_BUS (1 << 5)
-#define SER_RS485_ADDRB (1 << 6)
-#define SER_RS485_ADDR_RECV (1 << 7)
-#define SER_RS485_ADDR_DEST (1 << 8)
+#define SER_RS485_ENABLED _BITUL(0)
+#define SER_RS485_RTS_ON_SEND _BITUL(1)
+#define SER_RS485_RTS_AFTER_SEND _BITUL(2)
+/* Placeholder for bit 3: SER_RS485_RTS_BEFORE_SEND, which isn't used anymore */
+#define SER_RS485_RX_DURING_TX _BITUL(4)
+#define SER_RS485_TERMINATE_BUS _BITUL(5)
+#define SER_RS485_ADDRB _BITUL(6)
+#define SER_RS485_ADDR_RECV _BITUL(7)
+#define SER_RS485_ADDR_DEST _BITUL(8)
+#define SER_RS485_MODE_RS422 _BITUL(9)
__u32 delay_rts_before_send;
__u32 delay_rts_after_send;
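Since SER_RS485_MODE_RS422 requires SER_RS485_ENABLED, a userspace sketch of switching a port to RS422 via the existing TIOCGRS485/TIOCSRS485 ioctls might look like this (read-modify-write so the driver's other flags survive):

#include <sys/ioctl.h>
#include <linux/serial.h>

static int enable_rs422(int tty_fd)
{
	struct serial_rs485 rs485;

	if (ioctl(tty_fd, TIOCGRS485, &rs485))
		return -1;
	/* RS422 is modelled as an RS485 sub-mode, so both bits are set. */
	rs485.flags |= SER_RS485_ENABLED | SER_RS485_MODE_RS422;
	return ioctl(tty_fd, TIOCSRS485, &rs485);
}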
diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h
index 281fa286555c..9c007a106330 100644
--- a/include/uapi/linux/serial_core.h
+++ b/include/uapi/linux/serial_core.h
@@ -1,22 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
/*
- * linux/drivers/char/serial_core.h
- *
* Copyright (C) 2000 Deep Blue Solutions Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _UAPILINUX_SERIAL_CORE_H
#define _UAPILINUX_SERIAL_CORE_H
@@ -25,6 +9,10 @@
/*
* The type definitions. These are from Ted Ts'o's serial.h
+ * For historical reasons, the values from 0 to 13 are defined
+ * in include/uapi/linux/serial.h; do not define them here.
+ * Values 0 to 19 are used by setserial from busybox and must never
+ * be modified.
*/
#define PORT_NS16550A 14
#define PORT_XSCALE 15
@@ -94,15 +82,9 @@
#define PORT_SCIF 53
#define PORT_IRDA 54
-/* Samsung S3C2410 SoC and derivatives thereof */
-#define PORT_S3C2410 55
-
/* SGI IP22 aka Indy / Challenge S / Indigo 2 */
#define PORT_IP22ZILOG 56
-/* Sharp LH7a40x -- an ARM9 SoC series */
-#define PORT_LH7A40X 57
-
/* PPC CPM type number */
#define PORT_CPM 58
@@ -112,37 +94,23 @@
/* IBM icom */
#define PORT_ICOM 60
-/* Samsung S3C2440 SoC */
-#define PORT_S3C2440 61
-
/* Motorola i.MX SoC */
#define PORT_IMX 62
-/* Marvell MPSC (obsolete unused) */
-#define PORT_MPSC 63
-
/* TXX9 type number */
#define PORT_TXX9 64
-/* Samsung S3C2400 SoC */
-#define PORT_S3C2400 67
-
-/* M32R SIO */
-#define PORT_M32R_SIO 68
-
/*Digi jsm */
#define PORT_JSM 69
/* SUN4V Hypervisor Console */
#define PORT_SUNHV 72
-#define PORT_S3C2412 73
-
/* Xilinx uartlite */
#define PORT_UARTLITE 74
-/* Blackfin bf5xx */
-#define PORT_BFIN 75
+/* Broadcom BCM7271 UART */
+#define PORT_BCM7271 76
/* Broadcom SB1250, etc. SOC */
#define PORT_SB1250_DUART 77
@@ -150,13 +118,6 @@
/* Freescale ColdFire */
#define PORT_MCF 78
-/* Blackfin SPORT */
-#define PORT_BFIN_SPORT 79
-
-/* MN10300 on-chip UART numbers */
-#define PORT_MN10300 80
-#define PORT_MN10300_CTS 81
-
#define PORT_SC26XX 82
/* SH-SCI */
@@ -164,9 +125,6 @@
#define PORT_S3C6400 84
-/* NWPSERIAL, now removed */
-#define PORT_NWPSERIAL 85
-
/* MAX3100 */
#define PORT_MAX3100 86
@@ -225,13 +183,10 @@
/* ST ASC type numbers */
#define PORT_ASC 105
-/* Tilera TILE-Gx UART */
-#define PORT_TILEGX 106
-
/* MEN 16z135 UART */
#define PORT_MEN_Z135 107
-/* SC16IS74xx */
+/* SC16IS7xx */
#define PORT_SC16IS7XX 108
/* MESON */
@@ -243,9 +198,6 @@
/* SPRD SERIAL */
#define PORT_SPRD 111
-/* Cris v10 / v32 SoC */
-#define PORT_CRIS 112
-
/* STM32 USART */
#define PORT_STM32 113
@@ -279,4 +231,7 @@
/* Sunplus UART */
#define PORT_SUNPLUS 123
+/* Generic type identifier for ports whose type is not important to userspace. */
+#define PORT_GENERIC (-1)
+
#endif /* _UAPILINUX_SERIAL_CORE_H */
diff --git a/include/uapi/linux/serial_reg.h b/include/uapi/linux/serial_reg.h
index 08b3527e1b93..9c987b04e2d0 100644
--- a/include/uapi/linux/serial_reg.h
+++ b/include/uapi/linux/serial_reg.h
@@ -49,6 +49,7 @@
#define UART_IIR_FIFO_ENABLED_8250 0x00 /* 8250: no FIFO */
#define UART_IIR_FIFO_ENABLED_16550 0x80 /* 16550: (broken/unusable) FIFO */
#define UART_IIR_FIFO_ENABLED_16550A 0xc0 /* 16550A: FIFO enabled */
+#define UART_IIR_FIFO_ENABLED_16750 0xe0 /* 16750: 64 bytes FIFO enabled */
#define UART_FCR 2 /* Out: FIFO Control Register */
#define UART_FCR_ENABLE_FIFO 0x01 /* Enable the FIFO */
diff --git a/include/uapi/linux/sev-guest.h b/include/uapi/linux/sev-guest.h
index 2aa39112cf8d..154a87a1eca9 100644
--- a/include/uapi/linux/sev-guest.h
+++ b/include/uapi/linux/sev-guest.h
@@ -14,9 +14,11 @@
#include <linux/types.h>
+#define SNP_REPORT_USER_DATA_SIZE 64
+
struct snp_report_req {
/* user data that should be included in the report */
- __u8 user_data[64];
+ __u8 user_data[SNP_REPORT_USER_DATA_SIZE];
/* The vmpl level to be included in the report */
__u32 vmpl;
diff --git a/include/uapi/linux/smc.h b/include/uapi/linux/smc.h
index bb4dacca31e7..b531e3ef011a 100644
--- a/include/uapi/linux/smc.h
+++ b/include/uapi/linux/smc.h
@@ -107,6 +107,8 @@ enum {
enum {
SMC_NLA_LGR_R_V2_UNSPEC,
SMC_NLA_LGR_R_V2_DIRECT, /* u8 */
+ SMC_NLA_LGR_R_V2_MAX_CONNS, /* u8 */
+ SMC_NLA_LGR_R_V2_MAX_LINKS, /* u8 */
__SMC_NLA_LGR_R_V2_MAX,
SMC_NLA_LGR_R_V2_MAX = __SMC_NLA_LGR_R_V2_MAX - 1
};
@@ -158,6 +160,8 @@ enum {
SMC_NLA_LGR_D_CHID, /* u16 */
SMC_NLA_LGR_D_PAD, /* flag */
SMC_NLA_LGR_D_V2_COMMON, /* nest */
+ SMC_NLA_LGR_D_EXT_GID, /* u64 */
+ SMC_NLA_LGR_D_PEER_EXT_GID, /* u64 */
__SMC_NLA_LGR_D_MAX,
SMC_NLA_LGR_D_MAX = __SMC_NLA_LGR_D_MAX - 1
};
diff --git a/include/uapi/linux/smc_diag.h b/include/uapi/linux/smc_diag.h
index 8cb3a6fef553..58eceb7f5df2 100644
--- a/include/uapi/linux/smc_diag.h
+++ b/include/uapi/linux/smc_diag.h
@@ -107,6 +107,8 @@ struct smcd_diag_dmbinfo { /* SMC-D Socket internals */
__aligned_u64 my_gid; /* My GID */
__aligned_u64 token; /* Token of DMB */
__aligned_u64 peer_token; /* Token of remote DMBE */
+ __aligned_u64 peer_gid_ext; /* Peer GID (extended part) */
+ __aligned_u64 my_gid_ext; /* My GID (extended part) */
};
#endif /* _UAPI_SMC_DIAG_H_ */
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index 26f33a4c253d..a0819c6a5988 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -24,7 +24,7 @@ enum
IPSTATS_MIB_INOCTETS, /* InOctets */
IPSTATS_MIB_INDELIVERS, /* InDelivers */
IPSTATS_MIB_OUTFORWDATAGRAMS, /* OutForwDatagrams */
- IPSTATS_MIB_OUTPKTS, /* OutRequests */
+ IPSTATS_MIB_OUTREQUESTS, /* OutRequests */
IPSTATS_MIB_OUTOCTETS, /* OutOctets */
/* other fields */
IPSTATS_MIB_INHDRERRORS, /* InHdrErrors */
@@ -57,6 +57,7 @@ enum
IPSTATS_MIB_ECT0PKTS, /* InECT0Pkts */
IPSTATS_MIB_CEPKTS, /* InCEPkts */
IPSTATS_MIB_REASM_OVERLAPS, /* ReasmOverlaps */
+ IPSTATS_MIB_OUTPKTS, /* OutTransmits */
__IPSTATS_MIB_MAX
};
@@ -296,6 +297,11 @@ enum
LINUX_MIB_TCPMIGRATEREQSUCCESS, /* TCPMigrateReqSuccess */
LINUX_MIB_TCPMIGRATEREQFAILURE, /* TCPMigrateReqFailure */
LINUX_MIB_TCPPLBREHASH, /* TCPPLBRehash */
+ LINUX_MIB_TCPAOREQUIRED, /* TCPAORequired */
+ LINUX_MIB_TCPAOBAD, /* TCPAOBad */
+ LINUX_MIB_TCPAOKEYNOTFOUND, /* TCPAOKeyNotFound */
+ LINUX_MIB_TCPAOGOOD, /* TCPAOGood */
+ LINUX_MIB_TCPAODROPPEDICMPS, /* TCPAODroppedIcmps */
__LINUX_MIB_MAX
};
diff --git a/include/uapi/linux/spi/spi.h b/include/uapi/linux/spi/spi.h
index 9d5f58059703..ca56e477d161 100644
--- a/include/uapi/linux/spi/spi.h
+++ b/include/uapi/linux/spi/spi.h
@@ -28,6 +28,7 @@
#define SPI_RX_OCTAL _BITUL(14) /* receive with 8 wires */
#define SPI_3WIRE_HIZ _BITUL(15) /* high impedance turnaround */
#define SPI_RX_CPHA_FLIP _BITUL(16) /* flip CPHA on Rx only xfer */
+#define SPI_MOSI_IDLE_LOW _BITUL(17) /* leave mosi line low when idle */
/*
* All the bits defined above should be covered by SPI_MODE_USER_MASK.
@@ -37,6 +38,6 @@
* These bits must not overlap. A static assert check should make sure of that.
* If adding extra bits, make sure to increase the bit index below as well.
*/
-#define SPI_MODE_USER_MASK (_BITUL(17) - 1)
+#define SPI_MODE_USER_MASK (_BITUL(18) - 1)
#endif /* _UAPI_SPI_H */
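Because SPI_MOSI_IDLE_LOW sits inside SPI_MODE_USER_MASK, userspace can request it through spidev's 32-bit mode ioctls; a sketch, assuming an spidev node is already open:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/spi/spi.h>
#include <linux/spi/spidev.h>

static int set_mosi_idle_low(int spidev_fd)
{
	uint32_t mode;

	if (ioctl(spidev_fd, SPI_IOC_RD_MODE32, &mode))
		return -1;
	mode |= SPI_MOSI_IDLE_LOW;	/* bit 17, still within the user mask */
	return ioctl(spidev_fd, SPI_IOC_WR_MODE32, &mode);
}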
diff --git a/include/uapi/linux/stat.h b/include/uapi/linux/stat.h
index 7cab2c65d3d7..2f2ee82d5517 100644
--- a/include/uapi/linux/stat.h
+++ b/include/uapi/linux/stat.h
@@ -154,6 +154,7 @@ struct statx {
#define STATX_BTIME 0x00000800U /* Want/got stx_btime */
#define STATX_MNT_ID 0x00001000U /* Got stx_mnt_id */
#define STATX_DIOALIGN 0x00002000U /* Want/got direct I/O alignment info */
+#define STATX_MNT_ID_UNIQUE 0x00004000U /* Want/got extended stx_mount_id */
#define STATX__RESERVED 0x80000000U /* Reserved for future struct statx expansion */
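Requesting the extended mount ID follows the usual statx() mask pattern; a sketch, assuming libc headers new enough to carry STATX_MNT_ID_UNIQUE and the statx(2) wrapper:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct statx stx;

	if (statx(AT_FDCWD, "/", 0, STATX_MNT_ID_UNIQUE, &stx))
		return 1;
	/* The kernel confirms via stx_mask which flavour it filled in. */
	if (stx.stx_mask & STATX_MNT_ID_UNIQUE)
		printf("unique mount id: %llu\n",
		       (unsigned long long)stx.stx_mnt_id);
	return 0;
}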
diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h
index 7837ba4fe728..2ec6f35cda32 100644
--- a/include/uapi/linux/stddef.h
+++ b/include/uapi/linux/stddef.h
@@ -27,8 +27,13 @@
union { \
struct { MEMBERS } ATTRS; \
struct TAG { MEMBERS } ATTRS NAME; \
- }
+ } ATTRS
+#ifdef __cplusplus
+/* sizeof(struct{}) is 1 in C++, not 0, can't use C version of the macro. */
+#define __DECLARE_FLEX_ARRAY(T, member) \
+ T member[0]
+#else
/**
* __DECLARE_FLEX_ARRAY() - Declare a flexible array usable in a union
*
@@ -45,3 +50,9 @@
TYPE NAME[]; \
}
#endif
+
+#ifndef __counted_by
+#define __counted_by(m)
+#endif
+
+#endif /* _UAPI_LINUX_STDDEF_H */
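For illustration, a hypothetical UAPI-style struct using __DECLARE_FLEX_ARRAY, which now expands safely in both C and C++ translation units:

#include <linux/stddef.h>
#include <linux/types.h>

struct example_msg {			/* hypothetical layout */
	__u32 len;
	union {
		__u64 legacy_padding;
		__DECLARE_FLEX_ARRAY(__u8, data);
	};
};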
diff --git a/include/uapi/linux/sync_file.h b/include/uapi/linux/sync_file.h
index 7e42a5b7558b..ff1f38889dcf 100644
--- a/include/uapi/linux/sync_file.h
+++ b/include/uapi/linux/sync_file.h
@@ -56,7 +56,7 @@ struct sync_fence_info {
* @name: name of fence
* @status: status of fence. 1: signaled, 0: active, <0: error
* @flags: sync_file_info flags
- * @num_fences number of fences in the sync_file
+ * @num_fences: number of fences in the sync_file
* @pad: padding for 64-bit alignment, should always be zero
* @sync_fence_info: pointer to array of struct &sync_fence_info with all
* fences in the sync_file
@@ -76,6 +76,27 @@ struct sync_file_info {
__u64 sync_fence_info;
};
+/**
+ * struct sync_set_deadline - SYNC_IOC_SET_DEADLINE - set a deadline hint on a fence
+ * @deadline_ns: absolute time of the deadline
+ * @pad: must be zero
+ *
+ * Allows userspace to set a deadline on a fence, see &dma_fence_set_deadline
+ *
+ * The timebase for the deadline is CLOCK_MONOTONIC (same as vblank). For
+ * example
+ *
+ * clock_gettime(CLOCK_MONOTONIC, &t);
+ * deadline_ns = (t.tv_sec * 1000000000L) + t.tv_nsec + ns_until_deadline
+ */
+struct sync_set_deadline {
+ __u64 deadline_ns;
+ /* Not strictly needed for alignment but gives some possibility
+ * for future extension:
+ */
+ __u64 pad;
+};
+
#define SYNC_IOC_MAGIC '>'
/*
@@ -87,5 +108,6 @@ struct sync_file_info {
#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 3, struct sync_merge_data)
#define SYNC_IOC_FILE_INFO _IOWR(SYNC_IOC_MAGIC, 4, struct sync_file_info)
+#define SYNC_IOC_SET_DEADLINE _IOW(SYNC_IOC_MAGIC, 5, struct sync_set_deadline)
#endif /* _UAPI_LINUX_SYNC_H */
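The kernel-doc above already spells out the deadline arithmetic; wrapped into a callable sketch:

#include <stdint.h>
#include <time.h>
#include <sys/ioctl.h>
#include <linux/sync_file.h>

/* Hint that the fence should signal within ns_from_now nanoseconds. */
static int fence_set_deadline(int fence_fd, int64_t ns_from_now)
{
	struct timespec t;
	struct sync_set_deadline d = { .pad = 0 };

	clock_gettime(CLOCK_MONOTONIC, &t);
	d.deadline_ns = t.tv_sec * 1000000000LL + t.tv_nsec + ns_from_now;
	return ioctl(fence_fd, SYNC_IOC_SET_DEADLINE, &d);
}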
diff --git a/include/uapi/linux/tc_act/tc_ipt.h b/include/uapi/linux/tc_act/tc_ipt.h
deleted file mode 100644
index c48d7da6750d..000000000000
--- a/include/uapi/linux/tc_act/tc_ipt.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef __LINUX_TC_IPT_H
-#define __LINUX_TC_IPT_H
-
-#include <linux/pkt_cls.h>
-
-enum {
- TCA_IPT_UNSPEC,
- TCA_IPT_TABLE,
- TCA_IPT_HOOK,
- TCA_IPT_INDEX,
- TCA_IPT_CNT,
- TCA_IPT_TM,
- TCA_IPT_TARG,
- TCA_IPT_PAD,
- __TCA_IPT_MAX
-};
-#define TCA_IPT_MAX (__TCA_IPT_MAX - 1)
-
-#endif
diff --git a/include/uapi/linux/tc_act/tc_mirred.h b/include/uapi/linux/tc_act/tc_mirred.h
index 2500a0005d05..c61e76f3c23b 100644
--- a/include/uapi/linux/tc_act/tc_mirred.h
+++ b/include/uapi/linux/tc_act/tc_mirred.h
@@ -21,6 +21,7 @@ enum {
TCA_MIRRED_TM,
TCA_MIRRED_PARMS,
TCA_MIRRED_PAD,
+ TCA_MIRRED_BLOCKID,
__TCA_MIRRED_MAX
};
#define TCA_MIRRED_MAX (__TCA_MIRRED_MAX - 1)
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index 879eeb0a084b..c07e9f90c084 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -129,6 +129,11 @@ enum {
#define TCP_TX_DELAY 37 /* delay outgoing packets by XX usec */
+#define TCP_AO_ADD_KEY 38 /* Add/Set MKT */
+#define TCP_AO_DEL_KEY 39 /* Delete MKT */
+#define TCP_AO_INFO 40 /* Set/list TCP-AO per-socket options */
+#define TCP_AO_GET_KEYS 41 /* List MKT(s) */
+#define TCP_AO_REPAIR 42 /* Get/Set SNEs and ISNs */
#define TCP_REPAIR_ON 1
#define TCP_REPAIR_OFF 0
@@ -170,6 +175,7 @@ enum tcp_fastopen_client_fail {
#define TCPI_OPT_ECN 8 /* ECN was negotiated at TCP session init */
#define TCPI_OPT_ECN_SEEN 16 /* we received at least one packet with ECT */
#define TCPI_OPT_SYN_DATA 32 /* SYN-ACK acked data in SYN sent or rcvd */
+#define TCPI_OPT_USEC_TS 64 /* usec timestamps */
/*
* Sender's congestion state indicating normal or abnormal situations
@@ -289,6 +295,18 @@ struct tcp_info {
*/
__u32 tcpi_rehash; /* PLB or timeout triggered rehash attempts */
+
+ __u16 tcpi_total_rto; /* Total number of RTO timeouts, including
+ * SYN/SYN-ACK and recurring timeouts.
+ */
+ __u16 tcpi_total_rto_recoveries; /* Total number of RTO
+ * recoveries, including any
+ * unfinished recovery.
+ */
+ __u32 tcpi_total_rto_time; /* Total time spent in RTO recoveries
+ * in milliseconds, including any
+ * unfinished recovery.
+ */
};
/* netlink attributes types for SCM_TIMESTAMPING_OPT_STATS */
@@ -348,6 +366,106 @@ struct tcp_diag_md5sig {
__u8 tcpm_key[TCP_MD5SIG_MAXKEYLEN];
};
+#define TCP_AO_MAXKEYLEN 80
+
+#define TCP_AO_KEYF_IFINDEX (1 << 0) /* L3 ifindex for VRF */
+#define TCP_AO_KEYF_EXCLUDE_OPT (1 << 1) /* "Indicates whether TCP
+ * options other than TCP-AO
+ * are included in the MAC
+ * calculation"
+ */
+
+struct tcp_ao_add { /* setsockopt(TCP_AO_ADD_KEY) */
+ struct __kernel_sockaddr_storage addr; /* peer's address for the key */
+ char alg_name[64]; /* crypto hash algorithm to use */
+ __s32 ifindex; /* L3 dev index for VRF */
+ __u32 set_current :1, /* set key as Current_key at once */
+ set_rnext :1, /* request it from peer with RNext_key */
+ reserved :30; /* must be 0 */
+ __u16 reserved2; /* padding, must be 0 */
+ __u8 prefix; /* peer's address prefix */
+ __u8 sndid; /* SendID for outgoing segments */
+ __u8 rcvid; /* RecvID to match for incoming seg */
+ __u8 maclen; /* length of authentication code (hash) */
+ __u8 keyflags; /* see TCP_AO_KEYF_ */
+ __u8 keylen; /* length of ::key */
+ __u8 key[TCP_AO_MAXKEYLEN];
+} __attribute__((aligned(8)));
+
+struct tcp_ao_del { /* setsockopt(TCP_AO_DEL_KEY) */
+ struct __kernel_sockaddr_storage addr; /* peer's address for the key */
+ __s32 ifindex; /* L3 dev index for VRF */
+ __u32 set_current :1, /* corresponding ::current_key */
+ set_rnext :1, /* corresponding ::rnext */
+ del_async :1, /* only valid for listen sockets */
+ reserved :29; /* must be 0 */
+ __u16 reserved2; /* padding, must be 0 */
+ __u8 prefix; /* peer's address prefix */
+ __u8 sndid; /* SendID for outgoing segments */
+ __u8 rcvid; /* RecvID to match for incoming seg */
+ __u8 current_key; /* KeyID to set as Current_key */
+ __u8 rnext; /* KeyID to set as Rnext_key */
+ __u8 keyflags; /* see TCP_AO_KEYF_ */
+} __attribute__((aligned(8)));
+
+struct tcp_ao_info_opt { /* setsockopt(TCP_AO_INFO), getsockopt(TCP_AO_INFO) */
+ /* Here 'in' is for setsockopt(), 'out' is for getsockopt() */
+ __u32 set_current :1, /* in/out: corresponding ::current_key */
+ set_rnext :1, /* in/out: corresponding ::rnext */
+ ao_required :1, /* in/out: don't accept non-AO connects */
+ set_counters :1, /* in: set/clear ::pkt_* counters */
+ accept_icmps :1, /* in/out: accept incoming ICMPs */
+ reserved :27; /* must be 0 */
+ __u16 reserved2; /* padding, must be 0 */
+ __u8 current_key; /* in/out: KeyID of Current_key */
+ __u8 rnext; /* in/out: keyid of RNext_key */
+ __u64 pkt_good; /* in/out: verified segments */
+ __u64 pkt_bad; /* in/out: failed verification */
+ __u64 pkt_key_not_found; /* in/out: could not find a key to verify */
+ __u64 pkt_ao_required; /* in/out: segments missing TCP-AO sign */
+ __u64 pkt_dropped_icmp; /* in/out: ICMPs that were ignored */
+} __attribute__((aligned(8)));
+
+struct tcp_ao_getsockopt { /* getsockopt(TCP_AO_GET_KEYS) */
+ struct __kernel_sockaddr_storage addr; /* in/out: dump keys for peer
+ * with this address/prefix
+ */
+ char alg_name[64]; /* out: crypto hash algorithm */
+ __u8 key[TCP_AO_MAXKEYLEN];
+ __u32 nkeys; /* in: size of the userspace buffer
+ * @optval, measured in @optlen - the
+ * sizeof(struct tcp_ao_getsockopt)
+ * out: number of keys that matched
+ */
+ __u16 is_current :1, /* in: match and dump Current_key,
+ * out: the dumped key is Current_key
+ */
+
+ is_rnext :1, /* in: match and dump RNext_key,
+ * out: the dumped key is RNext_key
+ */
+ get_all :1, /* in: dump all keys */
+ reserved :13; /* padding, must be 0 */
+ __u8 sndid; /* in/out: dump keys with SendID */
+ __u8 rcvid; /* in/out: dump keys with RecvID */
+ __u8 prefix; /* in/out: dump keys with address/prefix */
+ __u8 maclen; /* out: key's length of authentication
+ * code (hash)
+ */
+ __u8 keyflags; /* in/out: see TCP_AO_KEYF_ */
+ __u8 keylen; /* out: length of ::key */
+ __s32 ifindex; /* in/out: L3 dev index for VRF */
+ __u64 pkt_good; /* out: verified segments */
+ __u64 pkt_bad; /* out: segments that failed verification */
+} __attribute__((aligned(8)));
+
+struct tcp_ao_repair { /* {s,g}etsockopt(TCP_AO_REPAIR) */
+ __be32 snt_isn;
+ __be32 rcv_isn;
+ __u32 snd_sne;
+ __u32 rcv_sne;
+} __attribute__((aligned(8)));
+
/* setsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE, ...) */
#define TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT 0x1
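Installing a single TCP-AO key with the new setsockopt is sketched below; the algorithm name, IDs and key bytes are illustrative values only, not a recommended configuration:

#include <string.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <linux/tcp.h>

static int tcp_ao_install_key(int sk, const struct sockaddr_in *peer)
{
	struct tcp_ao_add ao;

	memset(&ao, 0, sizeof(ao));
	memcpy(&ao.addr, peer, sizeof(*peer));
	strcpy(ao.alg_name, "hmac(sha256)");	/* illustrative algorithm */
	ao.prefix = 32;		/* match this exact IPv4 peer */
	ao.sndid = 100;
	ao.rcvid = 100;
	ao.keylen = 16;
	memcpy(ao.key, "0123456789abcdef", 16);	/* dummy key material */
	return setsockopt(sk, IPPROTO_TCP, TCP_AO_ADD_KEY, &ao, sizeof(ao));
}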
diff --git a/include/uapi/linux/thp7312.h b/include/uapi/linux/thp7312.h
new file mode 100644
index 000000000000..2b629e05daf9
--- /dev/null
+++ b/include/uapi/linux/thp7312.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * THine THP7312 user space header file.
+ *
+ * Copyright (C) 2021 THine Electronics, Inc.
+ * Copyright (C) 2023 Ideas on Board Oy
+ */
+
+#ifndef __UAPI_THP7312_H_
+#define __UAPI_THP7312_H_
+
+#include <linux/v4l2-controls.h>
+
+#define V4L2_CID_THP7312_LOW_LIGHT_COMPENSATION (V4L2_CID_USER_THP7312_BASE + 0x01)
+#define V4L2_CID_THP7312_AUTO_FOCUS_METHOD (V4L2_CID_USER_THP7312_BASE + 0x02)
+#define V4L2_CID_THP7312_NOISE_REDUCTION_AUTO (V4L2_CID_USER_THP7312_BASE + 0x03)
+#define V4L2_CID_THP7312_NOISE_REDUCTION_ABSOLUTE (V4L2_CID_USER_THP7312_BASE + 0x04)
+
+#endif /* __UAPI_THP7312_H_ */
diff --git a/include/uapi/linux/tps6594_pfsm.h b/include/uapi/linux/tps6594_pfsm.h
new file mode 100644
index 000000000000..c69569e0a7a2
--- /dev/null
+++ b/include/uapi/linux/tps6594_pfsm.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Userspace ABI for TPS6594 PMIC Pre-configurable Finite State Machine
+ *
+ * Copyright (C) 2023 BayLibre Incorporated - https://www.baylibre.com/
+ */
+
+#ifndef __TPS6594_PFSM_H
+#define __TPS6594_PFSM_H
+
+#include <linux/const.h>
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/**
+ * struct pmic_state_opt - PMIC state options
+ * @gpio_retention: if enabled, power rails associated with GPIO retention remain active
+ * @ddr_retention: if enabled, power rails associated with DDR retention remain active
+ * @mcu_only_startup_dest: if enabled, startup destination state is MCU_ONLY
+ */
+struct pmic_state_opt {
+ __u8 gpio_retention;
+ __u8 ddr_retention;
+ __u8 mcu_only_startup_dest;
+};
+
+/* Commands */
+#define PMIC_BASE 'P'
+
+#define PMIC_GOTO_STANDBY _IO(PMIC_BASE, 0)
+#define PMIC_GOTO_LP_STANDBY _IO(PMIC_BASE, 1)
+#define PMIC_UPDATE_PGM _IO(PMIC_BASE, 2)
+#define PMIC_SET_ACTIVE_STATE _IO(PMIC_BASE, 3)
+#define PMIC_SET_MCU_ONLY_STATE _IOW(PMIC_BASE, 4, struct pmic_state_opt)
+#define PMIC_SET_RETENTION_STATE _IOW(PMIC_BASE, 5, struct pmic_state_opt)
+
+#endif /* __TPS6594_PFSM_H */
diff --git a/include/uapi/linux/types.h b/include/uapi/linux/types.h
index 308433be33c2..6375a0684052 100644
--- a/include/uapi/linux/types.h
+++ b/include/uapi/linux/types.h
@@ -13,6 +13,10 @@
#include <linux/posix_types.h>
+#ifdef __SIZEOF_INT128__
+typedef __signed__ __int128 __s128 __attribute__((aligned(16)));
+typedef unsigned __int128 __u128 __attribute__((aligned(16)));
+#endif
/*
* Below are truly Linux-specific types that should never collide with
diff --git a/include/uapi/linux/ublk_cmd.h b/include/uapi/linux/ublk_cmd.h
index 640bf687b94a..b9cfc5c96268 100644
--- a/include/uapi/linux/ublk_cmd.h
+++ b/include/uapi/linux/ublk_cmd.h
@@ -47,6 +47,14 @@
_IOWR('u', UBLK_CMD_END_USER_RECOVERY, struct ublksrv_ctrl_cmd)
#define UBLK_U_CMD_GET_DEV_INFO2 \
_IOR('u', UBLK_CMD_GET_DEV_INFO2, struct ublksrv_ctrl_cmd)
+#define UBLK_U_CMD_GET_FEATURES \
+ _IOR('u', 0x13, struct ublksrv_ctrl_cmd)
+
+/*
+ * 64 bits are enough for now, and it should be easy to extend should we
+ * run out of feature flags
+ */
+#define UBLK_FEATURES_LEN 8
/*
* IO commands, issued by ublk server, and handled by ublk driver.
@@ -93,9 +101,29 @@
#define UBLKSRV_CMD_BUF_OFFSET 0
#define UBLKSRV_IO_BUF_OFFSET 0x80000000
-/* tag bit is 12bit, so at most 4096 IOs for each queue */
+/* the tag field is 16 bits wide, but so far the limit is at most 4096 IOs for each queue */
#define UBLK_MAX_QUEUE_DEPTH 4096
+/* single IO buffer max size is 32MB */
+#define UBLK_IO_BUF_OFF 0
+#define UBLK_IO_BUF_BITS 25
+#define UBLK_IO_BUF_BITS_MASK ((1ULL << UBLK_IO_BUF_BITS) - 1)
+
+/* so at most 64K IOs for each queue */
+#define UBLK_TAG_OFF UBLK_IO_BUF_BITS
+#define UBLK_TAG_BITS 16
+#define UBLK_TAG_BITS_MASK ((1ULL << UBLK_TAG_BITS) - 1)
+
+/* max 4096 queues */
+#define UBLK_QID_OFF (UBLK_TAG_OFF + UBLK_TAG_BITS)
+#define UBLK_QID_BITS 12
+#define UBLK_QID_BITS_MASK ((1ULL << UBLK_QID_BITS) - 1)
+
+#define UBLK_MAX_NR_QUEUES (1U << UBLK_QID_BITS)
+
+#define UBLKSRV_IO_BUF_TOTAL_BITS (UBLK_QID_OFF + UBLK_QID_BITS)
+#define UBLKSRV_IO_BUF_TOTAL_SIZE (1ULL << UBLKSRV_IO_BUF_TOTAL_BITS)
+
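With UBLK_F_USER_COPY, these fields compose the pread()/pwrite() position on /dev/ublkcN that addresses one IO's data; a sketch of the encoding, assuming the qid/tag/offset triple is already known:

#include <stdint.h>
#include <linux/ublk_cmd.h>

static inline uint64_t ublk_user_copy_pos(uint16_t qid, uint16_t tag,
					  uint32_t offset)
{
	return (uint64_t)UBLKSRV_IO_BUF_OFFSET +
	       (((uint64_t)qid << UBLK_QID_OFF) |
		((uint64_t)tag << UBLK_TAG_OFF) |
		(offset & UBLK_IO_BUF_BITS_MASK));
}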
/*
* zero copy requires 4k block size, and can remap ublk driver's io
* request into ublksrv's vm space
@@ -145,6 +173,15 @@
/* use ioctl encoding for uring command */
#define UBLK_F_CMD_IOCTL_ENCODE (1UL << 6)
+/* Copy between request and user buffer by pread()/pwrite() */
+#define UBLK_F_USER_COPY (1UL << 7)
+
+/*
+ * User space sets this flag when setting up the device to request zoned
+ * storage support. The kernel may deny the request by returning an error.
+ */
+#define UBLK_F_ZONED (1ULL << 8)
+
/* device state */
#define UBLK_S_DEV_DEAD 0
#define UBLK_S_DEV_LIVE 1
@@ -201,9 +238,27 @@ struct ublksrv_ctrl_dev_info {
#define UBLK_IO_OP_READ 0
#define UBLK_IO_OP_WRITE 1
#define UBLK_IO_OP_FLUSH 2
-#define UBLK_IO_OP_DISCARD 3
-#define UBLK_IO_OP_WRITE_SAME 4
-#define UBLK_IO_OP_WRITE_ZEROES 5
+#define UBLK_IO_OP_DISCARD 3
+#define UBLK_IO_OP_WRITE_SAME 4
+#define UBLK_IO_OP_WRITE_ZEROES 5
+#define UBLK_IO_OP_ZONE_OPEN 10
+#define UBLK_IO_OP_ZONE_CLOSE 11
+#define UBLK_IO_OP_ZONE_FINISH 12
+#define UBLK_IO_OP_ZONE_APPEND 13
+#define UBLK_IO_OP_ZONE_RESET_ALL 14
+#define UBLK_IO_OP_ZONE_RESET 15
+/*
+ * Construct a zone report. The report request is carried in `struct
+ * ublksrv_io_desc`. The `start_sector` field must be the first sector of a zone
+ * and shall indicate the first zone of the report. The `nr_zones` shall
+ * indicate how many zones should be reported at most. The report shall be
+ * delivered as a `struct blk_zone` array. To report fewer zones than requested,
+ * zero the last entry of the returned array.
+ *
+ * Related definitions (blk_zone, blk_zone_cond, blk_zone_type, ...) in
+ * include/uapi/linux/blkzoned.h are part of ublk UAPI.
+ */
+#define UBLK_IO_OP_REPORT_ZONES 18
#define UBLK_IO_F_FAILFAST_DEV (1U << 8)
#define UBLK_IO_F_FAILFAST_TRANSPORT (1U << 9)
@@ -224,7 +279,10 @@ struct ublksrv_io_desc {
/* op: bit 0-7, flags: bit 8-31 */
__u32 op_flags;
- __u32 nr_sectors;
+ union {
+ __u32 nr_sectors;
+ __u32 nr_zones; /* for UBLK_IO_OP_REPORT_ZONES */
+ };
/* start sector for this io */
__u64 start_sector;
@@ -253,11 +311,21 @@ struct ublksrv_io_cmd {
/* io result, it is valid for COMMIT* command only */
__s32 result;
- /*
- * userspace buffer address in ublksrv daemon process, valid for
- * FETCH* command only
- */
- __u64 addr;
+ union {
+ /*
+ * userspace buffer address in ublksrv daemon process, valid for
+ * FETCH* command only
+ *
+ * `addr` should not be used when UBLK_F_USER_COPY is enabled,
+ * because userspace handles data copy by pread()/pwrite() over
+ * /dev/ublkcN. But in case of UBLK_F_ZONED, this union is
+ * re-used to pass back the allocated LBA for
+ * UBLK_IO_OP_ZONE_APPEND which actually depends on
+ * UBLK_F_USER_COPY
+ */
+ __u64 addr;
+ __u64 zone_append_lba;
+ };
};
struct ublk_param_basic {
@@ -300,6 +368,13 @@ struct ublk_param_devt {
__u32 disk_minor;
};
+struct ublk_param_zoned {
+ __u32 max_open_zones;
+ __u32 max_active_zones;
+ __u32 max_zone_append_sectors;
+ __u8 reserved[20];
+};
+
struct ublk_params {
/*
* Total length of parameters, userspace has to set 'len' for both
@@ -311,11 +386,13 @@ struct ublk_params {
#define UBLK_PARAM_TYPE_BASIC (1 << 0)
#define UBLK_PARAM_TYPE_DISCARD (1 << 1)
#define UBLK_PARAM_TYPE_DEVT (1 << 2)
+#define UBLK_PARAM_TYPE_ZONED (1 << 3)
__u32 types; /* types of parameter included */
struct ublk_param_basic basic;
struct ublk_param_discard discard;
struct ublk_param_devt devt;
+ struct ublk_param_zoned zoned;
};
#endif
diff --git a/include/uapi/linux/usb/ch11.h b/include/uapi/linux/usb/ch11.h
index fb0cd24c392c..ce4c83f2e66a 100644
--- a/include/uapi/linux/usb/ch11.h
+++ b/include/uapi/linux/usb/ch11.h
@@ -15,10 +15,8 @@
/* This is arbitrary.
* From USB 2.0 spec Table 11-13, offset 7, a hub can
* have up to 255 ports. The most yet reported is 10.
- *
- * Current Wireless USB host hardware (Intel i1480 for example) allows
- * up to 22 devices to connect. Upcoming hardware might raise that
- * limit. Because the arrays need to add a bit for hub status data, we
+ * Upcoming hardware might raise that limit.
+ * Because the arrays need to add a bit for hub status data, we
* use 31, so plus one evens out to four bytes.
*/
#define USB_MAXCHILDREN 31
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index b17e3a21b15f..8a147abfc680 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -3,7 +3,7 @@
* This file holds USB constants and structures that are needed for
* USB device APIs. These are used by the USB device model, which is
* defined in chapter 9 of the USB 2.0 specification and in the
- * Wireless USB 1.0 (spread around). Linux has several APIs in C that
+ * Wireless USB 1.0 spec (now defunct). Linux has several APIs in C that
* need these:
*
* - the master/host side Linux-USB kernel driver API;
@@ -14,9 +14,6 @@
* act either as a USB master/host or as a USB slave/device. That means
* the master and slave side APIs benefit from working well together.
*
- * There's also "Wireless USB", using low power short range radios for
- * peripheral interconnection but otherwise building on the USB framework.
- *
* Note all descriptors are declared '__attribute__((packed))' so that:
*
* [a] they never get padded, either internally (USB spec writers
@@ -376,7 +373,10 @@ struct usb_string_descriptor {
__u8 bLength;
__u8 bDescriptorType;
- __le16 wData[1]; /* UTF-16LE encoded */
+ union {
+ __le16 legacy_padding;
+ __DECLARE_FLEX_ARRAY(__le16, wData); /* UTF-16LE encoded */
+ };
} __attribute__ ((packed));
/* note that "string" zero is special, it holds language codes that
@@ -981,7 +981,11 @@ struct usb_ssp_cap_descriptor {
#define USB_SSP_MIN_RX_LANE_COUNT (0xf << 8)
#define USB_SSP_MIN_TX_LANE_COUNT (0xf << 12)
__le16 wReserved;
- __le32 bmSublinkSpeedAttr[1]; /* list of sublink speed attrib entries */
+ union {
+ __le32 legacy_padding;
+ /* list of sublink speed attrib entries */
+ __DECLARE_FLEX_ARRAY(__le32, bmSublinkSpeedAttr);
+ };
#define USB_SSP_SUBLINK_SPEED_SSID (0xf) /* sublink speed ID */
#define USB_SSP_SUBLINK_SPEED_LSE (0x3 << 4) /* Lanespeed exponent */
#define USB_SSP_SUBLINK_SPEED_LSE_BPS 0
diff --git a/include/uapi/linux/usb/functionfs.h b/include/uapi/linux/usb/functionfs.h
index d77ee6b65328..078098e73fd3 100644
--- a/include/uapi/linux/usb/functionfs.h
+++ b/include/uapi/linux/usb/functionfs.h
@@ -73,8 +73,10 @@ struct usb_os_desc_header {
struct usb_ext_compat_desc {
__u8 bFirstInterfaceNumber;
__u8 Reserved1;
- __u8 CompatibleID[8];
- __u8 SubCompatibleID[8];
+ __struct_group(/* no tag */, IDs, /* no attrs */,
+ __u8 CompatibleID[8];
+ __u8 SubCompatibleID[8];
+ );
__u8 Reserved2[6];
};
diff --git a/include/uapi/linux/usb/raw_gadget.h b/include/uapi/linux/usb/raw_gadget.h
index c7d2199134d7..f0224a8dc858 100644
--- a/include/uapi/linux/usb/raw_gadget.h
+++ b/include/uapi/linux/usb/raw_gadget.h
@@ -44,6 +44,16 @@ enum usb_raw_event_type {
/* This event is queued when a new control request arrived to ep0. */
USB_RAW_EVENT_CONTROL = 2,
+ /*
+ * These events are queued when the gadget driver is suspended,
+ * resumed, reset, or disconnected. Note that some UDCs (e.g. dwc2)
+ * report a disconnect event instead of a reset.
+ */
+ USB_RAW_EVENT_SUSPEND = 3,
+ USB_RAW_EVENT_RESUME = 4,
+ USB_RAW_EVENT_RESET = 5,
+ USB_RAW_EVENT_DISCONNECT = 6,
+
/* The list might grow in the future. */
};
@@ -54,8 +64,8 @@ enum usb_raw_event_type {
* actual length of the fetched event data.
* @data: A buffer to store the fetched event data.
*
- * Currently the fetched data buffer is empty for USB_RAW_EVENT_CONNECT,
- * and contains struct usb_ctrlrequest for USB_RAW_EVENT_CONTROL.
+ * The fetched event data buffer contains struct usb_ctrlrequest for
+ * USB_RAW_EVENT_CONTROL and is empty for other events.
*/
struct usb_raw_event {
__u32 type;
diff --git a/include/uapi/linux/user_events.h b/include/uapi/linux/user_events.h
index 2984aae4a2b4..f74f3aedd49c 100644
--- a/include/uapi/linux/user_events.h
+++ b/include/uapi/linux/user_events.h
@@ -17,6 +17,15 @@
/* Create dynamic location entry within a 32-bit value */
#define DYN_LOC(offset, size) ((size) << 16 | (offset))
+/* List of supported registration flags */
+enum user_reg_flag {
+ /* Event will not delete upon last reference closing */
+ USER_EVENT_REG_PERSIST = 1U << 0,
+
+ /* This value or above is currently non-ABI */
+ USER_EVENT_REG_MAX = 1U << 1,
+};
+
/*
* Describes an event registration and stores the results of the registration.
* This structure is passed to the DIAG_IOCSREG ioctl, callers at a minimum
@@ -33,7 +42,7 @@ struct user_reg {
/* Input: Enable size in bytes at address */
__u8 enable_size;
- /* Input: Flags for future use, set to 0 */
+ /* Input: Flags to use, if any */
__u16 flags;
/* Input: Address to update when enabled */
diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h
index 66dd4cd277bd..2841e4ea8f2c 100644
--- a/include/uapi/linux/userfaultfd.h
+++ b/include/uapi/linux/userfaultfd.h
@@ -39,7 +39,10 @@
UFFD_FEATURE_MINOR_SHMEM | \
UFFD_FEATURE_EXACT_ADDRESS | \
UFFD_FEATURE_WP_HUGETLBFS_SHMEM | \
- UFFD_FEATURE_WP_UNPOPULATED)
+ UFFD_FEATURE_WP_UNPOPULATED | \
+ UFFD_FEATURE_POISON | \
+ UFFD_FEATURE_WP_ASYNC | \
+ UFFD_FEATURE_MOVE)
#define UFFD_API_IOCTLS \
((__u64)1 << _UFFDIO_REGISTER | \
(__u64)1 << _UFFDIO_UNREGISTER | \
@@ -48,13 +51,16 @@
((__u64)1 << _UFFDIO_WAKE | \
(__u64)1 << _UFFDIO_COPY | \
(__u64)1 << _UFFDIO_ZEROPAGE | \
+ (__u64)1 << _UFFDIO_MOVE | \
(__u64)1 << _UFFDIO_WRITEPROTECT | \
- (__u64)1 << _UFFDIO_CONTINUE)
+ (__u64)1 << _UFFDIO_CONTINUE | \
+ (__u64)1 << _UFFDIO_POISON)
#define UFFD_API_RANGE_IOCTLS_BASIC \
((__u64)1 << _UFFDIO_WAKE | \
(__u64)1 << _UFFDIO_COPY | \
+ (__u64)1 << _UFFDIO_WRITEPROTECT | \
(__u64)1 << _UFFDIO_CONTINUE | \
- (__u64)1 << _UFFDIO_WRITEPROTECT)
+ (__u64)1 << _UFFDIO_POISON)
/*
* Valid ioctl command number range with this API is from 0x00 to
@@ -69,8 +75,10 @@
#define _UFFDIO_WAKE (0x02)
#define _UFFDIO_COPY (0x03)
#define _UFFDIO_ZEROPAGE (0x04)
+#define _UFFDIO_MOVE (0x05)
#define _UFFDIO_WRITEPROTECT (0x06)
#define _UFFDIO_CONTINUE (0x07)
+#define _UFFDIO_POISON (0x08)
#define _UFFDIO_API (0x3F)
/* userfaultfd ioctl ids */
@@ -87,10 +95,14 @@
struct uffdio_copy)
#define UFFDIO_ZEROPAGE _IOWR(UFFDIO, _UFFDIO_ZEROPAGE, \
struct uffdio_zeropage)
+#define UFFDIO_MOVE _IOWR(UFFDIO, _UFFDIO_MOVE, \
+ struct uffdio_move)
#define UFFDIO_WRITEPROTECT _IOWR(UFFDIO, _UFFDIO_WRITEPROTECT, \
struct uffdio_writeprotect)
#define UFFDIO_CONTINUE _IOWR(UFFDIO, _UFFDIO_CONTINUE, \
struct uffdio_continue)
+#define UFFDIO_POISON _IOWR(UFFDIO, _UFFDIO_POISON, \
+ struct uffdio_poison)
/* read() structure */
struct uffd_msg {
@@ -210,6 +222,14 @@ struct uffdio_api {
* (i.e. empty ptes). This will be the default behavior for shmem
* & hugetlbfs, so this flag only affects anonymous memory behavior
* when userfault write-protection mode is registered.
+ *
+ * UFFD_FEATURE_WP_ASYNC indicates that userfaultfd write-protection
+ * supports an asynchronous mode in which the write fault is
+ * automatically resolved and write-protection is unset.
+ * It implies UFFD_FEATURE_WP_UNPOPULATED.
+ *
+ * UFFD_FEATURE_MOVE indicates that the kernel supports moving
+ * existing page contents from userspace.
*/
#define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1<<0)
#define UFFD_FEATURE_EVENT_FORK (1<<1)
@@ -225,6 +245,9 @@ struct uffdio_api {
#define UFFD_FEATURE_EXACT_ADDRESS (1<<11)
#define UFFD_FEATURE_WP_HUGETLBFS_SHMEM (1<<12)
#define UFFD_FEATURE_WP_UNPOPULATED (1<<13)
+#define UFFD_FEATURE_POISON (1<<14)
+#define UFFD_FEATURE_WP_ASYNC (1<<15)
+#define UFFD_FEATURE_MOVE (1<<16)
__u64 features;
__u64 ioctls;
@@ -321,6 +344,36 @@ struct uffdio_continue {
__s64 mapped;
};
+struct uffdio_poison {
+ struct uffdio_range range;
+#define UFFDIO_POISON_MODE_DONTWAKE ((__u64)1<<0)
+ __u64 mode;
+
+ /*
+ * Fields below here are written by the ioctl and must be at the end:
+ * the copy_from_user will not read past here.
+ */
+ __s64 updated;
+};
+
+struct uffdio_move {
+ __u64 dst;
+ __u64 src;
+ __u64 len;
+ /*
+ * Especially if used to atomically remove memory from the
+ * address space the wake on the dst range is not needed.
+ */
+#define UFFDIO_MOVE_MODE_DONTWAKE ((__u64)1<<0)
+#define UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES ((__u64)1<<1)
+ __u64 mode;
+ /*
+ * "move" is written by the ioctl and must be at the end: the
+ * copy_from_user will not read the last 8 bytes.
+ */
+ __s64 move;
+};
+
/*
* Flags for the userfaultfd(2) system call itself.
*/
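A sketch of driving the new UFFDIO_MOVE from userspace, assuming UFFD_FEATURE_MOVE was negotiated through UFFDIO_API beforehand:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

static int64_t uffd_move(int uffd, uint64_t dst, uint64_t src, uint64_t len)
{
	struct uffdio_move move = {
		.dst = dst,
		.src = src,
		.len = len,
		.mode = 0,	/* or UFFDIO_MOVE_MODE_DONTWAKE */
	};

	if (ioctl(uffd, UFFDIO_MOVE, &move))
		return -1;
	return move.move;	/* bytes moved, written back by the kernel */
}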
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index 5e80daa4ffe0..99c3f5e99da7 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -203,6 +203,18 @@ enum v4l2_colorfx {
*/
#define V4L2_CID_USER_ASPEED_BASE (V4L2_CID_USER_BASE + 0x11a0)
+/*
+ * The base for Nuvoton NPCM driver controls.
+ * We reserve 16 controls for this driver.
+ */
+#define V4L2_CID_USER_NPCM_BASE (V4L2_CID_USER_BASE + 0x11b0)
+
+/*
+ * The base for THine THP7312 driver controls.
+ * We reserve 32 controls for this driver.
+ */
+#define V4L2_CID_USER_THP7312_BASE (V4L2_CID_USER_BASE + 0x11c0)
+
/* MPEG-class control IDs */
/* The MPEG controls are applicable to all codec controls
* and the 'MPEG' part of the define is historical */
@@ -804,6 +816,88 @@ enum v4l2_mpeg_video_frame_skip_mode {
#define V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY (V4L2_CID_CODEC_BASE + 653)
#define V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE (V4L2_CID_CODEC_BASE + 654)
+#define V4L2_CID_MPEG_VIDEO_AV1_PROFILE (V4L2_CID_CODEC_BASE + 655)
+/**
+ * enum v4l2_mpeg_video_av1_profile - AV1 profiles
+ *
+ * @V4L2_MPEG_VIDEO_AV1_PROFILE_MAIN: compliant decoders must be able to decode
+ * streams with seq_profile equal to 0.
+ * @V4L2_MPEG_VIDEO_AV1_PROFILE_HIGH: compliant decoders must be able to decode
+ * streams with seq_profile less than or equal to 1.
+ * @V4L2_MPEG_VIDEO_AV1_PROFILE_PROFESSIONAL: compliant decoders must be able to
+ * decode streams with seq_profile less than or equal to 2.
+ *
+ * Conveys the highest profile a decoder can work with.
+ */
+enum v4l2_mpeg_video_av1_profile {
+ V4L2_MPEG_VIDEO_AV1_PROFILE_MAIN = 0,
+ V4L2_MPEG_VIDEO_AV1_PROFILE_HIGH = 1,
+ V4L2_MPEG_VIDEO_AV1_PROFILE_PROFESSIONAL = 2,
+};
+
+#define V4L2_CID_MPEG_VIDEO_AV1_LEVEL (V4L2_CID_CODEC_BASE + 656)
+/**
+ * enum v4l2_mpeg_video_av1_level - AV1 levels
+ *
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_2_0: Level 2.0.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_2_1: Level 2.1.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_2_2: Level 2.2.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_2_3: Level 2.3.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_3_0: Level 3.0.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_3_1: Level 3.1.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_3_2: Level 3.2.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_3_3: Level 3.3.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_4_0: Level 4.0.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_4_1: Level 4.1.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_4_2: Level 4.2.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_4_3: Level 4.3.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_5_0: Level 5.0.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_5_1: Level 5.1.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_5_2: Level 5.2.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_5_3: Level 5.3.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_6_0: Level 6.0.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_6_1: Level 6.1.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_6_2: Level 6.2.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_6_3: Level 6.3.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_7_0: Level 7.0.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_7_1: Level 7.1.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_7_2: Level 7.2.
+ * @V4L2_MPEG_VIDEO_AV1_LEVEL_7_3: Level 7.3.
+ *
+ * Conveys the highest level a decoder can work with.
+ */
+enum v4l2_mpeg_video_av1_level {
+ V4L2_MPEG_VIDEO_AV1_LEVEL_2_0 = 0,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_2_1 = 1,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_2_2 = 2,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_2_3 = 3,
+
+ V4L2_MPEG_VIDEO_AV1_LEVEL_3_0 = 4,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_3_1 = 5,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_3_2 = 6,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_3_3 = 7,
+
+ V4L2_MPEG_VIDEO_AV1_LEVEL_4_0 = 8,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_4_1 = 9,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_4_2 = 10,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_4_3 = 11,
+
+ V4L2_MPEG_VIDEO_AV1_LEVEL_5_0 = 12,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_5_1 = 13,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_5_2 = 14,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_5_3 = 15,
+
+ V4L2_MPEG_VIDEO_AV1_LEVEL_6_0 = 16,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_6_1 = 17,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_6_2 = 18,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_6_3 = 19,
+
+ V4L2_MPEG_VIDEO_AV1_LEVEL_7_0 = 20,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_7_1 = 21,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_7_2 = 22,
+ V4L2_MPEG_VIDEO_AV1_LEVEL_7_3 = 23
+};
+
/* MPEG-class control IDs specific to the CX2341x driver as defined by V4L2 */
#define V4L2_CID_CODEC_CX2341X_BASE (V4L2_CTRL_CLASS_CODEC | 0x1000)
#define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE (V4L2_CID_CODEC_CX2341X_BASE+0)
@@ -2385,6 +2479,9 @@ struct v4l2_ctrl_hevc_slice_params {
* @poc_st_curr_after: provides the index of the short term after references
* in DPB array
* @poc_lt_curr: provides the index of the long term references in DPB array
+ * @num_delta_pocs_of_ref_rps_idx: same as the derived value NumDeltaPocs[RefRpsIdx],
+ * can be used to parse the RPS data in slice headers
+ * instead of skipping it with @short_term_ref_pic_set_size.
* @reserved: padding field. Should be zeroed by applications.
* @dpb: the decoded picture buffer, for meta-data about reference frames
* @flags: see V4L2_HEVC_DECODE_PARAM_FLAG_{}
@@ -2400,7 +2497,8 @@ struct v4l2_ctrl_hevc_decode_params {
__u8 poc_st_curr_before[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
__u8 poc_st_curr_after[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
__u8 poc_lt_curr[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
- __u8 reserved[4];
+ __u8 num_delta_pocs_of_ref_rps_idx;
+ __u8 reserved[3];
struct v4l2_hevc_dpb_entry dpb[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
__u64 flags;
};
@@ -2754,6 +2852,645 @@ struct v4l2_ctrl_vp9_compressed_hdr {
struct v4l2_vp9_mv_probs mv;
};
+/* Stateless AV1 controls */
+
+#define V4L2_AV1_TOTAL_REFS_PER_FRAME 8
+#define V4L2_AV1_CDEF_MAX 8
+#define V4L2_AV1_NUM_PLANES_MAX 3 /* 1 if monochrome, 3 otherwise */
+#define V4L2_AV1_MAX_SEGMENTS 8
+#define V4L2_AV1_MAX_OPERATING_POINTS (1 << 5) /* 5 bits to encode */
+#define V4L2_AV1_REFS_PER_FRAME 7
+#define V4L2_AV1_MAX_NUM_Y_POINTS (1 << 4) /* 4 bits to encode */
+#define V4L2_AV1_MAX_NUM_CB_POINTS (1 << 4) /* 4 bits to encode */
+#define V4L2_AV1_MAX_NUM_CR_POINTS (1 << 4) /* 4 bits to encode */
+#define V4L2_AV1_AR_COEFFS_SIZE 25 /* (2 * 3 * (3 + 1)) + 1 */
+#define V4L2_AV1_MAX_NUM_PLANES 3
+#define V4L2_AV1_MAX_TILE_COLS 64
+#define V4L2_AV1_MAX_TILE_ROWS 64
+#define V4L2_AV1_MAX_TILE_COUNT 512
+
+#define V4L2_AV1_SEQUENCE_FLAG_STILL_PICTURE 0x00000001
+#define V4L2_AV1_SEQUENCE_FLAG_USE_128X128_SUPERBLOCK 0x00000002
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_FILTER_INTRA 0x00000004
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_INTRA_EDGE_FILTER 0x00000008
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_INTERINTRA_COMPOUND 0x00000010
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_MASKED_COMPOUND 0x00000020
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_WARPED_MOTION 0x00000040
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_DUAL_FILTER 0x00000080
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_ORDER_HINT 0x00000100
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_JNT_COMP 0x00000200
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_REF_FRAME_MVS 0x00000400
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_SUPERRES 0x00000800
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_CDEF 0x00001000
+#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_RESTORATION 0x00002000
+#define V4L2_AV1_SEQUENCE_FLAG_MONO_CHROME 0x00004000
+#define V4L2_AV1_SEQUENCE_FLAG_COLOR_RANGE 0x00008000
+#define V4L2_AV1_SEQUENCE_FLAG_SUBSAMPLING_X 0x00010000
+#define V4L2_AV1_SEQUENCE_FLAG_SUBSAMPLING_Y 0x00020000
+#define V4L2_AV1_SEQUENCE_FLAG_FILM_GRAIN_PARAMS_PRESENT 0x00040000
+#define V4L2_AV1_SEQUENCE_FLAG_SEPARATE_UV_DELTA_Q 0x00080000
+
+#define V4L2_CID_STATELESS_AV1_SEQUENCE (V4L2_CID_CODEC_STATELESS_BASE + 500)
+/**
+ * struct v4l2_ctrl_av1_sequence - AV1 Sequence
+ *
+ * Represents an AV1 Sequence OBU. See section 5.5 "Sequence header OBU syntax"
+ * for more details.
+ *
+ * @flags: See V4L2_AV1_SEQUENCE_FLAG_{}.
+ * @seq_profile: specifies the features that can be used in the coded video
+ * sequence.
+ * @order_hint_bits: specifies the number of bits used for the order_hint field
+ * at each frame.
+ * @bit_depth: the bitdepth to use for the sequence as described in section
+ * 5.5.2 "Color config syntax".
+ * @reserved: padding field. Should be zeroed by applications.
+ * @max_frame_width_minus_1: specifies the maximum frame width minus 1 for the
+ * frames represented by this sequence header.
+ * @max_frame_height_minus_1: specifies the maximum frame height minus 1 for the
+ * frames represented by this sequence header.
+ */
+struct v4l2_ctrl_av1_sequence {
+ __u32 flags;
+ __u8 seq_profile;
+ __u8 order_hint_bits;
+ __u8 bit_depth;
+ __u8 reserved;
+ __u16 max_frame_width_minus_1;
+ __u16 max_frame_height_minus_1;
+};
+
+#define V4L2_CID_STATELESS_AV1_TILE_GROUP_ENTRY (V4L2_CID_CODEC_STATELESS_BASE + 501)
+/**
+ * struct v4l2_ctrl_av1_tile_group_entry - AV1 Tile Group entry
+ *
+ * Represents a single AV1 tile inside an AV1 Tile Group. Note that MiRowStart,
+ * MiRowEnd, MiColStart and MiColEnd can be retrieved from struct
+ * v4l2_av1_tile_info in struct v4l2_ctrl_av1_frame using tile_row and
+ * tile_col. See section 6.10.1 "General tile group OBU semantics" for more
+ * details.
+ *
+ * @tile_offset: offset from the OBU data, i.e. where the coded tile data
+ * actually starts.
+ * @tile_size: specifies the size in bytes of the coded tile. Equivalent to
+ * "TileSize" in the AV1 Specification.
+ * @tile_row: specifies the row of the current tile. Equivalent to "TileRow" in
+ * the AV1 Specification.
+ * @tile_col: specifies the col of the current tile. Equivalent to "TileCol" in
+ * the AV1 Specification.
+ */
+struct v4l2_ctrl_av1_tile_group_entry {
+ __u32 tile_offset;
+ __u32 tile_size;
+ __u32 tile_row;
+ __u32 tile_col;
+};
+
+/**
+ * enum v4l2_av1_warp_model - AV1 Warp Model as described in section 3
+ * "Symbols and abbreviated terms" of the AV1 Specification.
+ *
+ * @V4L2_AV1_WARP_MODEL_IDENTITY: Warp model is just an identity transform.
+ * @V4L2_AV1_WARP_MODEL_TRANSLATION: Warp model is a pure translation.
+ * @V4L2_AV1_WARP_MODEL_ROTZOOM: Warp model is a rotation + symmetric zoom +
+ * translation.
+ * @V4L2_AV1_WARP_MODEL_AFFINE: Warp model is a general affine transform.
+ */
+enum v4l2_av1_warp_model {
+ V4L2_AV1_WARP_MODEL_IDENTITY = 0,
+ V4L2_AV1_WARP_MODEL_TRANSLATION = 1,
+ V4L2_AV1_WARP_MODEL_ROTZOOM = 2,
+ V4L2_AV1_WARP_MODEL_AFFINE = 3,
+};
+
+/**
+ * enum v4l2_av1_reference_frame - AV1 reference frames
+ *
+ * @V4L2_AV1_REF_INTRA_FRAME: Intra Frame Reference
+ * @V4L2_AV1_REF_LAST_FRAME: Last Reference Frame
+ * @V4L2_AV1_REF_LAST2_FRAME: Last2 Reference Frame
+ * @V4L2_AV1_REF_LAST3_FRAME: Last3 Reference Frame
+ * @V4L2_AV1_REF_GOLDEN_FRAME: Golden Reference Frame
+ * @V4L2_AV1_REF_BWDREF_FRAME: BWD Reference Frame
+ * @V4L2_AV1_REF_ALTREF2_FRAME: Alternative2 Reference Frame
+ * @V4L2_AV1_REF_ALTREF_FRAME: Alternative Reference Frame
+ */
+enum v4l2_av1_reference_frame {
+ V4L2_AV1_REF_INTRA_FRAME = 0,
+ V4L2_AV1_REF_LAST_FRAME = 1,
+ V4L2_AV1_REF_LAST2_FRAME = 2,
+ V4L2_AV1_REF_LAST3_FRAME = 3,
+ V4L2_AV1_REF_GOLDEN_FRAME = 4,
+ V4L2_AV1_REF_BWDREF_FRAME = 5,
+ V4L2_AV1_REF_ALTREF2_FRAME = 6,
+ V4L2_AV1_REF_ALTREF_FRAME = 7,
+};
+
+#define V4L2_AV1_GLOBAL_MOTION_IS_INVALID(ref) (1 << (ref))
+
+#define V4L2_AV1_GLOBAL_MOTION_FLAG_IS_GLOBAL 0x1
+#define V4L2_AV1_GLOBAL_MOTION_FLAG_IS_ROT_ZOOM 0x2
+#define V4L2_AV1_GLOBAL_MOTION_FLAG_IS_TRANSLATION 0x4
+/**
+ * struct v4l2_av1_global_motion - AV1 Global Motion parameters as described in
+ * section 6.8.17 "Global motion params semantics" of the AV1 specification.
+ *
+ * @flags: A bitfield containing the flags per reference frame. See
+ * V4L2_AV1_GLOBAL_MOTION_FLAG_{}
+ * @type: The type of global motion transform used.
+ * @params: this field has the same meaning as "gm_params" in the AV1
+ * specification.
+ * @invalid: bitfield indicating whether the global motion params are invalid
+ * for a given reference frame. See section 7.11.3.6 Setup shear process and
+ * the variable "warpValid". Use V4L2_AV1_GLOBAL_MOTION_IS_INVALID(ref) to
+ * create a suitable mask.
+ * @reserved: padding field. Should be zeroed by applications.
+ */
+struct v4l2_av1_global_motion {
+ __u8 flags[V4L2_AV1_TOTAL_REFS_PER_FRAME];
+ enum v4l2_av1_warp_model type[V4L2_AV1_TOTAL_REFS_PER_FRAME];
+ __s32 params[V4L2_AV1_TOTAL_REFS_PER_FRAME][6];
+ __u8 invalid;
+ __u8 reserved[3];
+};
+
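A short sketch of how a driver might test these fields for one reference; the helper name is illustrative only:

	static int av1_global_motion_usable(const struct v4l2_av1_global_motion *gm,
					    enum v4l2_av1_reference_frame ref)
	{
		/* a set bit in @invalid means "warpValid == 0" for that ref */
		if (gm->invalid & V4L2_AV1_GLOBAL_MOTION_IS_INVALID(ref))
			return 0;
		return gm->type[ref] != V4L2_AV1_WARP_MODEL_IDENTITY;
	}
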
+/**
+ * enum v4l2_av1_frame_restoration_type - AV1 Frame Restoration Type
+ * @V4L2_AV1_FRAME_RESTORE_NONE: no filtering is applied.
+ * @V4L2_AV1_FRAME_RESTORE_WIENER: Wiener filter process is invoked.
+ * @V4L2_AV1_FRAME_RESTORE_SGRPROJ: self guided filter process is invoked.
+ * @V4L2_AV1_FRAME_RESTORE_SWITCHABLE: restoration filter is switchable.
+ */
+enum v4l2_av1_frame_restoration_type {
+ V4L2_AV1_FRAME_RESTORE_NONE = 0,
+ V4L2_AV1_FRAME_RESTORE_WIENER = 1,
+ V4L2_AV1_FRAME_RESTORE_SGRPROJ = 2,
+ V4L2_AV1_FRAME_RESTORE_SWITCHABLE = 3,
+};
+
+#define V4L2_AV1_LOOP_RESTORATION_FLAG_USES_LR 0x1
+#define V4L2_AV1_LOOP_RESTORATION_FLAG_USES_CHROMA_LR 0x2
+
+/**
+ * struct v4l2_av1_loop_restoration - AV1 Loop Restoration as described in
+ * section 6.10.15 "Loop restoration params semantics" of the AV1 specification.
+ *
+ * @flags: See V4L2_AV1_LOOP_RESTORATION_FLAG_{}.
+ * @lr_unit_shift: specifies if the luma restoration size should be halved.
+ * @lr_uv_shift: specifies if the chroma size should be half the luma size.
+ * @reserved: padding field. Should be zeroed by applications.
+ * @frame_restoration_type: specifies the type of restoration used for each
+ * plane. See enum v4l2_av1_frame_restoration_type.
+ * @loop_restoration_size: specifies the size of loop restoration units in units
+ * of samples in the current plane.
+ */
+struct v4l2_av1_loop_restoration {
+ __u8 flags;
+ __u8 lr_unit_shift;
+ __u8 lr_uv_shift;
+ __u8 reserved;
+ enum v4l2_av1_frame_restoration_type frame_restoration_type[V4L2_AV1_NUM_PLANES_MAX];
+ __u32 loop_restoration_size[V4L2_AV1_MAX_NUM_PLANES];
+};
+
+/**
+ * struct v4l2_av1_cdef - AV1 CDEF params semantics as described in section
+ * 6.10.14 "CDEF params semantics" of the AV1 specification
+ *
+ * @damping_minus_3: controls the amount of damping in the deringing filter.
+ * @bits: specifies the number of bits needed to specify which CDEF filter to
+ * apply.
+ * @y_pri_strength: specifies the strength of the primary filter for the luma
+ * plane.
+ * @y_sec_strength: specifies the strength of the secondary filter for the
+ * luma plane.
+ * @uv_pri_strength: specifies the strength of the primary filter for the
+ * chroma planes.
+ * @uv_sec_strength: specifies the strength of the secondary filter for the
+ * chroma planes.
+ */
+struct v4l2_av1_cdef {
+ __u8 damping_minus_3;
+ __u8 bits;
+ __u8 y_pri_strength[V4L2_AV1_CDEF_MAX];
+ __u8 y_sec_strength[V4L2_AV1_CDEF_MAX];
+ __u8 uv_pri_strength[V4L2_AV1_CDEF_MAX];
+ __u8 uv_sec_strength[V4L2_AV1_CDEF_MAX];
+};
+
+#define V4L2_AV1_SEGMENTATION_FLAG_ENABLED 0x1
+#define V4L2_AV1_SEGMENTATION_FLAG_UPDATE_MAP 0x2
+#define V4L2_AV1_SEGMENTATION_FLAG_TEMPORAL_UPDATE 0x4
+#define V4L2_AV1_SEGMENTATION_FLAG_UPDATE_DATA 0x8
+#define V4L2_AV1_SEGMENTATION_FLAG_SEG_ID_PRE_SKIP 0x10
+
+/**
+ * enum v4l2_av1_segment_feature - AV1 segment features as described in section
+ * 3 "Symbols and abbreviated terms" of the AV1 specification.
+ *
+ * @V4L2_AV1_SEG_LVL_ALT_Q: Index for quantizer segment feature.
+ * @V4L2_AV1_SEG_LVL_ALT_LF_Y_V: Index for vertical luma loop filter segment
+ * feature.
+ * @V4L2_AV1_SEG_LVL_REF_FRAME: Index for reference frame segment feature.
+ * @V4L2_AV1_SEG_LVL_REF_SKIP: Index for skip segment feature.
+ * @V4L2_AV1_SEG_LVL_REF_GLOBALMV: Index for global mv feature.
+ * @V4L2_AV1_SEG_LVL_MAX: Number of segment features.
+ */
+enum v4l2_av1_segment_feature {
+ V4L2_AV1_SEG_LVL_ALT_Q = 0,
+ V4L2_AV1_SEG_LVL_ALT_LF_Y_V = 1,
+ V4L2_AV1_SEG_LVL_REF_FRAME = 5,
+ V4L2_AV1_SEG_LVL_REF_SKIP = 6,
+ V4L2_AV1_SEG_LVL_REF_GLOBALMV = 7,
+ V4L2_AV1_SEG_LVL_MAX = 8
+};
+
+#define V4L2_AV1_SEGMENT_FEATURE_ENABLED(id) (1 << (id))
+
+/**
+ * struct v4l2_av1_segmentation - AV1 Segmentation params as defined in section
+ * 6.8.13 "Segmentation params semantics" of the AV1 specification.
+ *
+ * @flags: see V4L2_AV1_SEGMENTATION_FLAG_{}.
+ * @last_active_seg_id: indicates the highest numbered segment id that has some
+ * enabled feature. This is used when decoding the segment id to only decode
+ * choices corresponding to used segments.
+ * @feature_enabled: bitmask defining which features are enabled in each
+ * segment. Use V4L2_AV1_SEGMENT_FEATURE_ENABLED to build a suitable mask.
+ * @feature_data: data attached to each feature. Data entry is only valid if the
+ * feature is enabled.
+ */
+struct v4l2_av1_segmentation {
+ __u8 flags;
+ __u8 last_active_seg_id;
+ __u8 feature_enabled[V4L2_AV1_MAX_SEGMENTS];
+ __s16 feature_data[V4L2_AV1_MAX_SEGMENTS][V4L2_AV1_SEG_LVL_MAX];
+};
+
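For example, a decoder could fetch the quantizer delta of a segment like this ("seg" and "seg_id" are assumptions):

	if (seg->feature_enabled[seg_id] &
	    V4L2_AV1_SEGMENT_FEATURE_ENABLED(V4L2_AV1_SEG_LVL_ALT_Q)) {
		/* the data entry is only meaningful when the feature is on */
		__s16 delta_q = seg->feature_data[seg_id][V4L2_AV1_SEG_LVL_ALT_Q];
	}
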
+#define V4L2_AV1_LOOP_FILTER_FLAG_DELTA_ENABLED 0x1
+#define V4L2_AV1_LOOP_FILTER_FLAG_DELTA_UPDATE 0x2
+#define V4L2_AV1_LOOP_FILTER_FLAG_DELTA_LF_PRESENT 0x4
+#define V4L2_AV1_LOOP_FILTER_FLAG_DELTA_LF_MULTI 0x8
+
+/**
+ * struct v4l2_av1_loop_filter - AV1 Loop filter params as defined in section
+ * 6.8.10 "Loop filter semantics" and 6.8.16 "Loop filter delta parameters
+ * semantics" of the AV1 specification.
+ *
+ * @flags: see V4L2_AV1_LOOP_FILTER_FLAG_{}
+ * @level: an array containing loop filter strength values. Different loop
+ * filter strength values from the array are used depending on the image plane
+ * being filtered, and the edge direction (vertical or horizontal) being
+ * filtered.
+ * @sharpness: indicates the sharpness level. The loop_filter_level and
+ * loop_filter_sharpness together determine when a block edge is filtered, and
+ * by how much the filtering can change the sample values. The loop filter
+ * process is described in section 7.14 of the AV1 specification.
+ * @ref_deltas: contains the adjustment needed for the filter level based on the
+ * chosen reference frame. If this syntax element is not present, it maintains
+ * its previous value.
+ * @mode_deltas: contains the adjustment needed for the filter level based on
+ * the chosen mode. If this syntax element is not present, it maintains its
+ * previous value.
+ * @delta_lf_res: specifies the left shift which should be applied to decoded
+ * loop filter delta values.
+ */
+struct v4l2_av1_loop_filter {
+ __u8 flags;
+ __u8 level[4];
+ __u8 sharpness;
+ __s8 ref_deltas[V4L2_AV1_TOTAL_REFS_PER_FRAME];
+ __s8 mode_deltas[2];
+ __u8 delta_lf_res;
+};
+
+#define V4L2_AV1_QUANTIZATION_FLAG_DIFF_UV_DELTA 0x1
+#define V4L2_AV1_QUANTIZATION_FLAG_USING_QMATRIX 0x2
+#define V4L2_AV1_QUANTIZATION_FLAG_DELTA_Q_PRESENT 0x4
+
+/**
+ * struct v4l2_av1_quantization - AV1 Quantization params as defined in section
+ * 6.8.11 "Quantization params semantics" of the AV1 specification.
+ *
+ * @flags: see V4L2_AV1_QUANTIZATION_FLAG_{}
+ * @base_q_idx: indicates the base frame qindex. This is used for Y AC
+ * coefficients and as the base value for the other quantizers.
+ * @delta_q_y_dc: indicates the Y DC quantizer relative to base_q_idx.
+ * @delta_q_u_dc: indicates the U DC quantizer relative to base_q_idx.
+ * @delta_q_u_ac: indicates the U AC quantizer relative to base_q_idx.
+ * @delta_q_v_dc: indicates the V DC quantizer relative to base_q_idx.
+ * @delta_q_v_ac: indicates the V AC quantizer relative to base_q_idx.
+ * @qm_y: specifies the level in the quantizer matrix that should be used for
+ * luma plane decoding.
+ * @qm_u: specifies the level in the quantizer matrix that should be used for
+ * chroma U plane decoding.
+ * @qm_v: specifies the level in the quantizer matrix that should be used for
+ * chroma V plane decoding.
+ * @delta_q_res: specifies the left shift which should be applied to decoded
+ * quantizer index delta values.
+ */
+struct v4l2_av1_quantization {
+ __u8 flags;
+ __u8 base_q_idx;
+ __s8 delta_q_y_dc;
+ __s8 delta_q_u_dc;
+ __s8 delta_q_u_ac;
+ __s8 delta_q_v_dc;
+ __s8 delta_q_v_ac;
+ __u8 qm_y;
+ __u8 qm_u;
+ __u8 qm_v;
+ __u8 delta_q_res;
+};
+
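The deltas combine with base_q_idx as in the AV1 quantizer index derivation; a sketch, assuming the usual clamping to [0, 255] and a clamp() helper:

	int y_dc_qidx = clamp(q->base_q_idx + q->delta_q_y_dc, 0, 255);
	int u_ac_qidx = clamp(q->base_q_idx + q->delta_q_u_ac, 0, 255);
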
+#define V4L2_AV1_TILE_INFO_FLAG_UNIFORM_TILE_SPACING 0x1
+
+/**
+ * struct v4l2_av1_tile_info - AV1 Tile info as defined in section 6.8.14 "Tile
+ * info semantics" of the AV1 specification.
+ *
+ * @flags: see V4L2_AV1_TILE_INFO_FLAG_{}
+ * @context_update_tile_id: specifies which tile to use for the CDF update.
+ * @tile_rows: specifies the number of tiles down the frame.
+ * @tile_cols: specifies the number of tiles across the frame.
+ * @mi_col_starts: an array specifying the start column (in units of 4x4 luma
+ * samples) for each tile across the image.
+ * @mi_row_starts: an array specifying the start row (in units of 4x4 luma
+ * samples) for each tile down the image.
+ * @width_in_sbs_minus_1: specifies the width of a tile minus 1 in units of
+ * superblocks.
+ * @height_in_sbs_minus_1: specifies the height of a tile minus 1 in units of
+ * superblocks.
+ * @tile_size_bytes: specifies the number of bytes needed to code each tile
+ * size.
+ * @reserved: padding field. Should be zeroed by applications.
+ */
+struct v4l2_av1_tile_info {
+ __u8 flags;
+ __u8 context_update_tile_id;
+ __u8 tile_cols;
+ __u8 tile_rows;
+ __u32 mi_col_starts[V4L2_AV1_MAX_TILE_COLS + 1];
+ __u32 mi_row_starts[V4L2_AV1_MAX_TILE_ROWS + 1];
+ __u32 width_in_sbs_minus_1[V4L2_AV1_MAX_TILE_COLS];
+ __u32 height_in_sbs_minus_1[V4L2_AV1_MAX_TILE_ROWS];
+ __u8 tile_size_bytes;
+ __u8 reserved[3];
+};
+
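As hinted in the tile group entry documentation above, the MiRowStart/MiRowEnd/MiColStart/MiColEnd variables can be recovered from these arrays; a sketch, with "ti", "tile_row" and "tile_col" assumed:

	__u32 mi_row_start = ti->mi_row_starts[tile_row];
	__u32 mi_row_end   = ti->mi_row_starts[tile_row + 1];
	__u32 mi_col_start = ti->mi_col_starts[tile_col];
	__u32 mi_col_end   = ti->mi_col_starts[tile_col + 1];
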
+/**
+ * enum v4l2_av1_frame_type - AV1 Frame Type
+ *
+ * @V4L2_AV1_KEY_FRAME: Key frame
+ * @V4L2_AV1_INTER_FRAME: Inter frame
+ * @V4L2_AV1_INTRA_ONLY_FRAME: Intra-only frame
+ * @V4L2_AV1_SWITCH_FRAME: Switch frame
+ */
+enum v4l2_av1_frame_type {
+ V4L2_AV1_KEY_FRAME = 0,
+ V4L2_AV1_INTER_FRAME = 1,
+ V4L2_AV1_INTRA_ONLY_FRAME = 2,
+ V4L2_AV1_SWITCH_FRAME = 3
+};
+
+/**
+ * enum v4l2_av1_interpolation_filter - AV1 interpolation filter types
+ *
+ * @V4L2_AV1_INTERPOLATION_FILTER_EIGHTTAP: eight tap filter
+ * @V4L2_AV1_INTERPOLATION_FILTER_EIGHTTAP_SMOOTH: eight tap smooth filter
+ * @V4L2_AV1_INTERPOLATION_FILTER_EIGHTTAP_SHARP: eight tap sharp filter
+ * @V4L2_AV1_INTERPOLATION_FILTER_BILINEAR: bilinear filter
+ * @V4L2_AV1_INTERPOLATION_FILTER_SWITCHABLE: filter selection is signaled at
+ * the block level
+ *
+ * See section 6.8.9 "Interpolation filter semantics" of the AV1 specification
+ * for more details.
+ */
+enum v4l2_av1_interpolation_filter {
+ V4L2_AV1_INTERPOLATION_FILTER_EIGHTTAP = 0,
+ V4L2_AV1_INTERPOLATION_FILTER_EIGHTTAP_SMOOTH = 1,
+ V4L2_AV1_INTERPOLATION_FILTER_EIGHTTAP_SHARP = 2,
+ V4L2_AV1_INTERPOLATION_FILTER_BILINEAR = 3,
+ V4L2_AV1_INTERPOLATION_FILTER_SWITCHABLE = 4,
+};
+
+/**
+ * enum v4l2_av1_tx_mode - AV1 Tx mode as described in section 6.8.21 "TX mode
+ * semantics" of the AV1 specification.
+ * @V4L2_AV1_TX_MODE_ONLY_4X4: the inverse transform will use only 4x4
+ * transforms
+ * @V4L2_AV1_TX_MODE_LARGEST: the inverse transform will use the largest
+ * transform size that fits inside the block
+ * @V4L2_AV1_TX_MODE_SELECT: the choice of transform size is specified
+ * explicitly for each block.
+ */
+enum v4l2_av1_tx_mode {
+ V4L2_AV1_TX_MODE_ONLY_4X4 = 0,
+ V4L2_AV1_TX_MODE_LARGEST = 1,
+ V4L2_AV1_TX_MODE_SELECT = 2
+};
+
+#define V4L2_AV1_FRAME_FLAG_SHOW_FRAME 0x00000001
+#define V4L2_AV1_FRAME_FLAG_SHOWABLE_FRAME 0x00000002
+#define V4L2_AV1_FRAME_FLAG_ERROR_RESILIENT_MODE 0x00000004
+#define V4L2_AV1_FRAME_FLAG_DISABLE_CDF_UPDATE 0x00000008
+#define V4L2_AV1_FRAME_FLAG_ALLOW_SCREEN_CONTENT_TOOLS 0x00000010
+#define V4L2_AV1_FRAME_FLAG_FORCE_INTEGER_MV 0x00000020
+#define V4L2_AV1_FRAME_FLAG_ALLOW_INTRABC 0x00000040
+#define V4L2_AV1_FRAME_FLAG_USE_SUPERRES 0x00000080
+#define V4L2_AV1_FRAME_FLAG_ALLOW_HIGH_PRECISION_MV 0x00000100
+#define V4L2_AV1_FRAME_FLAG_IS_MOTION_MODE_SWITCHABLE 0x00000200
+#define V4L2_AV1_FRAME_FLAG_USE_REF_FRAME_MVS 0x00000400
+#define V4L2_AV1_FRAME_FLAG_DISABLE_FRAME_END_UPDATE_CDF 0x00000800
+#define V4L2_AV1_FRAME_FLAG_ALLOW_WARPED_MOTION 0x00001000
+#define V4L2_AV1_FRAME_FLAG_REFERENCE_SELECT 0x00002000
+#define V4L2_AV1_FRAME_FLAG_REDUCED_TX_SET 0x00004000
+#define V4L2_AV1_FRAME_FLAG_SKIP_MODE_ALLOWED 0x00008000
+#define V4L2_AV1_FRAME_FLAG_SKIP_MODE_PRESENT 0x00010000
+#define V4L2_AV1_FRAME_FLAG_FRAME_SIZE_OVERRIDE 0x00020000
+#define V4L2_AV1_FRAME_FLAG_BUFFER_REMOVAL_TIME_PRESENT 0x00040000
+#define V4L2_AV1_FRAME_FLAG_FRAME_REFS_SHORT_SIGNALING 0x00080000
+
+#define V4L2_CID_STATELESS_AV1_FRAME (V4L2_CID_CODEC_STATELESS_BASE + 502)
+/**
+ * struct v4l2_ctrl_av1_frame - Represents an AV1 Frame Header OBU.
+ *
+ * @tile_info: tile info
+ * @quantization: quantization params
+ * @segmentation: segmentation params
+ * @superres_denom: the denominator for the upscaling ratio.
+ * @loop_filter: loop filter params
+ * @cdef: cdef params
+ * @skip_mode_frame: specifies the frames to use for compound prediction when
+ * skip_mode is equal to 1.
+ * @primary_ref_frame: specifies which reference frame contains the CDF values
+ * and other state that should be loaded at the start of the frame.
+ * @loop_restoration: loop restoration params
+ * @global_motion: global motion params
+ * @flags: see V4L2_AV1_FRAME_FLAG_{}
+ * @frame_type: specifies the AV1 frame type
+ * @order_hint: specifies OrderHintBits least significant bits of the expected
+ * output order for this frame.
+ * @upscaled_width: the upscaled width.
+ * @interpolation_filter: specifies the filter selection used for performing
+ * inter prediction.
+ * @tx_mode: specifies how the transform size is determined.
+ * @frame_width_minus_1: add 1 to get the frame's width.
+ * @frame_height_minus_1: add 1 to get the frame's height.
+ * @render_width_minus_1: add 1 to get the render width of the frame in luma
+ * samples.
+ * @render_height_minus_1: add 1 to get the render height of the frame in luma
+ * samples.
+ * @current_frame_id: specifies the frame id number for the current frame. Frame
+ * id numbers are additional information that do not affect the decoding
+ * process, but provide decoders with a way of detecting missing reference
+ * frames so that appropriate action can be taken.
+ * @buffer_removal_time: specifies the frame removal time in units of DecCT clock
+ * ticks counted from the removal time of the last random access point for
+ * operating point opNum.
+ * @reserved: padding field. Should be zeroed by applications.
+ * @order_hints: specifies the expected output order hint for each reference
+ * frame. This field corresponds to the OrderHints variable from the
+ * specification (section 5.9.2 "Uncompressed header syntax"). As such, this is
+ * only used for non-intra frames and ignored otherwise. order_hints[0] is
+ * always ignored.
+ * @reference_frame_ts: the V4L2 timestamp of the reference frame slots.
+ * @ref_frame_idx: used to index into @reference_frame_ts when decoding
+ * inter-frames. The meaning of this array is the same as in the specification.
+ * The timestamp refers to the timestamp field in struct v4l2_buffer. Use
+ * v4l2_timeval_to_ns() to convert the struct timeval to a __u64.
+ * @refresh_frame_flags: contains a bitmask that specifies which reference frame
+ * slots will be updated with the current frame after it is decoded.
+ */
+struct v4l2_ctrl_av1_frame {
+ struct v4l2_av1_tile_info tile_info;
+ struct v4l2_av1_quantization quantization;
+ __u8 superres_denom;
+ struct v4l2_av1_segmentation segmentation;
+ struct v4l2_av1_loop_filter loop_filter;
+ struct v4l2_av1_cdef cdef;
+ __u8 skip_mode_frame[2];
+ __u8 primary_ref_frame;
+ struct v4l2_av1_loop_restoration loop_restoration;
+ struct v4l2_av1_global_motion global_motion;
+ __u32 flags;
+ enum v4l2_av1_frame_type frame_type;
+ __u32 order_hint;
+ __u32 upscaled_width;
+ enum v4l2_av1_interpolation_filter interpolation_filter;
+ enum v4l2_av1_tx_mode tx_mode;
+ __u32 frame_width_minus_1;
+ __u32 frame_height_minus_1;
+ __u16 render_width_minus_1;
+ __u16 render_height_minus_1;
+
+ __u32 current_frame_id;
+ __u32 buffer_removal_time[V4L2_AV1_MAX_OPERATING_POINTS];
+ __u8 reserved[4];
+ __u32 order_hints[V4L2_AV1_TOTAL_REFS_PER_FRAME];
+ __u64 reference_frame_ts[V4L2_AV1_TOTAL_REFS_PER_FRAME];
+ __s8 ref_frame_idx[V4L2_AV1_REFS_PER_FRAME];
+ __u8 refresh_frame_flags;
+};
+
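A sketch of resolving the buffer that backs one reference of an inter frame; "f" is assumed, and the i = ref - V4L2_AV1_REF_LAST_FRAME indexing follows the spec's ref_frame_idx convention:

	int i = V4L2_AV1_REF_GOLDEN_FRAME - V4L2_AV1_REF_LAST_FRAME;
	int slot = f->ref_frame_idx[i];
	/* same format as v4l2_timeval_to_ns() applied to the v4l2_buffer */
	__u64 ts = f->reference_frame_ts[slot];
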
+#define V4L2_AV1_FILM_GRAIN_FLAG_APPLY_GRAIN 0x1
+#define V4L2_AV1_FILM_GRAIN_FLAG_UPDATE_GRAIN 0x2
+#define V4L2_AV1_FILM_GRAIN_FLAG_CHROMA_SCALING_FROM_LUMA 0x4
+#define V4L2_AV1_FILM_GRAIN_FLAG_OVERLAP 0x8
+#define V4L2_AV1_FILM_GRAIN_FLAG_CLIP_TO_RESTRICTED_RANGE 0x10
+
+#define V4L2_CID_STATELESS_AV1_FILM_GRAIN (V4L2_CID_CODEC_STATELESS_BASE + 505)
+/**
+ * struct v4l2_ctrl_av1_film_grain - AV1 Film Grain parameters.
+ *
+ * Film grain parameters as specified by section 6.8.20 of the AV1 Specification.
+ *
+ * @flags: see V4L2_AV1_FILM_GRAIN_{}.
+ * @cr_mult: represents a multiplier for the cr component used in derivation of
+ * the input index to the cr component scaling function.
+ * @grain_seed: specifies the starting value for the pseudo-random numbers used
+ * during film grain synthesis.
+ * @film_grain_params_ref_idx: indicates which reference frame contains the
+ * film grain parameters to be used for this frame.
+ * @num_y_points: specifies the number of points for the piece-wise linear
+ * scaling function of the luma component.
+ * @point_y_value: represents the x (luma value) coordinate for the i-th point
+ * of the piecewise linear scaling function for luma component. The values are
+ * signaled on the scale of 0..255. In case of 10 bit video, these values
+ * correspond to luma values divided by 4. In case of 12 bit video, these values
+ * correspond to luma values divided by 16.
+ * @point_y_scaling: represents the scaling (output) value for the i-th point
+ * of the piecewise linear scaling function for luma component.
+ * @num_cb_points: specifies the number of points for the piece-wise linear
+ * scaling function of the cb component.
+ * @point_cb_value: represents the x coordinate for the i-th point of the
+ * piece-wise linear scaling function for cb component. The values are signaled
+ * on the scale of 0..255.
+ * @point_cb_scaling: represents the scaling (output) value for the i-th point
+ * of the piecewise linear scaling function for cb component.
+ * @num_cr_points: specifies the number of points for the piece-wise
+ * linear scaling function of the cr component.
+ * @point_cr_value: represents the x coordinate for the i-th point of the
+ * piece-wise linear scaling function for cr component. The values are signaled
+ * on the scale of 0..255.
+ * @point_cr_scaling: represents the scaling (output) value for the i-th point
+ * of the piecewise linear scaling function for cr component.
+ * @grain_scaling_minus_8: represents the shift minus 8 applied to the values
+ * of the chroma component. The grain_scaling_minus_8 can take values of 0..3
+ * and determines the range and quantization step of the standard deviation of
+ * film grain.
+ * @ar_coeff_lag: specifies the number of auto-regressive coefficients for luma
+ * and chroma.
+ * @ar_coeffs_y_plus_128: specifies auto-regressive coefficients used for the Y
+ * plane.
+ * @ar_coeffs_cb_plus_128: specifies auto-regressive coefficients used for the U
+ * plane.
+ * @ar_coeffs_cr_plus_128: specifies auto-regressive coefficients used for the V
+ * plane.
+ * @ar_coeff_shift_minus_6: specifies the range of the auto-regressive
+ * coefficients. Values of 0, 1, 2, and 3 correspond to the ranges for
+ * auto-regressive coefficients of [-2, 2), [-1, 1), [-0.5, 0.5) and [-0.25,
+ * 0.25) respectively.
+ * @grain_scale_shift: specifies how much the Gaussian random numbers should be
+ * scaled down during the grain synthesis process.
+ * @cb_mult: represents a multiplier for the cb component used in derivation of
+ * the input index to the cb component scaling function.
+ * @cb_luma_mult: represents a multiplier for the average luma component used in
+ * derivation of the input index to the cb component scaling function.
+ * @cr_luma_mult: represents a multiplier for the average luma component used in
+ * derivation of the input index to the cr component scaling function.
+ * @cb_offset: represents an offset used in derivation of the input index to the
+ * cb component scaling function.
+ * @cr_offset: represents an offset used in derivation of the input index to the
+ * cr component scaling function.
+ * @reserved: padding field. Should be zeroed by applications.
+ */
+struct v4l2_ctrl_av1_film_grain {
+ __u8 flags;
+ __u8 cr_mult;
+ __u16 grain_seed;
+ __u8 film_grain_params_ref_idx;
+ __u8 num_y_points;
+ __u8 point_y_value[V4L2_AV1_MAX_NUM_Y_POINTS];
+ __u8 point_y_scaling[V4L2_AV1_MAX_NUM_Y_POINTS];
+ __u8 num_cb_points;
+ __u8 point_cb_value[V4L2_AV1_MAX_NUM_CB_POINTS];
+ __u8 point_cb_scaling[V4L2_AV1_MAX_NUM_CB_POINTS];
+ __u8 num_cr_points;
+ __u8 point_cr_value[V4L2_AV1_MAX_NUM_CR_POINTS];
+ __u8 point_cr_scaling[V4L2_AV1_MAX_NUM_CR_POINTS];
+ __u8 grain_scaling_minus_8;
+ __u8 ar_coeff_lag;
+ __u8 ar_coeffs_y_plus_128[V4L2_AV1_AR_COEFFS_SIZE];
+ __u8 ar_coeffs_cb_plus_128[V4L2_AV1_AR_COEFFS_SIZE];
+ __u8 ar_coeffs_cr_plus_128[V4L2_AV1_AR_COEFFS_SIZE];
+ __u8 ar_coeff_shift_minus_6;
+ __u8 grain_scale_shift;
+ __u8 cb_mult;
+ __u8 cb_luma_mult;
+ __u8 cr_luma_mult;
+ __u16 cb_offset;
+ __u16 cr_offset;
+ __u8 reserved[4];
+};
+
/* MPEG-compression definitions kept for backwards compatibility */
#ifndef __KERNEL__
#define V4L2_CTRL_CLASS_MPEG V4L2_CTRL_CLASS_CODEC
diff --git a/include/uapi/linux/v4l2-subdev.h b/include/uapi/linux/v4l2-subdev.h
index 4a195b68f28f..7048c51581c6 100644
--- a/include/uapi/linux/v4l2-subdev.h
+++ b/include/uapi/linux/v4l2-subdev.h
@@ -116,13 +116,15 @@ struct v4l2_subdev_frame_size_enum {
* @pad: pad number, as reported by the media API
* @interval: frame interval in seconds
* @stream: stream number, defined in subdev routing
+ * @which: interval type (from enum v4l2_subdev_format_whence)
* @reserved: drivers and applications must zero this array
*/
struct v4l2_subdev_frame_interval {
__u32 pad;
struct v4l2_fract interval;
__u32 stream;
- __u32 reserved[8];
+ __u32 which;
+ __u32 reserved[7];
};
/**
@@ -133,7 +135,7 @@ struct v4l2_subdev_frame_interval {
* @width: frame width in pixels
* @height: frame height in pixels
* @interval: frame interval in seconds
- * @which: format type (from enum v4l2_subdev_format_whence)
+ * @which: interval type (from enum v4l2_subdev_format_whence)
* @stream: stream number, defined in subdev routing
* @reserved: drivers and applications must zero this array
*/
@@ -239,7 +241,14 @@ struct v4l2_subdev_routing {
* set (which is the default), the 'stream' fields will be forced to 0 by the
* kernel.
*/
- #define V4L2_SUBDEV_CLIENT_CAP_STREAMS (1U << 0)
+#define V4L2_SUBDEV_CLIENT_CAP_STREAMS (1ULL << 0)
+
+/*
+ * The client is aware of the 'which' field of struct
+ * v4l2_subdev_frame_interval. If this capability is not set (which is the
+ * default), the 'which' field is forced to
+ * V4L2_SUBDEV_FORMAT_ACTIVE by the kernel.
+ */
+#define V4L2_SUBDEV_CLIENT_CAP_INTERVAL_USES_WHICH (1ULL << 1)
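
A sketch of how a client would opt in and then use the new field ("subdev_fd" assumed):

	struct v4l2_subdev_client_capability cap = {
		.capabilities = V4L2_SUBDEV_CLIENT_CAP_INTERVAL_USES_WHICH,
	};
	ioctl(subdev_fd, VIDIOC_SUBDEV_S_CLIENT_CAP, &cap);

	struct v4l2_subdev_frame_interval ival = {
		.pad = 0,
		.which = V4L2_SUBDEV_FORMAT_TRY,
	};
	ioctl(subdev_fd, VIDIOC_SUBDEV_G_FRAME_INTERVAL, &ival);
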
/**
* struct v4l2_subdev_client_capability - Capabilities of the client accessing
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index 0552e8dcf0cb..2b68e6cdf190 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -213,9 +213,11 @@ struct vfio_device_info {
#define VFIO_DEVICE_FLAGS_AP (1 << 5) /* vfio-ap device */
#define VFIO_DEVICE_FLAGS_FSL_MC (1 << 6) /* vfio-fsl-mc device */
#define VFIO_DEVICE_FLAGS_CAPS (1 << 7) /* Info supports caps */
+#define VFIO_DEVICE_FLAGS_CDX (1 << 8) /* vfio-cdx device */
__u32 num_regions; /* Max region index + 1 */
__u32 num_irqs; /* Max IRQ index + 1 */
__u32 cap_offset; /* Offset within info struct of first cap */
+ __u32 pad;
};
#define VFIO_DEVICE_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 7)
@@ -240,6 +242,20 @@ struct vfio_device_info {
#define VFIO_DEVICE_INFO_CAP_ZPCI_UTIL 3
#define VFIO_DEVICE_INFO_CAP_ZPCI_PFIP 4
+/*
+ * The following VFIO_DEVICE_INFO capability reports support for PCIe AtomicOp
+ * completion to the root bus with supported widths provided via flags.
+ */
+#define VFIO_DEVICE_INFO_CAP_PCI_ATOMIC_COMP 5
+struct vfio_device_info_cap_pci_atomic_comp {
+ struct vfio_info_cap_header header;
+ __u32 flags;
+#define VFIO_PCI_ATOMIC_COMP32 (1 << 0)
+#define VFIO_PCI_ATOMIC_COMP64 (1 << 1)
+#define VFIO_PCI_ATOMIC_COMP128 (1 << 2)
+ __u32 reserved;
+};
+
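A sketch of walking the capability chain returned by VFIO_DEVICE_GET_INFO to find this capability; "info" is assumed to be a user buffer already filled by the ioctl with VFIO_DEVICE_FLAGS_CAPS set:

	struct vfio_info_cap_header *hdr;
	__u32 off = info->cap_offset;

	while (off) {
		hdr = (struct vfio_info_cap_header *)((char *)info + off);
		if (hdr->id == VFIO_DEVICE_INFO_CAP_PCI_ATOMIC_COMP) {
			struct vfio_device_info_cap_pci_atomic_comp *cap =
				(void *)hdr;

			if (cap->flags & VFIO_PCI_ATOMIC_COMP64)
				; /* 64-bit AtomicOp completion supported */
			break;
		}
		off = hdr->next;
	}
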
/**
* VFIO_DEVICE_GET_REGION_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 8,
* struct vfio_region_info)
@@ -261,8 +277,8 @@ struct vfio_region_info {
#define VFIO_REGION_INFO_FLAG_CAPS (1 << 3) /* Info supports caps */
__u32 index; /* Region index */
__u32 cap_offset; /* Offset within info struct of first cap */
- __u64 size; /* Region size (bytes) */
- __u64 offset; /* Region offset from start of device fd */
+ __aligned_u64 size; /* Region size (bytes) */
+ __aligned_u64 offset; /* Region offset from start of device fd */
};
#define VFIO_DEVICE_GET_REGION_INFO _IO(VFIO_TYPE, VFIO_BASE + 8)
@@ -278,8 +294,8 @@ struct vfio_region_info {
#define VFIO_REGION_INFO_CAP_SPARSE_MMAP 1
struct vfio_region_sparse_mmap_area {
- __u64 offset; /* Offset of mmap'able area within region */
- __u64 size; /* Size of mmap'able area */
+ __aligned_u64 offset; /* Offset of mmap'able area within region */
+ __aligned_u64 size; /* Size of mmap'able area */
};
struct vfio_region_info_cap_sparse_mmap {
@@ -434,9 +450,9 @@ struct vfio_device_migration_info {
VFIO_DEVICE_STATE_V1_RESUMING)
__u32 reserved;
- __u64 pending_bytes;
- __u64 data_offset;
- __u64 data_size;
+ __aligned_u64 pending_bytes;
+ __aligned_u64 data_offset;
+ __aligned_u64 data_size;
};
/*
@@ -460,7 +476,7 @@ struct vfio_device_migration_info {
struct vfio_region_info_cap_nvlink2_ssatgt {
struct vfio_info_cap_header header;
- __u64 tgt;
+ __aligned_u64 tgt;
};
/*
@@ -511,6 +527,9 @@ struct vfio_region_info_cap_nvlink2_lnkspd {
* then add and unmask vectors, it's up to userspace to make the decision
* whether to allocate the maximum supported number of vectors or tear
* down setup and incrementally increase the vectors as each is enabled.
+ * Absence of the NORESIZE flag indicates that vectors can be enabled
+ * and disabled dynamically without impacting other vectors within the
+ * index.
*/
struct vfio_irq_info {
__u32 argsz;
@@ -646,15 +665,73 @@ enum {
VFIO_CCW_NUM_IRQS
};
+/*
+ * The vfio-ap bus driver makes use of the following IRQ index mapping.
+ * Unimplemented IRQ types return a count of zero.
+ */
+enum {
+ VFIO_AP_REQ_IRQ_INDEX,
+ VFIO_AP_NUM_IRQS
+};
+
/**
* VFIO_DEVICE_GET_PCI_HOT_RESET_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 12,
* struct vfio_pci_hot_reset_info)
*
+ * This command is used to query the affected devices in the hot reset for
+ * a given device.
+ *
+ * This command always reports the segment, bus, and devfn information for
+ * each affected device, and selectively reports either group_id or devid,
+ * depending on how the calling device was opened.
+ *
+ * - If the calling device is opened via the traditional group/container
+ * API, group_id is reported. The user should check that it owns all
+ * the affected devices and provide a set of group fds to prove
+ * ownership in the VFIO_DEVICE_PCI_HOT_RESET ioctl.
+ *
+ * - If the calling device is opened as a cdev, devid is reported.
+ * Flag VFIO_PCI_HOT_RESET_FLAG_DEV_ID is set to indicate this
+ * data type. All the affected devices should be represented in
+ * the dev_set, e.g. bound to a vfio driver, and also be owned by
+ * this interface which is determined by the following conditions:
+ * 1) Has a valid devid within the iommufd_ctx of the calling device.
+ * Ownership cannot be determined across separate iommufd_ctx and
+ * the cdev calling conventions do not support a proof-of-ownership
+ * model as provided in the legacy group interface. In this case
+ * valid devid with value greater than zero is provided in the return
+ * structure.
+ * 2) Does not have a valid devid within the iommufd_ctx of the calling
+ * device, but belongs to the same IOMMU group as the calling device
+ * or another opened device that has a valid devid within the
+ * iommufd_ctx of the calling device. This provides implicit ownership
+ * for devices within the same DMA isolation context. In this case
+ * the devid value of VFIO_PCI_DEVID_OWNED is provided in the return
+ * structure.
+ *
+ * A devid value of VFIO_PCI_DEVID_NOT_OWNED is provided in the return
+ * structure for affected devices where the device is NOT represented in
+ * the dev_set or ownership is not available. Such devices prevent the
+ * use of the VFIO_DEVICE_PCI_HOT_RESET ioctl outside of the
+ * proof-of-ownership calling conventions (i.e. via legacy group accessed
+ * devices). Flag VFIO_PCI_HOT_RESET_FLAG_DEV_ID_OWNED is set when all
+ * the affected devices are represented in the dev_set and also owned by
+ * the user. This flag is available only when flag
+ * VFIO_PCI_HOT_RESET_FLAG_DEV_ID is set, otherwise it is reserved. When
+ * set, the user can invoke VFIO_DEVICE_PCI_HOT_RESET with a zero-length
+ * fd array on the calling device, as the ownership is validated by
+ * iommufd_ctx.
+ *
* Return: 0 on success, -errno on failure:
* -enospc = insufficient buffer, -enodev = unsupported for device.
*/
struct vfio_pci_dependent_device {
- __u32 group_id;
+ union {
+ __u32 group_id;
+ __u32 devid;
+#define VFIO_PCI_DEVID_OWNED 0
+#define VFIO_PCI_DEVID_NOT_OWNED -1
+ };
__u16 segment;
__u8 bus;
__u8 devfn; /* Use PCI_SLOT/PCI_FUNC */
@@ -663,6 +740,8 @@ struct vfio_pci_dependent_device {
struct vfio_pci_hot_reset_info {
__u32 argsz;
__u32 flags;
+#define VFIO_PCI_HOT_RESET_FLAG_DEV_ID (1 << 0)
+#define VFIO_PCI_HOT_RESET_FLAG_DEV_ID_OWNED (1 << 1)
__u32 count;
struct vfio_pci_dependent_device devices[];
};
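
A sketch of the cdev flow spelled out in the surrounding comments: query the affected set, then, if everything is owned, issue the reset (documented next) with an empty fd array. The buffer size is an assumption:

	char buf[4096];	/* assumed large enough for the affected set */
	struct vfio_pci_hot_reset_info *info = (void *)buf;

	info->argsz = sizeof(buf);
	ioctl(device_fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);

	if ((info->flags & VFIO_PCI_HOT_RESET_FLAG_DEV_ID) &&
	    (info->flags & VFIO_PCI_HOT_RESET_FLAG_DEV_ID_OWNED)) {
		struct vfio_pci_hot_reset reset = { .argsz = sizeof(reset) };

		ioctl(device_fd, VFIO_DEVICE_PCI_HOT_RESET, &reset);
	}
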
@@ -673,6 +752,24 @@ struct vfio_pci_hot_reset_info {
* VFIO_DEVICE_PCI_HOT_RESET - _IOW(VFIO_TYPE, VFIO_BASE + 13,
* struct vfio_pci_hot_reset)
*
+ * A PCI hot reset results in either a bus or slot reset which may affect
+ * other devices sharing the bus/slot. The calling user must have
+ * ownership of the full set of affected devices as determined by the
+ * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO ioctl.
+ *
+ * When called on a device file descriptor acquired through the vfio
+ * group interface, the user is required to provide proof of ownership
+ * of those affected devices via the group_fds array in struct
+ * vfio_pci_hot_reset.
+ *
+ * When called on a direct cdev opened vfio device, the flags field of
+ * struct vfio_pci_hot_reset_info reports the ownership status of the
+ * affected devices and this ioctl must be called with an empty group_fds
+ * array. See above INFO ioctl definition for ownership requirements.
+ *
+ * Mixed usage of legacy groups and cdevs across the set of affected
+ * devices is not supported.
+ *
* Return: 0 on success, -errno on failure.
*/
struct vfio_pci_hot_reset {
@@ -719,7 +816,7 @@ struct vfio_device_gfx_plane_info {
__u32 drm_plane_type; /* type of plane: DRM_PLANE_TYPE_* */
/* out */
__u32 drm_format; /* drm format of plane */
- __u64 drm_format_mod; /* tiled mode */
+ __aligned_u64 drm_format_mod; /* tiled mode */
__u32 width; /* width of plane */
__u32 height; /* height of plane */
__u32 stride; /* stride of plane */
@@ -732,6 +829,7 @@ struct vfio_device_gfx_plane_info {
__u32 region_index; /* region index */
__u32 dmabuf_id; /* dma-buf id */
};
+ __u32 reserved;
};
#define VFIO_DEVICE_QUERY_GFX_PLANE _IO(VFIO_TYPE, VFIO_BASE + 14)
@@ -766,9 +864,10 @@ struct vfio_device_ioeventfd {
#define VFIO_DEVICE_IOEVENTFD_32 (1 << 2) /* 4-byte write */
#define VFIO_DEVICE_IOEVENTFD_64 (1 << 3) /* 8-byte write */
#define VFIO_DEVICE_IOEVENTFD_SIZE_MASK (0xf)
- __u64 offset; /* device fd offset of write */
- __u64 data; /* data to be written */
+ __aligned_u64 offset; /* device fd offset of write */
+ __aligned_u64 data; /* data to be written */
__s32 fd; /* -1 for de-assignment */
+ __u32 reserved;
};
#define VFIO_DEVICE_IOEVENTFD _IO(VFIO_TYPE, VFIO_BASE + 16)
@@ -802,6 +901,83 @@ struct vfio_device_feature {
#define VFIO_DEVICE_FEATURE _IO(VFIO_TYPE, VFIO_BASE + 17)
/*
+ * VFIO_DEVICE_BIND_IOMMUFD - _IOR(VFIO_TYPE, VFIO_BASE + 18,
+ * struct vfio_device_bind_iommufd)
+ * @argsz: User filled size of this data.
+ * @flags: Must be 0.
+ * @iommufd: iommufd to bind.
+ * @out_devid: The device id generated by this bind. devid is a handle for
+ * this device/iommufd bond and can be used in IOMMUFD commands.
+ *
+ * Bind a vfio_device to the specified iommufd.
+ *
+ * User is restricted from accessing the device before the binding operation
+ * is completed. Only allowed on cdev fds.
+ *
+ * Unbind is automatically conducted when device fd is closed.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+struct vfio_device_bind_iommufd {
+ __u32 argsz;
+ __u32 flags;
+ __s32 iommufd;
+ __u32 out_devid;
+};
+
+#define VFIO_DEVICE_BIND_IOMMUFD _IO(VFIO_TYPE, VFIO_BASE + 18)
+
+/*
+ * VFIO_DEVICE_ATTACH_IOMMUFD_PT - _IOW(VFIO_TYPE, VFIO_BASE + 19,
+ * struct vfio_device_attach_iommufd_pt)
+ * @argsz: User filled size of this data.
+ * @flags: Must be 0.
+ * @pt_id: Input the target id which can represent an ioas or a hwpt
+ * allocated via the iommufd subsystem.
+ * Output the input ioas id or the attached hwpt id which could
+ * be the specified hwpt itself or a hwpt automatically created
+ * for the specified ioas by the kernel during the attachment.
+ *
+ * Associate the device with an address space within the bound iommufd.
+ * Undo by VFIO_DEVICE_DETACH_IOMMUFD_PT or device fd close. This is only
+ * allowed on cdev fds.
+ *
+ * If a vfio device is currently attached to a valid hw_pagetable, without doing
+ * a VFIO_DEVICE_DETACH_IOMMUFD_PT, a second VFIO_DEVICE_ATTACH_IOMMUFD_PT ioctl
+ * passing in another hw_pagetable (hwpt) id is allowed. This action, also known
+ * as a hw_pagetable replacement, will replace the device's currently attached
+ * hw_pagetable with a new hw_pagetable corresponding to the given pt_id.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+struct vfio_device_attach_iommufd_pt {
+ __u32 argsz;
+ __u32 flags;
+ __u32 pt_id;
+};
+
+#define VFIO_DEVICE_ATTACH_IOMMUFD_PT _IO(VFIO_TYPE, VFIO_BASE + 19)
+
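A sketch of the bind-then-attach sequence on a cdev; "iommufd_fd" is assumed to be an open /dev/iommu fd, "ioas_id" an IOAS previously allocated there, and the device path is illustrative:

	int dev_fd = open("/dev/vfio/devices/vfio0", O_RDWR);

	struct vfio_device_bind_iommufd bind = {
		.argsz = sizeof(bind),
		.iommufd = iommufd_fd,
	};
	ioctl(dev_fd, VFIO_DEVICE_BIND_IOMMUFD, &bind);

	struct vfio_device_attach_iommufd_pt attach = {
		.argsz = sizeof(attach),
		.pt_id = ioas_id,
	};
	ioctl(dev_fd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &attach);
	/* attach.pt_id now holds the attached ioas/hwpt id */
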
+/*
+ * VFIO_DEVICE_DETACH_IOMMUFD_PT - _IOW(VFIO_TYPE, VFIO_BASE + 20,
+ * struct vfio_device_detach_iommufd_pt)
+ * @argsz: User filled size of this data.
+ * @flags: Must be 0.
+ *
+ * Remove the association between the device and its currently attached
+ * address space. Afterwards, the device is in a blocking DMA state. This is
+ * only allowed on cdev fds.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+struct vfio_device_detach_iommufd_pt {
+ __u32 argsz;
+ __u32 flags;
+};
+
+#define VFIO_DEVICE_DETACH_IOMMUFD_PT _IO(VFIO_TYPE, VFIO_BASE + 20)
+
+/*
* Provide support for setting a PCI VF Token, which is used as a shared
* secret between PF and VF drivers. This feature may only be set on a
* PCI SR-IOV PF when SR-IOV is enabled on the PF and there are no existing
@@ -1043,6 +1219,7 @@ enum vfio_device_mig_state {
VFIO_DEVICE_STATE_RUNNING_P2P = 5,
VFIO_DEVICE_STATE_PRE_COPY = 6,
VFIO_DEVICE_STATE_PRE_COPY_P2P = 7,
+ VFIO_DEVICE_STATE_NR,
};
/**
@@ -1260,6 +1437,27 @@ struct vfio_device_feature_mig_data_size {
#define VFIO_DEVICE_FEATURE_MIG_DATA_SIZE 9
+/**
+ * Upon VFIO_DEVICE_FEATURE_SET, set or clear the BUS mastering for the device
+ * based on the operation specified in op flag.
+ *
+ * This functionality is provided for devices that need bus master control,
+ * but whose in-band device interface lacks the support. Consequently, it is
+ * not applicable to PCI devices, as bus master control for PCI devices is
+ * managed in-band through the configuration space. At present, this feature
+ * is supported only for CDX devices.
+ * Configuring the device's BUS MASTER setting as CLEAR blocks all incoming
+ * DMA requests from the device, while configuring it as SET (enable) grants
+ * the device the capability to perform DMA to the host memory.
+ */
+struct vfio_device_feature_bus_master {
+ __u32 op;
+#define VFIO_DEVICE_FEATURE_CLEAR_MASTER 0 /* Clear Bus Master */
+#define VFIO_DEVICE_FEATURE_SET_MASTER 1 /* Set Bus Master */
+};
+#define VFIO_DEVICE_FEATURE_BUS_MASTER 10
+
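A sketch of enabling bus mastering on a CDX device. The feature payload immediately follows the vfio_device_feature header in one buffer; embedding a flexible-array-terminated struct like this relies on a GNU C extension:

	struct {
		struct vfio_device_feature hdr;
		struct vfio_device_feature_bus_master bm;
	} feat = {
		.hdr = {
			.argsz = sizeof(feat),
			.flags = VFIO_DEVICE_FEATURE_SET |
				 VFIO_DEVICE_FEATURE_BUS_MASTER,
		},
		.bm = { .op = VFIO_DEVICE_FEATURE_SET_MASTER },
	};
	ioctl(device_fd, VFIO_DEVICE_FEATURE, &feat);
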
/* -------- API for Type1 VFIO IOMMU -------- */
/**
@@ -1275,8 +1473,9 @@ struct vfio_iommu_type1_info {
__u32 flags;
#define VFIO_IOMMU_INFO_PGSIZES (1 << 0) /* supported page sizes info */
#define VFIO_IOMMU_INFO_CAPS (1 << 1) /* Info supports caps */
- __u64 iova_pgsizes; /* Bitmap of supported page sizes */
+ __aligned_u64 iova_pgsizes; /* Bitmap of supported page sizes */
__u32 cap_offset; /* Offset within info struct of first cap */
+ __u32 pad;
};
/*
diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
index 92e1b700b51c..649560c685f1 100644
--- a/include/uapi/linux/vhost.h
+++ b/include/uapi/linux/vhost.h
@@ -45,6 +45,25 @@
#define VHOST_SET_LOG_BASE _IOW(VHOST_VIRTIO, 0x04, __u64)
/* Specify an eventfd file descriptor to signal on log write. */
#define VHOST_SET_LOG_FD _IOW(VHOST_VIRTIO, 0x07, int)
+/* By default, a device gets one vhost_worker that its virtqueues share. This
+ * command allows the owner of the device to create an additional vhost_worker
+ * for the device. It can later be bound to 1 or more of its virtqueues using
+ * the VHOST_ATTACH_VRING_WORKER command.
+ *
+ * This must be called after VHOST_SET_OWNER and the caller must be the owner
+ * of the device. The new thread will inherit the caller's cgroups and namespaces,
+ * and will share the caller's memory space. The new thread will also be
+ * counted against the caller's RLIMIT_NPROC value.
+ *
+ * The worker's ID used in other commands will be returned in
+ * vhost_worker_state.
+ */
+#define VHOST_NEW_WORKER _IOR(VHOST_VIRTIO, 0x8, struct vhost_worker_state)
+/* Free a worker created with VHOST_NEW_WORKER if it's not attached to any
+ * virtqueue. If userspace is not able to call this for workers it has created,
+ * the kernel will free all the device's workers when the device is closed.
+ */
+#define VHOST_FREE_WORKER _IOW(VHOST_VIRTIO, 0x9, struct vhost_worker_state)
/* Ring setup. */
/* Set number of descriptors in ring. This parameter can not
@@ -70,6 +89,18 @@
#define VHOST_VRING_BIG_ENDIAN 1
#define VHOST_SET_VRING_ENDIAN _IOW(VHOST_VIRTIO, 0x13, struct vhost_vring_state)
#define VHOST_GET_VRING_ENDIAN _IOW(VHOST_VIRTIO, 0x14, struct vhost_vring_state)
+/* Attach a vhost_worker created with VHOST_NEW_WORKER to one of the device's
+ * virtqueues.
+ *
+ * This will replace the virtqueue's existing worker. If the replaced worker
+ * is no longer attached to any virtqueues, it can be freed with
+ * VHOST_FREE_WORKER.
+ */
+#define VHOST_ATTACH_VRING_WORKER _IOW(VHOST_VIRTIO, 0x15, \
+ struct vhost_vring_worker)
+/* Return the vring worker's ID */
+#define VHOST_GET_VRING_WORKER _IOWR(VHOST_VIRTIO, 0x16, \
+ struct vhost_vring_worker)
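
A sketch of creating a dedicated worker and binding it to vring 0; "vhost_fd" is assumed to be a vhost device fd after VHOST_SET_OWNER, and the structs are defined in vhost_types.h further below:

	struct vhost_worker_state state = { 0 };

	ioctl(vhost_fd, VHOST_NEW_WORKER, &state);

	struct vhost_vring_worker w = {
		.index = 0,
		.worker_id = state.worker_id,
	};
	ioctl(vhost_fd, VHOST_ATTACH_VRING_WORKER, &w);
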
/* The following ioctls use eventfd file descriptors to signal and poll
* for events. */
@@ -188,4 +219,12 @@
*/
#define VHOST_VDPA_RESUME _IO(VHOST_VIRTIO, 0x7E)
+/* Get the group for the descriptor table including driver & device areas
+ * of a virtqueue: read index, write group in num.
+ * The virtqueue index is stored in the index field of vhost_vring_state.
+ * The group ID of the descriptor table for this specific virtqueue
+ * is returned via the num field of vhost_vring_state.
+ */
+#define VHOST_VDPA_GET_VRING_DESC_GROUP _IOWR(VHOST_VIRTIO, 0x7F, \
+ struct vhost_vring_state)
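
For example ("vdpa_fd" assumed):

	struct vhost_vring_state s = { .index = 2 };	/* virtqueue 2 */

	ioctl(vdpa_fd, VHOST_VDPA_GET_VRING_DESC_GROUP, &s);
	/* s.num now holds the descriptor table's group ID */
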
#endif
diff --git a/include/uapi/linux/vhost_types.h b/include/uapi/linux/vhost_types.h
index c5690a8992d8..d7656908f730 100644
--- a/include/uapi/linux/vhost_types.h
+++ b/include/uapi/linux/vhost_types.h
@@ -47,6 +47,22 @@ struct vhost_vring_addr {
__u64 log_guest_addr;
};
+struct vhost_worker_state {
+ /*
+ * For VHOST_NEW_WORKER the kernel will return the new vhost_worker id.
+ * For VHOST_FREE_WORKER this must be set to the id of the vhost_worker
+ * to free.
+ */
+ unsigned int worker_id;
+};
+
+struct vhost_vring_worker {
+ /* vring index */
+ unsigned int index;
+ /* The id of the vhost_worker returned from VHOST_NEW_WORKER */
+ unsigned int worker_id;
+};
+
/* no alignment requirement */
struct vhost_iotlb_msg {
__u64 iova;
@@ -165,5 +181,16 @@ struct vhost_vdpa_iova_range {
#define VHOST_BACKEND_F_SUSPEND 0x4
/* Device can be resumed */
#define VHOST_BACKEND_F_RESUME 0x5
+/* Device supports the driver enabling virtqueues both before and after
+ * DRIVER_OK
+ */
+#define VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK 0x6
+/* Device may expose the virtqueue's descriptor area, driver area and
+ * device area to a different group for ASID binding than where its
+ * buffers may reside. Requires VHOST_BACKEND_F_IOTLB_ASID.
+ */
+#define VHOST_BACKEND_F_DESC_ASID 0x7
+/* IOTLB mappings are not flushed across device reset */
+#define VHOST_BACKEND_F_IOTLB_PERSIST 0x8
#endif
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index aee75eb9e686..68e7ac178cc2 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -672,6 +672,7 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_NV12_4L4 v4l2_fourcc('V', 'T', '1', '2') /* 12 Y/CbCr 4:2:0 4x4 tiles */
#define V4L2_PIX_FMT_NV12_16L16 v4l2_fourcc('H', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 16x16 tiles */
#define V4L2_PIX_FMT_NV12_32L32 v4l2_fourcc('S', 'T', '1', '2') /* 12 Y/CbCr 4:2:0 32x32 tiles */
+#define V4L2_PIX_FMT_NV15_4L4 v4l2_fourcc('V', 'T', '1', '5') /* 15 Y/CbCr 4:2:0 10-bit 4x4 tiles */
#define V4L2_PIX_FMT_P010_4L4 v4l2_fourcc('T', '0', '1', '0') /* 12 Y/CbCr 4:2:0 10-bit 4x4 macroblocks */
#define V4L2_PIX_FMT_NV12_8L128 v4l2_fourcc('A', 'T', '1', '2') /* Y/CbCr 4:2:0 8x128 tiles */
#define V4L2_PIX_FMT_NV12_10BE_8L128 v4l2_fourcc_be('A', 'X', '1', '2') /* Y/CbCr 4:2:0 10-bit 8x128 tiles */
@@ -758,6 +759,7 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_FWHT_STATELESS v4l2_fourcc('S', 'F', 'W', 'H') /* Stateless FWHT (vicodec) */
#define V4L2_PIX_FMT_H264_SLICE v4l2_fourcc('S', '2', '6', '4') /* H264 parsed slices */
#define V4L2_PIX_FMT_HEVC_SLICE v4l2_fourcc('S', '2', '6', '5') /* HEVC parsed slices */
+#define V4L2_PIX_FMT_AV1_FRAME v4l2_fourcc('A', 'V', '1', 'F') /* AV1 parsed frame */
#define V4L2_PIX_FMT_SPK v4l2_fourcc('S', 'P', 'K', '0') /* Sorenson Spark */
#define V4L2_PIX_FMT_RV30 v4l2_fourcc('R', 'V', '3', '0') /* RealVideo 8 */
#define V4L2_PIX_FMT_RV40 v4l2_fourcc('R', 'V', '4', '0') /* RealVideo 9 & 10 */
@@ -794,12 +796,15 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_Z16 v4l2_fourcc('Z', '1', '6', ' ') /* Depth data 16-bit */
#define V4L2_PIX_FMT_MT21C v4l2_fourcc('M', 'T', '2', '1') /* Mediatek compressed block mode */
#define V4L2_PIX_FMT_MM21 v4l2_fourcc('M', 'M', '2', '1') /* Mediatek 8-bit block mode, two non-contiguous planes */
+#define V4L2_PIX_FMT_MT2110T v4l2_fourcc('M', 'T', '2', 'T') /* Mediatek 10-bit block tile mode */
+#define V4L2_PIX_FMT_MT2110R v4l2_fourcc('M', 'T', '2', 'R') /* Mediatek 10-bit block raster mode */
#define V4L2_PIX_FMT_INZI v4l2_fourcc('I', 'N', 'Z', 'I') /* Intel Planar Greyscale 10-bit and Depth 16-bit */
#define V4L2_PIX_FMT_CNF4 v4l2_fourcc('C', 'N', 'F', '4') /* Intel 4-bit packed depth confidence information */
#define V4L2_PIX_FMT_HI240 v4l2_fourcc('H', 'I', '2', '4') /* BTTV 8-bit dithered RGB */
#define V4L2_PIX_FMT_QC08C v4l2_fourcc('Q', '0', '8', 'C') /* Qualcomm 8-bit compressed */
#define V4L2_PIX_FMT_QC10C v4l2_fourcc('Q', '1', '0', 'C') /* Qualcomm 10-bit compressed */
#define V4L2_PIX_FMT_AJPG v4l2_fourcc('A', 'J', 'P', 'G') /* Aspeed JPEG */
+#define V4L2_PIX_FMT_HEXTILE v4l2_fourcc('H', 'X', 'T', 'L') /* Hextile compressed */
/* 10bit raw packed, 32 bytes for every 25 pixels, last LSB 6 bits unused */
#define V4L2_PIX_FMT_IPU3_SBGGR10 v4l2_fourcc('i', 'p', '3', 'b') /* IPU3 packed 10-bit BGGR bayer */
@@ -1030,6 +1035,7 @@ struct v4l2_requestbuffers {
#define V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS (1 << 4)
#define V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF (1 << 5)
#define V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS (1 << 6)
+#define V4L2_BUF_CAP_SUPPORTS_MAX_NUM_BUFFERS (1 << 7)
/**
* struct v4l2_plane - plane info for multi-planar buffers
@@ -1720,7 +1726,7 @@ struct v4l2_input {
__u8 name[32]; /* Label */
__u32 type; /* Type of input */
__u32 audioset; /* Associated audios (bitfield) */
- __u32 tuner; /* enum v4l2_tuner_type */
+ __u32 tuner; /* Tuner index */
v4l2_std_id std;
__u32 status;
__u32 capabilities;
@@ -1807,11 +1813,11 @@ struct v4l2_ext_control {
__u8 __user *p_u8;
__u16 __user *p_u16;
__u32 __user *p_u32;
- __u32 __user *p_s32;
- __u32 __user *p_s64;
+ __s32 __user *p_s32;
+ __s64 __user *p_s64;
struct v4l2_area __user *p_area;
struct v4l2_ctrl_h264_sps __user *p_h264_sps;
- struct v4l2_ctrl_h264_pps *p_h264_pps;
+ struct v4l2_ctrl_h264_pps __user *p_h264_pps;
struct v4l2_ctrl_h264_scaling_matrix __user *p_h264_scaling_matrix;
struct v4l2_ctrl_h264_pred_weights __user *p_h264_pred_weights;
struct v4l2_ctrl_h264_slice_params __user *p_h264_slice_params;
@@ -1828,6 +1834,12 @@ struct v4l2_ext_control {
struct v4l2_ctrl_hevc_slice_params __user *p_hevc_slice_params;
struct v4l2_ctrl_hevc_scaling_matrix __user *p_hevc_scaling_matrix;
struct v4l2_ctrl_hevc_decode_params __user *p_hevc_decode_params;
+ struct v4l2_ctrl_av1_sequence __user *p_av1_sequence;
+ struct v4l2_ctrl_av1_tile_group_entry __user *p_av1_tile_group_entry;
+ struct v4l2_ctrl_av1_frame __user *p_av1_frame;
+ struct v4l2_ctrl_av1_film_grain __user *p_av1_film_grain;
+ struct v4l2_ctrl_hdr10_cll_info __user *p_hdr10_cll_info;
+ struct v4l2_ctrl_hdr10_mastering_display __user *p_hdr10_mastering_display;
void __user *ptr;
};
} __attribute__ ((packed));
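
A sketch of passing an AV1 frame header through the new union member using the request API; "video_fd", "req_fd" and a filled-in "frame" are assumptions:

	struct v4l2_ext_control ctrl = {
		.id = V4L2_CID_STATELESS_AV1_FRAME,
		.size = sizeof(frame),
		.p_av1_frame = &frame,
	};
	struct v4l2_ext_controls ctrls = {
		.which = V4L2_CTRL_WHICH_REQUEST_VAL,
		.request_fd = req_fd,
		.count = 1,
		.controls = &ctrl,
	};
	ioctl(video_fd, VIDIOC_S_EXT_CTRLS, &ctrls);
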
@@ -1901,6 +1913,11 @@ enum v4l2_ctrl_type {
V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS = 0x0272,
V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX = 0x0273,
V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS = 0x0274,
+
+ V4L2_CTRL_TYPE_AV1_SEQUENCE = 0x280,
+ V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY = 0x281,
+ V4L2_CTRL_TYPE_AV1_FRAME = 0x282,
+ V4L2_CTRL_TYPE_AV1_FILM_GRAIN = 0x283,
};
/* Used in the VIDIOC_QUERYCTRL ioctl for querying controls */
@@ -2591,6 +2608,9 @@ struct v4l2_dbg_chip_info {
* @flags: additional buffer management attributes (ignored unless the
* queue has V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS capability
* and configured for MMAP streaming I/O).
+ * @max_num_buffers: if V4L2_BUF_CAP_SUPPORTS_MAX_NUM_BUFFERS capability flag is set
+ * this field indicates the maximum possible number of buffers
+ * for this queue.
* @reserved: future extensions
*/
struct v4l2_create_buffers {
@@ -2600,7 +2620,8 @@ struct v4l2_create_buffers {
struct v4l2_format format;
__u32 capabilities;
__u32 flags;
- __u32 reserved[6];
+ __u32 max_num_buffers;
+ __u32 reserved[5];
};
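
A sketch of reading the new field; count = 0 makes VIDIOC_CREATE_BUFS a pure query, and "fd" plus a valid current format are assumed:

	struct v4l2_create_buffers cb = {
		.count = 0,		/* query only, allocate nothing */
		.memory = V4L2_MEMORY_MMAP,
	};

	/* cb.format is assumed to have been filled via VIDIOC_G_FMT */
	ioctl(fd, VIDIOC_CREATE_BUFS, &cb);
	if (cb.capabilities & V4L2_BUF_CAP_SUPPORTS_MAX_NUM_BUFFERS)
		; /* cb.max_num_buffers is valid for this queue */
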
/*
diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h
index 2c712c654165..2445f365bce7 100644
--- a/include/uapi/linux/virtio_config.h
+++ b/include/uapi/linux/virtio_config.h
@@ -52,7 +52,7 @@
* rest are per-device feature bits.
*/
#define VIRTIO_TRANSPORT_F_START 28
-#define VIRTIO_TRANSPORT_F_END 41
+#define VIRTIO_TRANSPORT_F_END 42
#ifndef VIRTIO_CONFIG_NO_LEGACY
/* Do we get callbacks when the ring is completely used, even if we've
@@ -105,8 +105,19 @@
*/
#define VIRTIO_F_NOTIFICATION_DATA 38
+/* This feature indicates that the driver uses the data provided by the device
+ * as a virtqueue identifier in available buffer notifications.
+ */
+#define VIRTIO_F_NOTIF_CONFIG_DATA 39
+
/*
* This feature indicates that the driver can reset a queue individually.
*/
#define VIRTIO_F_RING_RESET 40
+
+/*
+ * This feature indicates that the device support administration virtqueues.
+ */
+#define VIRTIO_F_ADMIN_VQ 41
+
#endif /* _UAPI_LINUX_VIRTIO_CONFIG_H */
diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h
index 12c1c9699935..cc65ef0f3c3e 100644
--- a/include/uapi/linux/virtio_net.h
+++ b/include/uapi/linux/virtio_net.h
@@ -56,6 +56,7 @@
#define VIRTIO_NET_F_MQ 22 /* Device supports Receive Flow
* Steering */
#define VIRTIO_NET_F_CTRL_MAC_ADDR 23 /* Set MAC address */
+#define VIRTIO_NET_F_VQ_NOTF_COAL 52 /* Device supports virtqueue notification coalescing */
#define VIRTIO_NET_F_NOTF_COAL 53 /* Device supports notifications coalescing */
#define VIRTIO_NET_F_GUEST_USO4 54 /* Guest can handle USOv4 in. */
#define VIRTIO_NET_F_GUEST_USO6 55 /* Guest can handle USOv6 in. */
@@ -391,5 +392,18 @@ struct virtio_net_ctrl_coal_rx {
};
#define VIRTIO_NET_CTRL_NOTF_COAL_RX_SET 1
+#define VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET 2
+#define VIRTIO_NET_CTRL_NOTF_COAL_VQ_GET 3
+
+struct virtio_net_ctrl_coal {
+ __le32 max_packets;
+ __le32 max_usecs;
+};
+
+struct virtio_net_ctrl_coal_vq {
+ __le16 vqn;
+ __le16 reserved;
+ struct virtio_net_ctrl_coal coal;
+};
#endif /* _UAPI_LINUX_VIRTIO_NET_H */
diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h
index f703afc7ad31..ef3810dee7ef 100644
--- a/include/uapi/linux/virtio_pci.h
+++ b/include/uapi/linux/virtio_pci.h
@@ -166,6 +166,20 @@ struct virtio_pci_common_cfg {
__le32 queue_used_hi; /* read-write */
};
+/*
+ * Warning: do not use sizeof() on this structure; use offsetofend() for
+ * the specific fields you need.
+ */
+struct virtio_pci_modern_common_cfg {
+ struct virtio_pci_common_cfg cfg;
+
+ __le16 queue_notify_data; /* read-write */
+ __le16 queue_reset; /* read-write */
+
+ __le16 admin_queue_index; /* read-only */
+ __le16 admin_queue_num; /* read-only */
+};
+
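Per the warning, size validation should use the kernel's offsetofend() helper on the last field actually required; for example, with "cfg_len" an assumed mapping length:

	if (cfg_len >= offsetofend(struct virtio_pci_modern_common_cfg,
				   queue_reset))
		; /* queue_notify_data and queue_reset are accessible */
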
/* Fields in VIRTIO_PCI_CAP_PCI_CFG: */
struct virtio_pci_cfg_cap {
struct virtio_pci_cap cap;
@@ -204,7 +218,72 @@ struct virtio_pci_cfg_cap {
#define VIRTIO_PCI_COMMON_Q_USEDHI 52
#define VIRTIO_PCI_COMMON_Q_NDATA 56
#define VIRTIO_PCI_COMMON_Q_RESET 58
+#define VIRTIO_PCI_COMMON_ADM_Q_IDX 60
+#define VIRTIO_PCI_COMMON_ADM_Q_NUM 62
#endif /* VIRTIO_PCI_NO_MODERN */
+/* Admin command status. */
+#define VIRTIO_ADMIN_STATUS_OK 0
+
+/* Admin command opcode. */
+#define VIRTIO_ADMIN_CMD_LIST_QUERY 0x0
+#define VIRTIO_ADMIN_CMD_LIST_USE 0x1
+
+/* Admin command group type. */
+#define VIRTIO_ADMIN_GROUP_TYPE_SRIOV 0x1
+
+/* Transitional device admin command. */
+#define VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_WRITE 0x2
+#define VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_READ 0x3
+#define VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_WRITE 0x4
+#define VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_READ 0x5
+#define VIRTIO_ADMIN_CMD_LEGACY_NOTIFY_INFO 0x6
+
+struct __packed virtio_admin_cmd_hdr {
+ __le16 opcode;
+ /*
+ * 1 - SR-IOV
+ * 2-65535 - reserved
+ */
+ __le16 group_type;
+ /* Unused, reserved for future extensions. */
+ __u8 reserved1[12];
+ __le64 group_member_id;
+};
+
+struct __packed virtio_admin_cmd_status {
+ __le16 status;
+ __le16 status_qualifier;
+ /* Unused, reserved for future extensions. */
+ __u8 reserved2[4];
+};
+
+struct __packed virtio_admin_cmd_legacy_wr_data {
+ __u8 offset; /* Starting offset of the register(s) to write. */
+ __u8 reserved[7];
+ __u8 registers[];
+};
+
+struct __packed virtio_admin_cmd_legacy_rd_data {
+ __u8 offset; /* Starting offset of the register(s) to read. */
+};
+
+#define VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_END 0
+#define VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_OWNER_DEV 0x1
+#define VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_OWNER_MEM 0x2
+
+#define VIRTIO_ADMIN_CMD_MAX_NOTIFY_INFO 4
+
+struct __packed virtio_admin_cmd_notify_info_data {
+ __u8 flags; /* 0 = end of list, 1 = owner device, 2 = member device */
+ __u8 bar; /* BAR of the member or the owner device */
+ __u8 padding[6];
+ __le64 offset; /* Offset within bar. */
+};
+
+struct virtio_admin_cmd_notify_info_result {
+ struct virtio_admin_cmd_notify_info_data entries[VIRTIO_ADMIN_CMD_MAX_NOTIFY_INFO];
+};
+
#endif
diff --git a/include/uapi/linux/virtio_pmem.h b/include/uapi/linux/virtio_pmem.h
index d676b3620383..ede4f3564977 100644
--- a/include/uapi/linux/virtio_pmem.h
+++ b/include/uapi/linux/virtio_pmem.h
@@ -14,6 +14,13 @@
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
+/* Feature bits */
+/* guest physical address range will be indicated as shared memory region 0 */
+#define VIRTIO_PMEM_F_SHMEM_REGION 0
+
+/* shmid of the shared memory region corresponding to the pmem */
+#define VIRTIO_PMEM_SHMEM_REGION_ID 0
+
struct virtio_pmem_config {
__le64 start;
__le64 size;
diff --git a/include/uapi/linux/vm_sockets.h b/include/uapi/linux/vm_sockets.h
index c60ca33eac59..ed07181d4eff 100644
--- a/include/uapi/linux/vm_sockets.h
+++ b/include/uapi/linux/vm_sockets.h
@@ -191,4 +191,21 @@ struct sockaddr_vm {
#define IOCTL_VM_SOCKETS_GET_LOCAL_CID _IO(7, 0xb9)
+/* MSG_ZEROCOPY notifications are encoded in the standard error format,
+ * sock_extended_err. See Documentation/networking/msg_zerocopy.rst in
+ * the kernel source tree for more details.
+ */
+
+/* 'cmsg_level' field value of 'struct cmsghdr' for notification parsing
+ * when MSG_ZEROCOPY flag is used on transmissions.
+ */
+
+#define SOL_VSOCK 287
+
+/* 'cmsg_type' field value of 'struct cmsghdr' for notification parsing
+ * when MSG_ZEROCOPY flag is used on transmissions.
+ */
+
+#define VSOCK_RECVERR 1
+
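A sketch of reaping one completion notification with these constants, mirroring the msg_zerocopy.rst flow; "fd" is assumed to be a connected AF_VSOCK socket that sent with MSG_ZEROCOPY:

	char control[CMSG_SPACE(sizeof(struct sock_extended_err))] = { 0 };
	struct msghdr msg = {
		.msg_control = control,
		.msg_controllen = sizeof(control),
	};

	if (recvmsg(fd, &msg, MSG_ERRQUEUE) != -1) {
		struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);

		if (cm && cm->cmsg_level == SOL_VSOCK &&
		    cm->cmsg_type == VSOCK_RECVERR) {
			struct sock_extended_err *serr =
				(struct sock_extended_err *)CMSG_DATA(cm);
			/* [serr->ee_info, serr->ee_data] is the range of
			 * completed send calls */
		}
	}
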
#endif /* _UAPI_VM_SOCKETS_H */
diff --git a/include/uapi/linux/wireless.h b/include/uapi/linux/wireless.h
index 08967b3f19c8..3c2ad5fae17f 100644
--- a/include/uapi/linux/wireless.h
+++ b/include/uapi/linux/wireless.h
@@ -835,7 +835,7 @@ struct iw_encode_ext {
* individual keys */
__u16 alg; /* IW_ENCODE_ALG_* */
__u16 key_len;
- __u8 key[0];
+ __u8 key[];
};
/* SIOCSIWMLME data */
diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h
index 23543c33fee8..6a77328be114 100644
--- a/include/uapi/linux/xfrm.h
+++ b/include/uapi/linux/xfrm.h
@@ -4,6 +4,7 @@
#include <linux/in6.h>
#include <linux/types.h>
+#include <linux/stddef.h>
/* All of the structures in this file may not change size as they are
* passed into the kernel from userspace via netlink sockets.
@@ -33,7 +34,7 @@ struct xfrm_sec_ctx {
__u8 ctx_alg;
__u16 ctx_len;
__u32 ctx_sid;
- char ctx_str[];
+ char ctx_str[] __counted_by(ctx_len);
};
/* Security Context Domains of Interpretation */
diff --git a/include/uapi/mtd/ubi-user.h b/include/uapi/mtd/ubi-user.h
index dcb179de4358..e1571603175e 100644
--- a/include/uapi/mtd/ubi-user.h
+++ b/include/uapi/mtd/ubi-user.h
@@ -248,6 +248,7 @@ enum {
* @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
* @padding: reserved for future, not used, has to be zeroed
* @disable_fm: whether disable fastmap
+ * @need_resv_pool: whether to reserve free PEBs for filling pool/wl_pool
*
* This data structure is used to specify MTD device UBI has to attach and the
* parameters it has to use. The number which should be assigned to the new UBI
@@ -293,7 +294,8 @@ struct ubi_attach_req {
__s32 vid_hdr_offset;
__s16 max_beb_per1024;
__s8 disable_fm;
- __s8 padding[9];
+ __s8 need_resv_pool;
+ __s8 padding[8];
};
/*
diff --git a/include/uapi/rdma/bnxt_re-abi.h b/include/uapi/rdma/bnxt_re-abi.h
index c4e90775da0c..c0c34aca90ec 100644
--- a/include/uapi/rdma/bnxt_re-abi.h
+++ b/include/uapi/rdma/bnxt_re-abi.h
@@ -41,6 +41,7 @@
#define __BNXT_RE_UVERBS_ABI_H__
#include <linux/types.h>
+#include <rdma/ib_user_ioctl_cmds.h>
#define BNXT_RE_ABI_VERSION 1
@@ -51,6 +52,10 @@
enum {
BNXT_RE_UCNTX_CMASK_HAVE_CCTX = 0x1ULL,
BNXT_RE_UCNTX_CMASK_HAVE_MODE = 0x02ULL,
+ BNXT_RE_UCNTX_CMASK_WC_DPI_ENABLED = 0x04ULL,
+ BNXT_RE_UCNTX_CMASK_DBR_PACING_ENABLED = 0x08ULL,
+ BNXT_RE_UCNTX_CMASK_POW2_DISABLED = 0x10ULL,
+ BNXT_RE_COMP_MASK_UCNTX_HW_RETX_ENABLED = 0x40,
};
enum bnxt_re_wqe_mode {
@@ -59,6 +64,14 @@ enum bnxt_re_wqe_mode {
BNXT_QPLIB_WQE_MODE_INVALID = 0x02,
};
+enum {
+ BNXT_RE_COMP_MASK_REQ_UCNTX_POW2_SUPPORT = 0x01,
+};
+
+struct bnxt_re_uctx_req {
+ __aligned_u64 comp_mask;
+};
+
struct bnxt_re_uctx_resp {
__u32 dev_id;
__u32 max_qp;
@@ -89,11 +102,16 @@ struct bnxt_re_cq_req {
__aligned_u64 cq_handle;
};
+enum bnxt_re_cq_mask {
+ BNXT_RE_CQ_TOGGLE_PAGE_SUPPORT = 0x1,
+};
+
struct bnxt_re_cq_resp {
__u32 cqid;
__u32 tail;
__u32 phase;
__u32 rsvd;
+ __aligned_u64 comp_mask;
};
struct bnxt_re_resize_cq_req {
@@ -127,4 +145,61 @@ enum bnxt_re_shpg_offt {
BNXT_RE_END_RESV_OFFT = 0xFF0
};
+enum bnxt_re_objects {
+ BNXT_RE_OBJECT_ALLOC_PAGE = (1U << UVERBS_ID_NS_SHIFT),
+ BNXT_RE_OBJECT_NOTIFY_DRV,
+ BNXT_RE_OBJECT_GET_TOGGLE_MEM,
+};
+
+enum bnxt_re_alloc_page_type {
+ BNXT_RE_ALLOC_WC_PAGE = 0,
+ BNXT_RE_ALLOC_DBR_BAR_PAGE,
+ BNXT_RE_ALLOC_DBR_PAGE,
+};
+
+enum bnxt_re_var_alloc_page_attrs {
+ BNXT_RE_ALLOC_PAGE_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+ BNXT_RE_ALLOC_PAGE_TYPE,
+ BNXT_RE_ALLOC_PAGE_DPI,
+ BNXT_RE_ALLOC_PAGE_MMAP_OFFSET,
+ BNXT_RE_ALLOC_PAGE_MMAP_LENGTH,
+};
+
+enum bnxt_re_alloc_page_attrs {
+ BNXT_RE_DESTROY_PAGE_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+};
+
+enum bnxt_re_alloc_page_methods {
+ BNXT_RE_METHOD_ALLOC_PAGE = (1U << UVERBS_ID_NS_SHIFT),
+ BNXT_RE_METHOD_DESTROY_PAGE,
+};
+
+enum bnxt_re_notify_drv_methods {
+ BNXT_RE_METHOD_NOTIFY_DRV = (1U << UVERBS_ID_NS_SHIFT),
+};
+
+/* Toggle mem */
+
+enum bnxt_re_get_toggle_mem_type {
+ BNXT_RE_CQ_TOGGLE_MEM = 0,
+ BNXT_RE_SRQ_TOGGLE_MEM,
+};
+
+enum bnxt_re_var_toggle_mem_attrs {
+ BNXT_RE_TOGGLE_MEM_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+ BNXT_RE_TOGGLE_MEM_TYPE,
+ BNXT_RE_TOGGLE_MEM_RES_ID,
+ BNXT_RE_TOGGLE_MEM_MMAP_PAGE,
+ BNXT_RE_TOGGLE_MEM_MMAP_OFFSET,
+ BNXT_RE_TOGGLE_MEM_MMAP_LENGTH,
+};
+
+enum bnxt_re_toggle_mem_attrs {
+ BNXT_RE_RELEASE_TOGGLE_MEM_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+};
+
+enum bnxt_re_toggle_mem_methods {
+ BNXT_RE_METHOD_GET_TOGGLE_MEM = (1U << UVERBS_ID_NS_SHIFT),
+ BNXT_RE_METHOD_RELEASE_TOGGLE_MEM,
+};
#endif /* __BNXT_RE_UVERBS_ABI_H__*/
diff --git a/include/uapi/rdma/efa-abi.h b/include/uapi/rdma/efa-abi.h
index d94c32f28804..701e2d567e41 100644
--- a/include/uapi/rdma/efa-abi.h
+++ b/include/uapi/rdma/efa-abi.h
@@ -1,12 +1,13 @@
/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
/*
- * Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef EFA_ABI_USER_H
#define EFA_ABI_USER_H
#include <linux/types.h>
+#include <rdma/ib_user_ioctl_cmds.h>
/*
* Increment this value if any changes that break userspace ABI
@@ -134,4 +135,22 @@ struct efa_ibv_ex_query_device_resp {
__u32 device_caps;
};
+enum {
+ EFA_QUERY_MR_VALIDITY_RECV_IC_ID = 1 << 0,
+ EFA_QUERY_MR_VALIDITY_RDMA_READ_IC_ID = 1 << 1,
+ EFA_QUERY_MR_VALIDITY_RDMA_RECV_IC_ID = 1 << 2,
+};
+
+enum efa_query_mr_attrs {
+ EFA_IB_ATTR_QUERY_MR_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+ EFA_IB_ATTR_QUERY_MR_RESP_IC_ID_VALIDITY,
+ EFA_IB_ATTR_QUERY_MR_RESP_RECV_IC_ID,
+ EFA_IB_ATTR_QUERY_MR_RESP_RDMA_READ_IC_ID,
+ EFA_IB_ATTR_QUERY_MR_RESP_RDMA_RECV_IC_ID,
+};
+
+enum efa_mr_methods {
+ EFA_IB_METHOD_MR_QUERY = (1U << UVERBS_ID_NS_SHIFT),
+};
+
#endif /* EFA_ABI_USER_H */
diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h
index 2e68a8b0c92c..c996e151081e 100644
--- a/include/uapi/rdma/hns-abi.h
+++ b/include/uapi/rdma/hns-abi.h
@@ -52,15 +52,25 @@ struct hns_roce_ib_create_cq_resp {
__aligned_u64 cap_flags;
};
+enum hns_roce_srq_cap_flags {
+ HNS_ROCE_SRQ_CAP_RECORD_DB = 1 << 0,
+};
+
+enum hns_roce_srq_cap_flags_resp {
+ HNS_ROCE_RSP_SRQ_CAP_RECORD_DB = 1 << 0,
+};
+
struct hns_roce_ib_create_srq {
__aligned_u64 buf_addr;
__aligned_u64 db_addr;
__aligned_u64 que_addr;
+ __u32 req_cap_flags; /* Use enum hns_roce_srq_cap_flags */
+ __u32 reserved;
};
struct hns_roce_ib_create_srq_resp {
__u32 srqn;
- __u32 reserved;
+ __u32 cap_flags; /* Use enum hns_roce_srq_cap_flags_resp */
};
struct hns_roce_ib_create_qp {
@@ -115,4 +125,9 @@ struct hns_roce_ib_alloc_pd_resp {
__u32 pdn;
};
+struct hns_roce_ib_create_ah_resp {
+ __u8 dmac[6];
+ __u8 reserved[2];
+};
+
#endif /* HNS_ABI_USER_H */
diff --git a/include/uapi/rdma/ib_user_ioctl_verbs.h b/include/uapi/rdma/ib_user_ioctl_verbs.h
index d7c5aaa32744..fe15bc7e9f70 100644
--- a/include/uapi/rdma/ib_user_ioctl_verbs.h
+++ b/include/uapi/rdma/ib_user_ioctl_verbs.h
@@ -220,7 +220,8 @@ enum ib_uverbs_advise_mr_flag {
struct ib_uverbs_query_port_resp_ex {
struct ib_uverbs_query_port_resp legacy_resp;
__u16 port_cap_flags2;
- __u8 reserved[6];
+ __u8 reserved[2];
+ __u32 active_speed_ex;
};
struct ib_uverbs_qp_cap {
diff --git a/include/uapi/rdma/irdma-abi.h b/include/uapi/rdma/irdma-abi.h
index a7085e092d34..bb18f15489e3 100644
--- a/include/uapi/rdma/irdma-abi.h
+++ b/include/uapi/rdma/irdma-abi.h
@@ -22,10 +22,16 @@ enum irdma_memreg_type {
IRDMA_MEMREG_TYPE_CQ = 2,
};
+enum {
+ IRDMA_ALLOC_UCTX_USE_RAW_ATTR = 1 << 0,
+ IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE = 1 << 1,
+};
+
struct irdma_alloc_ucontext_req {
__u32 rsvd32;
__u8 userspace_ver;
__u8 rsvd8[3];
+ __aligned_u64 comp_mask;
};
struct irdma_alloc_ucontext_resp {
@@ -46,6 +52,9 @@ struct irdma_alloc_ucontext_resp {
__u16 max_hw_sq_chunk;
__u8 hw_rev;
__u8 rsvd2;
+ __aligned_u64 comp_mask;
+ __u16 min_hw_wq_size;
+ __u8 rsvd3[6];
};
struct irdma_alloc_pd_resp {
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
index a96b7d2770e1..d4f6a36dffb0 100644
--- a/include/uapi/rdma/mlx5-abi.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -37,6 +37,7 @@
#include <linux/types.h>
#include <linux/if_ether.h> /* For ETH_ALEN. */
#include <rdma/ib_user_ioctl_verbs.h>
+#include <rdma/mlx5_user_ioctl_verbs.h>
enum {
MLX5_QP_FLAG_SIGNATURE = 1 << 0,
@@ -275,6 +276,7 @@ struct mlx5_ib_query_device_resp {
__u32 tunnel_offloads_caps; /* enum mlx5_ib_tunnel_offloads */
struct mlx5_ib_dci_streams_caps dci_streams_caps;
__u16 reserved;
+ struct mlx5_ib_uapi_reg reg_c0;
};
enum mlx5_ib_create_cq_flags {
diff --git a/include/uapi/rdma/mlx5_user_ioctl_verbs.h b/include/uapi/rdma/mlx5_user_ioctl_verbs.h
index 7af9e09ea556..3189c7f08d17 100644
--- a/include/uapi/rdma/mlx5_user_ioctl_verbs.h
+++ b/include/uapi/rdma/mlx5_user_ioctl_verbs.h
@@ -64,6 +64,7 @@ enum mlx5_ib_uapi_dm_type {
MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM,
MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM,
MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM,
+ MLX5_IB_UAPI_DM_TYPE_ENCAP_SW_ICM,
};
enum mlx5_ib_uapi_devx_create_event_channel_flags {
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index e50c357367db..723bbb0f7042 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -299,6 +299,8 @@ enum rdma_nldev_command {
RDMA_NLDEV_CMD_STAT_GET_STATUS,
+ RDMA_NLDEV_CMD_RES_SRQ_GET_RAW,
+
RDMA_NLDEV_NUM_OPS
};
@@ -554,6 +556,8 @@ enum rdma_nldev_attr {
RDMA_NLDEV_ATTR_STAT_HWCOUNTER_INDEX, /* u32 */
RDMA_NLDEV_ATTR_STAT_HWCOUNTER_DYNAMIC, /* u8 */
+ RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE, /* u8 */
+
/*
* Always the end
*/
diff --git a/include/uapi/rdma/siw-abi.h b/include/uapi/rdma/siw-abi.h
index af735f55b291..6df49724954f 100644
--- a/include/uapi/rdma/siw-abi.h
+++ b/include/uapi/rdma/siw-abi.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) or BSD-3-Clause */
+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause */
/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */
diff --git a/include/uapi/regulator/regulator.h b/include/uapi/regulator/regulator.h
new file mode 100644
index 000000000000..71bf71a22e7f
--- /dev/null
+++ b/include/uapi/regulator/regulator.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Regulator uapi header
+ *
+ * Author: Naresh Solanki <Naresh.Solanki@9elements.com>
+ */
+
+#ifndef _UAPI_REGULATOR_H
+#define _UAPI_REGULATOR_H
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else
+#include <stdint.h>
+#endif
+
+/*
+ * Regulator notifier events.
+ *
+ * UNDER_VOLTAGE Regulator output is under voltage.
+ * OVER_CURRENT Regulator output current is too high.
+ * REGULATION_OUT Regulator output is out of regulation.
+ * FAIL Regulator output has failed.
+ * OVER_TEMP Regulator over temp.
+ * FORCE_DISABLE Regulator forcibly shut down by software.
+ * VOLTAGE_CHANGE Regulator voltage changed.
+ * Data passed is old voltage cast to (void *).
+ * DISABLE Regulator was disabled.
+ * PRE_VOLTAGE_CHANGE Regulator is about to have voltage changed.
+ * Data passed is "struct pre_voltage_change_data"
+ * ABORT_VOLTAGE_CHANGE Regulator voltage change failed for some reason.
+ * Data passed is old voltage cast to (void *).
+ * PRE_DISABLE Regulator is about to be disabled.
+ * ABORT_DISABLE Regulator disable failed for some reason.
+ *
+ * NOTE: These events can be OR'ed together when passed into the handler.
+ */
+
+#define REGULATOR_EVENT_UNDER_VOLTAGE 0x01
+#define REGULATOR_EVENT_OVER_CURRENT 0x02
+#define REGULATOR_EVENT_REGULATION_OUT 0x04
+#define REGULATOR_EVENT_FAIL 0x08
+#define REGULATOR_EVENT_OVER_TEMP 0x10
+#define REGULATOR_EVENT_FORCE_DISABLE 0x20
+#define REGULATOR_EVENT_VOLTAGE_CHANGE 0x40
+#define REGULATOR_EVENT_DISABLE 0x80
+#define REGULATOR_EVENT_PRE_VOLTAGE_CHANGE 0x100
+#define REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE 0x200
+#define REGULATOR_EVENT_PRE_DISABLE 0x400
+#define REGULATOR_EVENT_ABORT_DISABLE 0x800
+#define REGULATOR_EVENT_ENABLE 0x1000
+/*
+ * The following notifications should be emitted only if the detected
+ * condition is such that the HW is likely to still be working, but
+ * consumers should take a recovery action to prevent problems from
+ * escalating into errors.
+ */
+#define REGULATOR_EVENT_UNDER_VOLTAGE_WARN 0x2000
+#define REGULATOR_EVENT_OVER_CURRENT_WARN 0x4000
+#define REGULATOR_EVENT_OVER_VOLTAGE_WARN 0x8000
+#define REGULATOR_EVENT_OVER_TEMP_WARN 0x10000
+#define REGULATOR_EVENT_WARN_MASK 0x1E000
+
+struct reg_genl_event {
+ char reg_name[32];
+ uint64_t event;
+};
+
+/* attributes of reg_genl_family */
+enum {
+ REG_GENL_ATTR_UNSPEC,
+ REG_GENL_ATTR_EVENT, /* reg event info needed by user space */
+ __REG_GENL_ATTR_MAX,
+};
+
+#define REG_GENL_ATTR_MAX (__REG_GENL_ATTR_MAX - 1)
+
+/* commands supported by the reg_genl_family */
+enum {
+ REG_GENL_CMD_UNSPEC,
+ REG_GENL_CMD_EVENT, /* kernel->user notifications for reg events */
+ __REG_GENL_CMD_MAX,
+};
+
+#define REG_GENL_CMD_MAX (__REG_GENL_CMD_MAX - 1)
+
+#define REG_GENL_FAMILY_NAME "reg_event"
+#define REG_GENL_VERSION 0x01
+#define REG_GENL_MCAST_GROUP_NAME "reg_mc_group"
+
+#endif /* _UAPI_REGULATOR_H */
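A minimal sketch of consuming these definitions once a struct reg_genl_event has arrived over the reg_event generic netlink family (the netlink receive plumbing is omitted; report_reg_event() is a hypothetical helper, and the installed header path is assumed):

#include <stdio.h>
#include <regulator/regulator.h>	/* installed uapi header; path assumed */

static void report_reg_event(const struct reg_genl_event *ev)
{
	if (ev->event & REGULATOR_EVENT_FAIL)
		printf("%s: output failed\n", ev->reg_name);
	if (ev->event & REGULATOR_EVENT_OVER_TEMP)
		printf("%s: over temperature\n", ev->reg_name);
	/* warnings: HW still working, but recovery action is advised */
	if (ev->event & REGULATOR_EVENT_WARN_MASK)
		printf("%s: warning bits 0x%llx\n", ev->reg_name,
		       (unsigned long long)(ev->event & REGULATOR_EVENT_WARN_MASK));
}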
diff --git a/include/uapi/scsi/scsi_bsg_mpi3mr.h b/include/uapi/scsi/scsi_bsg_mpi3mr.h
index 907d345f04f9..c72ce387286a 100644
--- a/include/uapi/scsi/scsi_bsg_mpi3mr.h
+++ b/include/uapi/scsi/scsi_bsg_mpi3mr.h
@@ -491,6 +491,8 @@ struct mpi3_nvme_encapsulated_error_reply {
#define MPI3MR_NVME_DATA_FORMAT_PRP 0
#define MPI3MR_NVME_DATA_FORMAT_SGL1 1
#define MPI3MR_NVME_DATA_FORMAT_SGL2 2
+#define MPI3MR_NVMESGL_DATA_SEGMENT 0x00
+#define MPI3MR_NVMESGL_LAST_SEGMENT 0x03
/* MPI3: task management related definitions */
struct mpi3_scsi_task_mgmt_request {
diff --git a/include/uapi/scsi/scsi_bsg_ufs.h b/include/uapi/scsi/scsi_bsg_ufs.h
index 2801b65299aa..03f2beadf201 100644
--- a/include/uapi/scsi/scsi_bsg_ufs.h
+++ b/include/uapi/scsi/scsi_bsg_ufs.h
@@ -8,6 +8,7 @@
#ifndef SCSI_BSG_UFS_H
#define SCSI_BSG_UFS_H
+#include <asm/byteorder.h>
#include <linux/types.h>
/*
* This file intended to be included by both kernel and user space
@@ -40,11 +41,56 @@ enum ufs_rpmb_op_type {
* @dword_0: UPIU header DW-0
* @dword_1: UPIU header DW-1
* @dword_2: UPIU header DW-2
+ *
+ * @transaction_code: Type of request or response. See also enum
+ * upiu_request_transaction and enum upiu_response_transaction.
+ * @flags: UPIU flags. The meaning of individual flags depends on the
+ * transaction code.
+ * @lun: Logical unit number.
+ * @task_tag: Task tag.
+ * @iid: Initiator ID.
+ * @command_set_type: 0 for SCSI command set; 1 for UFS specific.
+ * @tm_function: Task management function in case of a task management request
+ * UPIU.
+ * @query_function: Query function in case of a query request UPIU.
+ * @response: 0 for success; 1 for failure.
+ * @status: SCSI status if this is the header of a response to a SCSI command.
+ * @ehs_length: EHS length in units of 32 bytes.
+ * @device_information: device information.
+ * @data_segment_length: data segment length.
*/
struct utp_upiu_header {
- __be32 dword_0;
- __be32 dword_1;
- __be32 dword_2;
+ union {
+ struct {
+ __be32 dword_0;
+ __be32 dword_1;
+ __be32 dword_2;
+ };
+ struct {
+ __u8 transaction_code;
+ __u8 flags;
+ __u8 lun;
+ __u8 task_tag;
+#if defined(__BIG_ENDIAN)
+ __u8 iid: 4;
+ __u8 command_set_type: 4;
+#elif defined(__LITTLE_ENDIAN)
+ __u8 command_set_type: 4;
+ __u8 iid: 4;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ union {
+ __u8 tm_function;
+ __u8 query_function;
+ } __attribute__((packed));
+ __u8 response;
+ __u8 status;
+ __u8 ehs_length;
+ __u8 device_information;
+ __be16 data_segment_length;
+ };
+ };
};
/**
@@ -71,6 +117,31 @@ struct utp_upiu_query {
};
/**
+ * struct utp_upiu_query_v4_0 - upiu request buffer structure for
+ * query requests (UFS 4.0 and later).
+ * @opcode: command to perform B-0
+ * @idn: a value that indicates the particular type of data B-1
+ * @index: Index to further identify data B-2
+ * @selector: Index to further identify data B-3
+ * @osf3: spec field B-4
+ * @osf4: spec field B-5
+ * @osf5: spec field B 6,7
+ * @osf6: spec field DW 8,9
+ * @osf7: spec field DW 10,11
+ */
+struct utp_upiu_query_v4_0 {
+ __u8 opcode;
+ __u8 idn;
+ __u8 index;
+ __u8 selector;
+ __u8 osf3;
+ __u8 osf4;
+ __be16 osf5;
+ __be32 osf6;
+ __be32 osf7;
+ __be32 reserved;
+};
+
+/**
* struct utp_upiu_cmd - Command UPIU structure
* @data_transfer_len: Data Transfer Length DW-3
* @cdb: Command Descriptor Block CDB DW-4 to DW-7
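With the new union in place, kernel code can fill a UPIU header through named fields instead of open-coded big-endian dwords. A hedged kernel-side sketch using the UPIU_TRANSACTION_*, UPIU_CMD_FLAGS_* and UPIU_COMMAND_SET_TYPE_* values from <ufs/ufs.h>; init_cmd_upiu_header() and its lun/tag arguments are illustrative:

#include <asm/byteorder.h>
#include <linux/string.h>
#include <ufs/ufs.h>

static void init_cmd_upiu_header(struct utp_upiu_header *hdr, u8 lun, u8 tag)
{
	memset(hdr, 0, sizeof(*hdr));
	hdr->transaction_code = UPIU_TRANSACTION_COMMAND; /* initiator -> target */
	hdr->flags = UPIU_CMD_FLAGS_READ;		  /* data-in command */
	hdr->lun = lun;
	hdr->task_tag = tag;
	hdr->command_set_type = UPIU_COMMAND_SET_TYPE_SCSI;
	hdr->data_segment_length = cpu_to_be16(0);
}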
diff --git a/include/uapi/sound/asequencer.h b/include/uapi/sound/asequencer.h
index 00d2703e8fca..c85fdd8895d8 100644
--- a/include/uapi/sound/asequencer.h
+++ b/include/uapi/sound/asequencer.h
@@ -10,7 +10,7 @@
#include <sound/asound.h>
/** version of the sequencer */
-#define SNDRV_SEQ_VERSION SNDRV_PROTOCOL_VERSION(1, 0, 2)
+#define SNDRV_SEQ_VERSION SNDRV_PROTOCOL_VERSION(1, 0, 3)
/**
* definition of sequencer event types
@@ -174,6 +174,7 @@ struct snd_seq_connect {
#define SNDRV_SEQ_PRIORITY_HIGH (1<<4) /* event should be processed before others */
#define SNDRV_SEQ_PRIORITY_MASK (1<<4)
+#define SNDRV_SEQ_EVENT_UMP (1<<5) /* event holds a UMP packet */
/* note event */
struct snd_seq_ev_note {
@@ -206,7 +207,7 @@ struct snd_seq_ev_raw32 {
struct snd_seq_ev_ext {
unsigned int len; /* length of data */
void *ptr; /* pointer to data (note: maybe 64-bit) */
-} __attribute__((packed));
+} __packed;
struct snd_seq_result {
int event; /* processed event type */
@@ -250,8 +251,21 @@ struct snd_seq_ev_quote {
struct snd_seq_addr origin; /* original sender */
unsigned short value; /* optional data */
struct snd_seq_event *event; /* quoted event */
-} __attribute__((packed));
-
+} __packed;
+
+union snd_seq_event_data { /* event data... */
+ struct snd_seq_ev_note note;
+ struct snd_seq_ev_ctrl control;
+ struct snd_seq_ev_raw8 raw8;
+ struct snd_seq_ev_raw32 raw32;
+ struct snd_seq_ev_ext ext;
+ struct snd_seq_ev_queue_control queue;
+ union snd_seq_timestamp time;
+ struct snd_seq_addr addr;
+ struct snd_seq_connect connect;
+ struct snd_seq_result result;
+ struct snd_seq_ev_quote quote;
+};
/* sequencer event */
struct snd_seq_event {
@@ -262,25 +276,27 @@ struct snd_seq_event {
unsigned char queue; /* schedule queue */
union snd_seq_timestamp time; /* schedule time */
-
struct snd_seq_addr source; /* source address */
struct snd_seq_addr dest; /* destination address */
- union { /* event data... */
- struct snd_seq_ev_note note;
- struct snd_seq_ev_ctrl control;
- struct snd_seq_ev_raw8 raw8;
- struct snd_seq_ev_raw32 raw32;
- struct snd_seq_ev_ext ext;
- struct snd_seq_ev_queue_control queue;
- union snd_seq_timestamp time;
- struct snd_seq_addr addr;
- struct snd_seq_connect connect;
- struct snd_seq_result result;
- struct snd_seq_ev_quote quote;
- } data;
+ union snd_seq_event_data data;
};
+ /* (compatible) event for UMP-capable clients */
+struct snd_seq_ump_event {
+ snd_seq_event_type_t type; /* event type */
+ unsigned char flags; /* event flags */
+ char tag;
+ unsigned char queue; /* schedule queue */
+ union snd_seq_timestamp time; /* schedule time */
+ struct snd_seq_addr source; /* source address */
+ struct snd_seq_addr dest; /* destination address */
+
+ union {
+ union snd_seq_event_data data;
+ unsigned int ump[4];
+ };
+};
/*
* bounce event - stored as variable size data
@@ -331,6 +347,7 @@ typedef int __bitwise snd_seq_client_type_t;
#define SNDRV_SEQ_FILTER_BROADCAST (1U<<0) /* accept broadcast messages */
#define SNDRV_SEQ_FILTER_MULTICAST (1U<<1) /* accept multicast messages */
#define SNDRV_SEQ_FILTER_BOUNCE (1U<<2) /* accept bounce event in error */
+#define SNDRV_SEQ_FILTER_NO_CONVERT (1U<<30) /* don't convert UMP events */
#define SNDRV_SEQ_FILTER_USE_EVENT (1U<<31) /* use event filter */
struct snd_seq_client_info {
@@ -344,9 +361,18 @@ struct snd_seq_client_info {
int event_lost; /* number of lost events */
int card; /* RO: card number[kernel] */
int pid; /* RO: pid[user] */
- char reserved[56]; /* for future use */
+ unsigned int midi_version; /* MIDI version */
+ unsigned int group_filter; /* UMP group filter bitmap
+ * (bit 0 = groupless messages,
+ * bit 1-16 = messages for groups 1-16)
+ */
+ char reserved[48]; /* for future use */
};
+/* MIDI version numbers in client info */
+#define SNDRV_SEQ_CLIENT_LEGACY_MIDI 0 /* Legacy client */
+#define SNDRV_SEQ_CLIENT_UMP_MIDI_1_0 1 /* UMP MIDI 1.0 */
+#define SNDRV_SEQ_CLIENT_UMP_MIDI_2_0 2 /* UMP MIDI 2.0 */
/* client pool size */
struct snd_seq_client_pool {
@@ -406,6 +432,8 @@ struct snd_seq_remove_events {
#define SNDRV_SEQ_PORT_CAP_SUBS_READ (1<<5) /* allow read subscription */
#define SNDRV_SEQ_PORT_CAP_SUBS_WRITE (1<<6) /* allow write subscription */
#define SNDRV_SEQ_PORT_CAP_NO_EXPORT (1<<7) /* routing not allowed */
+#define SNDRV_SEQ_PORT_CAP_INACTIVE (1<<8) /* inactive port */
+#define SNDRV_SEQ_PORT_CAP_UMP_ENDPOINT (1<<9) /* MIDI 2.0 UMP Endpoint port */
/* port type */
#define SNDRV_SEQ_PORT_TYPE_SPECIFIC (1<<0) /* hardware specific */
@@ -415,6 +443,7 @@ struct snd_seq_remove_events {
#define SNDRV_SEQ_PORT_TYPE_MIDI_XG (1<<4) /* XG compatible device */
#define SNDRV_SEQ_PORT_TYPE_MIDI_MT32 (1<<5) /* MT-32 compatible device */
#define SNDRV_SEQ_PORT_TYPE_MIDI_GM2 (1<<6) /* General MIDI 2 compatible device */
+#define SNDRV_SEQ_PORT_TYPE_MIDI_UMP (1<<7) /* UMP */
/* other standards...*/
#define SNDRV_SEQ_PORT_TYPE_SYNTH (1<<10) /* Synth device (no MIDI compatible - direct wavetable) */
@@ -432,6 +461,12 @@ struct snd_seq_remove_events {
#define SNDRV_SEQ_PORT_FLG_TIMESTAMP (1<<1)
#define SNDRV_SEQ_PORT_FLG_TIME_REAL (1<<2)
+/* port direction */
+#define SNDRV_SEQ_PORT_DIR_UNKNOWN 0
+#define SNDRV_SEQ_PORT_DIR_INPUT 1
+#define SNDRV_SEQ_PORT_DIR_OUTPUT 2
+#define SNDRV_SEQ_PORT_DIR_BIDIRECTION 3
+
struct snd_seq_port_info {
struct snd_seq_addr addr; /* client/port numbers */
char name[64]; /* port name */
@@ -448,7 +483,9 @@ struct snd_seq_port_info {
void *kernel; /* reserved for kernel use (must be NULL) */
unsigned int flags; /* misc. conditioning */
unsigned char time_queue; /* queue # for timestamping */
- char reserved[59]; /* for future use */
+ unsigned char direction; /* port usage direction (r/w/bidir) */
+ unsigned char ump_group; /* 0 = UMP EP (no conversion), 1-16 = UMP group number */
+ char reserved[57]; /* for future use */
};
@@ -552,6 +589,18 @@ struct snd_seq_query_subs {
char reserved[64]; /* for future use */
};
+/*
+ * UMP-specific information
+ */
+/* type of UMP info query */
+#define SNDRV_SEQ_CLIENT_UMP_INFO_ENDPOINT 0
+#define SNDRV_SEQ_CLIENT_UMP_INFO_BLOCK 1
+
+struct snd_seq_client_ump_info {
+ int client; /* client number to inquire/set */
+ int type; /* type to inquire/set */
+ unsigned char info[512]; /* info (either UMP ep or block info) */
+} __packed;
/*
* IOCTL commands
@@ -561,9 +610,12 @@ struct snd_seq_query_subs {
#define SNDRV_SEQ_IOCTL_CLIENT_ID _IOR ('S', 0x01, int)
#define SNDRV_SEQ_IOCTL_SYSTEM_INFO _IOWR('S', 0x02, struct snd_seq_system_info)
#define SNDRV_SEQ_IOCTL_RUNNING_MODE _IOWR('S', 0x03, struct snd_seq_running_info)
+#define SNDRV_SEQ_IOCTL_USER_PVERSION _IOW('S', 0x04, int)
#define SNDRV_SEQ_IOCTL_GET_CLIENT_INFO _IOWR('S', 0x10, struct snd_seq_client_info)
#define SNDRV_SEQ_IOCTL_SET_CLIENT_INFO _IOW ('S', 0x11, struct snd_seq_client_info)
+#define SNDRV_SEQ_IOCTL_GET_CLIENT_UMP_INFO _IOWR('S', 0x12, struct snd_seq_client_ump_info)
+#define SNDRV_SEQ_IOCTL_SET_CLIENT_UMP_INFO _IOWR('S', 0x13, struct snd_seq_client_ump_info)
#define SNDRV_SEQ_IOCTL_CREATE_PORT _IOWR('S', 0x20, struct snd_seq_port_info)
#define SNDRV_SEQ_IOCTL_DELETE_PORT _IOW ('S', 0x21, struct snd_seq_port_info)
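A minimal sketch of how a client opts into the new UMP features: request protocol 1.0.3 via SNDRV_SEQ_IOCTL_USER_PVERSION, then set midi_version in its client info. declare_ump_client() is a hypothetical helper and error handling is abbreviated:

#include <sound/asequencer.h>
#include <sys/ioctl.h>

static int declare_ump_client(int seq_fd)
{
	int ver = SNDRV_SEQ_VERSION;	/* request 1.0.3 protocol features */
	struct snd_seq_client_info info = {0};

	if (ioctl(seq_fd, SNDRV_SEQ_IOCTL_USER_PVERSION, &ver) < 0 ||
	    ioctl(seq_fd, SNDRV_SEQ_IOCTL_CLIENT_ID, &info.client) < 0 ||
	    ioctl(seq_fd, SNDRV_SEQ_IOCTL_GET_CLIENT_INFO, &info) < 0)
		return -1;
	info.midi_version = SNDRV_SEQ_CLIENT_UMP_MIDI_2_0;
	return ioctl(seq_fd, SNDRV_SEQ_IOCTL_SET_CLIENT_INFO, &info);
}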
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index 0aa955aa8246..628d46a0da92 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -142,7 +142,7 @@ struct snd_hwdep_dsp_image {
* *
*****************************************************************************/
-#define SNDRV_PCM_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 15)
+#define SNDRV_PCM_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 17)
typedef unsigned long snd_pcm_uframes_t;
typedef signed long snd_pcm_sframes_t;
@@ -267,13 +267,17 @@ typedef int __bitwise snd_pcm_format_t;
typedef int __bitwise snd_pcm_subformat_t;
#define SNDRV_PCM_SUBFORMAT_STD ((__force snd_pcm_subformat_t) 0)
-#define SNDRV_PCM_SUBFORMAT_LAST SNDRV_PCM_SUBFORMAT_STD
+#define SNDRV_PCM_SUBFORMAT_MSBITS_MAX ((__force snd_pcm_subformat_t) 1)
+#define SNDRV_PCM_SUBFORMAT_MSBITS_20 ((__force snd_pcm_subformat_t) 2)
+#define SNDRV_PCM_SUBFORMAT_MSBITS_24 ((__force snd_pcm_subformat_t) 3)
+#define SNDRV_PCM_SUBFORMAT_LAST SNDRV_PCM_SUBFORMAT_MSBITS_24
#define SNDRV_PCM_INFO_MMAP 0x00000001 /* hardware supports mmap */
#define SNDRV_PCM_INFO_MMAP_VALID 0x00000002 /* period data are valid during transfer */
#define SNDRV_PCM_INFO_DOUBLE 0x00000004 /* Double buffering needed for PCM start/stop */
#define SNDRV_PCM_INFO_BATCH 0x00000010 /* double buffering */
#define SNDRV_PCM_INFO_SYNC_APPLPTR 0x00000020 /* need the explicit sync of appl_ptr update */
+#define SNDRV_PCM_INFO_PERFECT_DRAIN 0x00000040 /* silencing at the end of stream is not required */
#define SNDRV_PCM_INFO_INTERLEAVED 0x00000100 /* channels are interleaved */
#define SNDRV_PCM_INFO_NONINTERLEAVED 0x00000200 /* channels are not interleaved */
#define SNDRV_PCM_INFO_COMPLEX 0x00000400 /* complex frame organization (mmap only) */
@@ -383,6 +387,9 @@ typedef int snd_pcm_hw_param_t;
#define SNDRV_PCM_HW_PARAMS_NORESAMPLE (1<<0) /* avoid rate resampling */
#define SNDRV_PCM_HW_PARAMS_EXPORT_BUFFER (1<<1) /* export buffer */
#define SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP (1<<2) /* disable period wakeups */
+#define SNDRV_PCM_HW_PARAMS_NO_DRAIN_SILENCE (1<<3) /* suppress the filling
+ * of silence samples during drain
+ */
struct snd_interval {
unsigned int min, max;
@@ -409,7 +416,7 @@ struct snd_pcm_hw_params {
unsigned int rmask; /* W: requested masks */
unsigned int cmask; /* R: changed masks */
unsigned int info; /* R: Info flags for returned setup */
- unsigned int msbits; /* R: used most significant bits */
+ unsigned int msbits; /* R: used most significant bits (in sample bit-width) */
unsigned int rate_num; /* R: rate numerator */
unsigned int rate_den; /* R: rate denominator */
snd_pcm_uframes_t fifo_size; /* R: chip FIFO size in frames */
@@ -708,7 +715,7 @@ enum {
* Raw MIDI section - /dev/snd/midi??
*/
-#define SNDRV_RAWMIDI_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 2)
+#define SNDRV_RAWMIDI_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 4)
enum {
SNDRV_RAWMIDI_STREAM_OUTPUT = 0,
@@ -719,6 +726,7 @@ enum {
#define SNDRV_RAWMIDI_INFO_OUTPUT 0x00000001
#define SNDRV_RAWMIDI_INFO_INPUT 0x00000002
#define SNDRV_RAWMIDI_INFO_DUPLEX 0x00000004
+#define SNDRV_RAWMIDI_INFO_UMP 0x00000008
struct snd_rawmidi_info {
unsigned int device; /* RO/WR (control): device number */
@@ -779,6 +787,72 @@ struct snd_rawmidi_status {
};
#endif
+/* UMP EP info flags */
+#define SNDRV_UMP_EP_INFO_STATIC_BLOCKS 0x01
+
+/* UMP EP Protocol / JRTS capability bits */
+#define SNDRV_UMP_EP_INFO_PROTO_MIDI_MASK 0x0300
+#define SNDRV_UMP_EP_INFO_PROTO_MIDI1 0x0100 /* MIDI 1.0 */
+#define SNDRV_UMP_EP_INFO_PROTO_MIDI2 0x0200 /* MIDI 2.0 */
+#define SNDRV_UMP_EP_INFO_PROTO_JRTS_MASK 0x0003
+#define SNDRV_UMP_EP_INFO_PROTO_JRTS_TX 0x0001 /* JRTS Transmit */
+#define SNDRV_UMP_EP_INFO_PROTO_JRTS_RX 0x0002 /* JRTS Receive */
+
+/* UMP Endpoint information */
+struct snd_ump_endpoint_info {
+ int card; /* card number */
+ int device; /* device number */
+ unsigned int flags; /* additional info */
+ unsigned int protocol_caps; /* protocol capabilities */
+ unsigned int protocol; /* current protocol */
+ unsigned int num_blocks; /* # of function blocks */
+ unsigned short version; /* UMP major/minor version */
+ unsigned short family_id; /* MIDI device family ID */
+ unsigned short model_id; /* MIDI family model ID */
+ unsigned int manufacturer_id; /* MIDI manufacturer ID */
+ unsigned char sw_revision[4]; /* software revision */
+ unsigned short padding;
+ unsigned char name[128]; /* endpoint name string */
+ unsigned char product_id[128]; /* unique product id string */
+ unsigned char reserved[32];
+} __packed;
+
+/* UMP direction */
+#define SNDRV_UMP_DIR_INPUT 0x01
+#define SNDRV_UMP_DIR_OUTPUT 0x02
+#define SNDRV_UMP_DIR_BIDIRECTION 0x03
+
+/* UMP block info flags */
+#define SNDRV_UMP_BLOCK_IS_MIDI1 (1U << 0) /* MIDI 1.0 port w/o restrict */
+#define SNDRV_UMP_BLOCK_IS_LOWSPEED (1U << 1) /* 31.25Kbps B/W MIDI1 port */
+
+/* UMP block user-interface hint */
+#define SNDRV_UMP_BLOCK_UI_HINT_UNKNOWN 0x00
+#define SNDRV_UMP_BLOCK_UI_HINT_RECEIVER 0x01
+#define SNDRV_UMP_BLOCK_UI_HINT_SENDER 0x02
+#define SNDRV_UMP_BLOCK_UI_HINT_BOTH 0x03
+
+/* UMP groups and blocks */
+#define SNDRV_UMP_MAX_GROUPS 16
+#define SNDRV_UMP_MAX_BLOCKS 32
+
+/* UMP Block information */
+struct snd_ump_block_info {
+ int card; /* card number */
+ int device; /* device number */
+ unsigned char block_id; /* block ID (R/W) */
+ unsigned char direction; /* UMP direction */
+ unsigned char active; /* Activeness */
+ unsigned char first_group; /* first group ID */
+ unsigned char num_groups; /* number of groups */
+ unsigned char midi_ci_version; /* MIDI-CI support version */
+ unsigned char sysex8_streams; /* max number of sysex8 streams */
+ unsigned char ui_hint; /* user interface hint */
+ unsigned int flags; /* various info flags */
+ unsigned char name[128]; /* block name string */
+ unsigned char reserved[32];
+} __packed;
+
#define SNDRV_RAWMIDI_IOCTL_PVERSION _IOR('W', 0x00, int)
#define SNDRV_RAWMIDI_IOCTL_INFO _IOR('W', 0x01, struct snd_rawmidi_info)
#define SNDRV_RAWMIDI_IOCTL_USER_PVERSION _IOW('W', 0x02, int)
@@ -786,6 +860,9 @@ struct snd_rawmidi_status {
#define SNDRV_RAWMIDI_IOCTL_STATUS _IOWR('W', 0x20, struct snd_rawmidi_status)
#define SNDRV_RAWMIDI_IOCTL_DROP _IOW('W', 0x30, int)
#define SNDRV_RAWMIDI_IOCTL_DRAIN _IOW('W', 0x31, int)
+/* Additional ioctls for UMP rawmidi devices */
+#define SNDRV_UMP_IOCTL_ENDPOINT_INFO _IOR('W', 0x40, struct snd_ump_endpoint_info)
+#define SNDRV_UMP_IOCTL_BLOCK_INFO _IOR('W', 0x41, struct snd_ump_block_info)
/*
* Timer section - /dev/snd/timer
@@ -961,7 +1038,7 @@ struct snd_timer_tread {
* *
****************************************************************************/
-#define SNDRV_CTL_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 8)
+#define SNDRV_CTL_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 9)
struct snd_ctl_card_info {
int card; /* card number */
@@ -1122,6 +1199,9 @@ struct snd_ctl_tlv {
#define SNDRV_CTL_IOCTL_RAWMIDI_NEXT_DEVICE _IOWR('U', 0x40, int)
#define SNDRV_CTL_IOCTL_RAWMIDI_INFO _IOWR('U', 0x41, struct snd_rawmidi_info)
#define SNDRV_CTL_IOCTL_RAWMIDI_PREFER_SUBDEVICE _IOW('U', 0x42, int)
+#define SNDRV_CTL_IOCTL_UMP_NEXT_DEVICE _IOWR('U', 0x43, int)
+#define SNDRV_CTL_IOCTL_UMP_ENDPOINT_INFO _IOWR('U', 0x44, struct snd_ump_endpoint_info)
+#define SNDRV_CTL_IOCTL_UMP_BLOCK_INFO _IOWR('U', 0x45, struct snd_ump_block_info)
#define SNDRV_CTL_IOCTL_POWER _IOWR('U', 0xd0, int)
#define SNDRV_CTL_IOCTL_POWER_STATE _IOR('U', 0xd1, int)
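A hedged sketch of enumerating UMP endpoints through an already-open ALSA control device, mirroring the existing RAWMIDI_NEXT_DEVICE iteration pattern; list_ump_endpoints() is a hypothetical helper and the in/out semantics of the NEXT_DEVICE ioctl are assumed to match the rawmidi one:

#include <sound/asound.h>
#include <stdio.h>
#include <sys/ioctl.h>

static void list_ump_endpoints(int ctl_fd)
{
	int dev = -1;

	for (;;) {
		struct snd_ump_endpoint_info ep = {0};

		if (ioctl(ctl_fd, SNDRV_CTL_IOCTL_UMP_NEXT_DEVICE, &dev) < 0 ||
		    dev < 0)
			break;		/* no more UMP devices on this card */
		ep.device = dev;
		if (ioctl(ctl_fd, SNDRV_CTL_IOCTL_UMP_ENDPOINT_INFO, &ep) == 0)
			printf("ump %d: %s (%u blocks)\n", dev,
			       (const char *)ep.name, ep.num_blocks);
	}
}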
diff --git a/include/uapi/sound/emu10k1.h b/include/uapi/sound/emu10k1.h
index c8e131d6da00..4c32a116e7ad 100644
--- a/include/uapi/sound/emu10k1.h
+++ b/include/uapi/sound/emu10k1.h
@@ -308,6 +308,8 @@ struct snd_emu10k1_fx8010_info {
#define EMU10K1_GPR_TRANSLATION_BASS 2
#define EMU10K1_GPR_TRANSLATION_TREBLE 3
#define EMU10K1_GPR_TRANSLATION_ONOFF 4
+#define EMU10K1_GPR_TRANSLATION_NEGATE 5
+#define EMU10K1_GPR_TRANSLATION_NEG_TABLE100 6
enum emu10k1_ctl_elem_iface {
EMU10K1_CTL_ELEM_IFACE_MIXER = 2, /* virtual mixer device */
@@ -328,9 +330,9 @@ struct snd_emu10k1_fx8010_control_gpr {
unsigned int vcount; /* visible count */
unsigned int count; /* count of GPR (1..16) */
unsigned short gpr[32]; /* GPR number(s) */
- unsigned int value[32]; /* initial values */
- unsigned int min; /* minimum range */
- unsigned int max; /* maximum range */
+ int value[32]; /* initial values */
+ int min; /* minimum range */
+ int max; /* maximum range */
unsigned int translation; /* translation type (EMU10K1_GPR_TRANSLATION*) */
const unsigned int *tlv;
};
diff --git a/include/uapi/sound/scarlett2.h b/include/uapi/sound/scarlett2.h
new file mode 100644
index 000000000000..91467ab0ed70
--- /dev/null
+++ b/include/uapi/sound/scarlett2.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Focusrite Scarlett 2 Protocol Driver for ALSA
+ * (including Scarlett 2nd Gen, 3rd Gen, 4th Gen, Clarett USB, and
+ * Clarett+ series products)
+ *
+ * Copyright (c) 2023 by Geoffrey D. Bennett <g at b4.vu>
+ */
+#ifndef __UAPI_SOUND_SCARLETT2_H
+#define __UAPI_SOUND_SCARLETT2_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define SCARLETT2_HWDEP_MAJOR 1
+#define SCARLETT2_HWDEP_MINOR 0
+#define SCARLETT2_HWDEP_SUBMINOR 0
+
+#define SCARLETT2_HWDEP_VERSION \
+ ((SCARLETT2_HWDEP_MAJOR << 16) | \
+ (SCARLETT2_HWDEP_MINOR << 8) | \
+ SCARLETT2_HWDEP_SUBMINOR)
+
+#define SCARLETT2_HWDEP_VERSION_MAJOR(v) (((v) >> 16) & 0xFF)
+#define SCARLETT2_HWDEP_VERSION_MINOR(v) (((v) >> 8) & 0xFF)
+#define SCARLETT2_HWDEP_VERSION_SUBMINOR(v) ((v) & 0xFF)
+
+/* Get protocol version */
+#define SCARLETT2_IOCTL_PVERSION _IOR('S', 0x60, int)
+
+/* Reboot */
+#define SCARLETT2_IOCTL_REBOOT _IO('S', 0x61)
+
+/* Select flash segment */
+#define SCARLETT2_SEGMENT_ID_SETTINGS 0
+#define SCARLETT2_SEGMENT_ID_FIRMWARE 1
+#define SCARLETT2_SEGMENT_ID_COUNT 2
+
+#define SCARLETT2_IOCTL_SELECT_FLASH_SEGMENT _IOW('S', 0x62, int)
+
+/* Erase selected flash segment */
+#define SCARLETT2_IOCTL_ERASE_FLASH_SEGMENT _IO('S', 0x63)
+
+/* Get the erase progress of the selected flash segment:
+ * 1 through num_blocks while erasing, or 255 when complete
+ */
+struct scarlett2_flash_segment_erase_progress {
+ unsigned char progress;
+ unsigned char num_blocks;
+};
+#define SCARLETT2_IOCTL_GET_ERASE_PROGRESS \
+ _IOR('S', 0x64, struct scarlett2_flash_segment_erase_progress)
+
+#endif /* __UAPI_SOUND_SCARLETT2_H */
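A minimal usage sketch of the erase ioctls on an already-open hwdep device node, following the protocol-version check implied by the versioning macros above; erase_firmware_segment() is a hypothetical helper, and a real tool would sleep between progress polls:

#include <sound/scarlett2.h>
#include <sys/ioctl.h>

static int erase_firmware_segment(int hw_fd)
{
	int ver, seg = SCARLETT2_SEGMENT_ID_FIRMWARE;
	struct scarlett2_flash_segment_erase_progress p;

	if (ioctl(hw_fd, SCARLETT2_IOCTL_PVERSION, &ver) < 0 ||
	    SCARLETT2_HWDEP_VERSION_MAJOR(ver) != SCARLETT2_HWDEP_MAJOR)
		return -1;	/* driver speaks an incompatible protocol */
	if (ioctl(hw_fd, SCARLETT2_IOCTL_SELECT_FLASH_SEGMENT, &seg) < 0 ||
	    ioctl(hw_fd, SCARLETT2_IOCTL_ERASE_FLASH_SEGMENT) < 0)
		return -1;
	do {	/* poll until progress reads 255 (complete) */
		if (ioctl(hw_fd, SCARLETT2_IOCTL_GET_ERASE_PROGRESS, &p) < 0)
			return -1;
	} while (p.progress != 255);
	return 0;
}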
diff --git a/include/uapi/sound/sof/tokens.h b/include/uapi/sound/sof/tokens.h
index e9ec7e4eb982..ee5708934614 100644
--- a/include/uapi/sound/sof/tokens.h
+++ b/include/uapi/sound/sof/tokens.h
@@ -35,6 +35,7 @@
/* buffers */
#define SOF_TKN_BUF_SIZE 100
#define SOF_TKN_BUF_CAPS 101
+#define SOF_TKN_BUF_FLAGS 102
/* DAI */
/* Token retired with ABI 3.2, do not use for new capabilities
@@ -99,7 +100,11 @@
#define SOF_TKN_COMP_OUTPUT_PIN_BINDING_WNAME 414
#define SOF_TKN_COMP_NUM_INPUT_AUDIO_FORMATS 415
#define SOF_TKN_COMP_NUM_OUTPUT_AUDIO_FORMATS 416
-
+/*
+ * The token value is copied to the dapm_widget's
+ * no_wname_in_kcontrol_name.
+ */
+#define SOF_TKN_COMP_NO_WNAME_IN_KCONTROL_NAME 417
/* SSP */
#define SOF_TKN_INTEL_SSP_CLKS_CONTROL 500
@@ -209,4 +214,8 @@
#define SOF_TKN_AMD_ACPI2S_CH 1701
#define SOF_TKN_AMD_ACPI2S_TDM_MODE 1702
+/* MICFIL PDM */
+#define SOF_TKN_IMX_MICFIL_RATE 2000
+#define SOF_TKN_IMX_MICFIL_CH 2001
+
#endif
diff --git a/include/uapi/xen/evtchn.h b/include/uapi/xen/evtchn.h
index 7fbf732f168f..aef2b75f3413 100644
--- a/include/uapi/xen/evtchn.h
+++ b/include/uapi/xen/evtchn.h
@@ -101,4 +101,13 @@ struct ioctl_evtchn_restrict_domid {
domid_t domid;
};
+/*
+ * Bind statically allocated @port.
+ */
+#define IOCTL_EVTCHN_BIND_STATIC \
+ _IOC(_IOC_NONE, 'E', 7, sizeof(struct ioctl_evtchn_bind))
+struct ioctl_evtchn_bind {
+ unsigned int port;
+};
+
#endif /* __LINUX_PUBLIC_EVTCHN_H__ */
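A minimal sketch of the new ioctl against an already-open /dev/xen/evtchn file descriptor; bind_static_port() is a hypothetical helper:

#include <sys/ioctl.h>
#include <xen/evtchn.h>

static int bind_static_port(int evtchn_fd, unsigned int port)
{
	struct ioctl_evtchn_bind bind = { .port = port };

	return ioctl(evtchn_fd, IOCTL_EVTCHN_BIND_STATIC, &bind);
}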
diff --git a/include/uapi/xen/gntalloc.h b/include/uapi/xen/gntalloc.h
index 48d2790ef928..3109282672f3 100644
--- a/include/uapi/xen/gntalloc.h
+++ b/include/uapi/xen/gntalloc.h
@@ -31,7 +31,10 @@ struct ioctl_gntalloc_alloc_gref {
__u64 index;
/* The grant references of the newly created grant, one per page */
/* Variable size, depending on count */
- __u32 gref_ids[1];
+ union {
+ __u32 gref_ids[1];
+ __DECLARE_FLEX_ARRAY(__u32, gref_ids_flex);
+ };
};
#define GNTALLOC_FLAG_WRITABLE 1
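The union keeps the legacy one-element gref_ids[] layout for ABI compatibility while giving in-kernel bounds checkers a proper flexible array (gref_ids_flex). A hedged userspace sketch of the unchanged allocation pattern, using the IOCTL_GNTALLOC_ALLOC_GREF ioctl defined elsewhere in this header; alloc_grefs() and its arguments are illustrative:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <xen/gntalloc.h>

static int alloc_grefs(int gntalloc_fd, uint16_t domid, uint32_t count)
{
	struct ioctl_gntalloc_alloc_gref *op;
	uint32_t i;
	int ret;

	op = calloc(1, sizeof(*op) + (count - 1) * sizeof(op->gref_ids[0]));
	if (!op)
		return -1;
	op->domid = domid;	/* domain that may map the granted pages */
	op->flags = GNTALLOC_FLAG_WRITABLE;
	op->count = count;
	ret = ioctl(gntalloc_fd, IOCTL_GNTALLOC_ALLOC_GREF, op);
	if (ret == 0)
		for (i = 0; i < count; i++)
			printf("gref %u\n", op->gref_ids[i]);
	free(op);
	return ret;
}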
diff --git a/include/uapi/xen/privcmd.h b/include/uapi/xen/privcmd.h
index d2029556083e..8b8c5d1420fe 100644
--- a/include/uapi/xen/privcmd.h
+++ b/include/uapi/xen/privcmd.h
@@ -98,6 +98,34 @@ struct privcmd_mmap_resource {
__u64 addr;
};
+/* For privcmd_irqfd::flags */
+#define PRIVCMD_IRQFD_FLAG_DEASSIGN (1 << 0)
+
+struct privcmd_irqfd {
+ __u64 dm_op;
+ __u32 size; /* Size of structure pointed to by dm_op */
+ __u32 fd;
+ __u32 flags;
+ domid_t dom;
+ __u8 pad[2];
+};
+
+/* For privcmd_ioeventfd::flags */
+#define PRIVCMD_IOEVENTFD_FLAG_DEASSIGN (1 << 0)
+
+struct privcmd_ioeventfd {
+ __u64 ioreq;
+ __u64 ports;
+ __u64 addr;
+ __u32 addr_len;
+ __u32 event_fd;
+ __u32 vcpus;
+ __u32 vq;
+ __u32 flags;
+ domid_t dom;
+ __u8 pad[2];
+};
+
/*
* @cmd: IOCTL_PRIVCMD_HYPERCALL
* @arg: &privcmd_hypercall_t
@@ -125,5 +153,9 @@ struct privcmd_mmap_resource {
_IOC(_IOC_NONE, 'P', 6, sizeof(domid_t))
#define IOCTL_PRIVCMD_MMAP_RESOURCE \
_IOC(_IOC_NONE, 'P', 7, sizeof(struct privcmd_mmap_resource))
+#define IOCTL_PRIVCMD_IRQFD \
+ _IOW('P', 8, struct privcmd_irqfd)
+#define IOCTL_PRIVCMD_IOEVENTFD \
+ _IOW('P', 9, struct privcmd_ioeventfd)
#endif /* __LINUX_PUBLIC_PRIVCMD_H__ */
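A hedged userspace sketch of the new irqfd ioctl, which arranges for the kernel to execute the prepared dm_op whenever the eventfd fires; attach_irqfd() and its arguments are illustrative and error handling is abbreviated:

#include <stdint.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <xen/privcmd.h>

static int attach_irqfd(int privcmd_fd, void *dmop_buf, uint32_t dmop_size,
			uint16_t domid)
{
	struct privcmd_irqfd irqfd = {
		.dm_op = (uintptr_t)dmop_buf,	/* dm_op hypercall buffer */
		.size  = dmop_size,
		.fd    = eventfd(0, EFD_CLOEXEC),
		.dom   = domid,
	};

	/* pass PRIVCMD_IRQFD_FLAG_DEASSIGN in .flags later to detach */
	return ioctl(privcmd_fd, IOCTL_PRIVCMD_IRQFD, &irqfd);
}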
diff --git a/include/ufs/ufs.h b/include/ufs/ufs.h
index 4e8d6240e589..b6003749bc83 100644
--- a/include/ufs/ufs.h
+++ b/include/ufs/ufs.h
@@ -11,9 +11,16 @@
#ifndef _UFS_H
#define _UFS_H
-#include <linux/mutex.h>
+#include <linux/bitops.h>
#include <linux/types.h>
#include <uapi/scsi/scsi_bsg_ufs.h>
+#include <linux/time64.h>
+
+/*
+ * Using static_assert() is not allowed in UAPI header files. Hence the size
+ * of struct utp_upiu_header is checked here, in this kernel-only header.
+ */
+static_assert(sizeof(struct utp_upiu_header) == 12);
#define GENERAL_UPIU_REQUEST_SIZE (sizeof(struct utp_upiu_req))
#define QUERY_DESC_MAX_SIZE 255
@@ -23,9 +30,6 @@
(sizeof(struct utp_upiu_header)))
#define UFS_SENSE_SIZE 18
-#define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\
- cpu_to_be32((byte3 << 24) | (byte2 << 16) |\
- (byte1 << 8) | (byte0))
/*
* UFS device may have standard LUs and LUN id could be from 0x00 to
* 0x7F. Standard LUs use "Peripheral Device Addressing Format".
@@ -76,7 +80,7 @@ enum {
};
/* UTP UPIU Transaction Codes Initiator to Target */
-enum {
+enum upiu_request_transaction {
UPIU_TRANSACTION_NOP_OUT = 0x00,
UPIU_TRANSACTION_COMMAND = 0x01,
UPIU_TRANSACTION_DATA_OUT = 0x02,
@@ -85,7 +89,7 @@ enum {
};
/* UTP UPIU Transaction Codes Target to Initiator */
-enum {
+enum upiu_response_transaction {
UPIU_TRANSACTION_NOP_IN = 0x20,
UPIU_TRANSACTION_RESPONSE = 0x21,
UPIU_TRANSACTION_DATA_IN = 0x22,
@@ -95,13 +99,20 @@ enum {
UPIU_TRANSACTION_REJECT_UPIU = 0x3F,
};
-/* UPIU Read/Write flags */
+/* UPIU Read/Write flags. See also table "UPIU Flags" in the UFS standard. */
enum {
UPIU_CMD_FLAGS_NONE = 0x00,
+ UPIU_CMD_FLAGS_CP = 0x04,
UPIU_CMD_FLAGS_WRITE = 0x20,
UPIU_CMD_FLAGS_READ = 0x40,
};
+/* UPIU response flags */
+enum {
+ UPIU_RSP_FLAG_UNDERFLOW = 0x20,
+ UPIU_RSP_FLAG_OVERFLOW = 0x40,
+};
+
/* UPIU Task Attributes */
enum {
UPIU_TASK_ATTR_SIMPLE = 0x00,
@@ -170,6 +181,7 @@ enum attr_idn {
QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST = 0x1E,
QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE = 0x1F,
QUERY_ATTR_IDN_EXT_IID_EN = 0x2A,
+ QUERY_ATTR_IDN_TIMESTAMP = 0x30
};
/* Descriptor idn for Query requests */
@@ -465,21 +477,11 @@ enum {
UPIU_COMMAND_SET_TYPE_QUERY = 0x2,
};
-/* UTP Transfer Request Command Offset */
-#define UPIU_COMMAND_TYPE_OFFSET 28
-
/* Offset of the response code in the UPIU header */
#define UPIU_RSP_CODE_OFFSET 8
enum {
- MASK_SCSI_STATUS = 0xFF,
- MASK_TASK_RESPONSE = 0xFF00,
- MASK_RSP_UPIU_RESULT = 0xFFFF,
- MASK_QUERY_DATA_SEG_LEN = 0xFFFF,
- MASK_RSP_UPIU_DATA_SEG_LEN = 0xFFFF,
- MASK_RSP_EXCEPTION_EVENT = 0x10000,
MASK_TM_SERVICE_RESP = 0xFF,
- MASK_TM_FUNC = 0xFF,
};
/* Task management service response */
@@ -515,41 +517,6 @@ struct utp_cmd_rsp {
u8 sense_data[UFS_SENSE_SIZE];
};
-struct ufshpb_active_field {
- __be16 active_rgn;
- __be16 active_srgn;
-};
-#define HPB_ACT_FIELD_SIZE 4
-
-/**
- * struct utp_hpb_rsp - Response UPIU structure
- * @residual_transfer_count: Residual transfer count DW-3
- * @reserved1: Reserved double words DW-4 to DW-7
- * @sense_data_len: Sense data length DW-8 U16
- * @desc_type: Descriptor type of sense data
- * @additional_len: Additional length of sense data
- * @hpb_op: HPB operation type
- * @lun: LUN of response UPIU
- * @active_rgn_cnt: Active region count
- * @inactive_rgn_cnt: Inactive region count
- * @hpb_active_field: Recommended to read HPB region and subregion
- * @hpb_inactive_field: To be inactivated HPB region and subregion
- */
-struct utp_hpb_rsp {
- __be32 residual_transfer_count;
- __be32 reserved1[4];
- __be16 sense_data_len;
- u8 desc_type;
- u8 additional_len;
- u8 hpb_op;
- u8 lun;
- u8 active_rgn_cnt;
- u8 inactive_rgn_cnt;
- struct ufshpb_active_field hpb_active_field[2];
- __be16 hpb_inactive_field[2];
-};
-#define UTP_HPB_RSP_SIZE 40
-
/**
* struct utp_upiu_rsp - general upiu response structure
* @header: UPIU header structure DW-0 to DW-2
@@ -560,31 +527,10 @@ struct utp_upiu_rsp {
struct utp_upiu_header header;
union {
struct utp_cmd_rsp sr;
- struct utp_hpb_rsp hr;
struct utp_upiu_query qr;
};
};
-/**
- * struct ufs_query_req - parameters for building a query request
- * @query_func: UPIU header query function
- * @upiu_req: the query request data
- */
-struct ufs_query_req {
- u8 query_func;
- struct utp_upiu_query upiu_req;
-};
-
-/**
- * struct ufs_query_resp - UPIU QUERY
- * @response: device response code
- * @upiu_res: query response data
- */
-struct ufs_query_res {
- u8 response;
- struct utp_upiu_query upiu_res;
-};
-
/*
* VCCQ & VCCQ2 current requirement when UFS device is in sleep state
* and link is in Hibern8 state.
@@ -606,6 +552,14 @@ struct ufs_vreg_info {
struct ufs_vreg *vdd_hba;
};
+/* UFS device descriptor wPeriodicRTCUpdate bit9 defines RTC time baseline */
+#define UFS_RTC_TIME_BASELINE BIT(9)
+
+enum ufs_rtc_time {
+ UFS_RTC_RELATIVE,
+ UFS_RTC_ABSOLUTE
+};
+
struct ufs_dev_info {
bool f_power_on_wp_en;
/* Keeps information if any of the LU is power on write protected */
@@ -620,9 +574,6 @@ struct ufs_dev_info {
/* Stores the depth of queue in UFS device */
u8 bqueuedepth;
- /* UFS HPB related flag */
- bool hpb_enabled;
-
/* UFS WB related flags */
bool wb_enabled;
bool wb_buf_flush_enabled;
@@ -636,6 +587,11 @@ struct ufs_dev_info {
/* UFS EXT_IID Enable */
bool b_ext_iid_en;
+
+ /* UFS RTC */
+ enum ufs_rtc_time rtc_type;
+ time64_t rtc_time_baseline;
+ u32 rtc_update_period;
};
/*
diff --git a/include/ufs/ufs_quirks.h b/include/ufs/ufs_quirks.h
index bcb4f004bed5..41ff44dfa1db 100644
--- a/include/ufs/ufs_quirks.h
+++ b/include/ufs/ufs_quirks.h
@@ -107,10 +107,4 @@ struct ufs_dev_quirk {
*/
#define UFS_DEVICE_QUIRK_DELAY_AFTER_LPM (1 << 11)
-/*
- * Some UFS devices require L2P entry should be swapped before being sent to the
- * UFS device for HPB READ command.
- */
-#define UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ (1 << 12)
-
#endif /* UFS_QUIRKS_H_ */
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index df1d04f7a542..8e2bce9a4f21 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -16,10 +16,12 @@
#include <linux/blk-crypto-profile.h>
#include <linux/blk-mq.h>
#include <linux/devfreq.h>
+#include <linux/fault-inject.h>
#include <linux/msi.h>
#include <linux/pm_runtime.h>
#include <linux/dma-direction.h>
#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
#include <ufs/unipro.h>
#include <ufs/ufs.h>
#include <ufs/ufs_quirks.h>
@@ -27,6 +29,7 @@
#define UFSHCD "ufshcd"
+struct scsi_device;
struct ufs_hba;
enum dev_cmd_type {
@@ -202,6 +205,25 @@ struct ufshcd_lrb {
};
/**
+ * struct ufs_query_req - parameters for building a query request
+ * @query_func: UPIU header query function
+ * @upiu_req: the query request data
+ */
+struct ufs_query_req {
+ u8 query_func;
+ struct utp_upiu_query upiu_req;
+};
+
+/**
+ * struct ufs_query_resp - UPIU QUERY
+ * @response: device response code
+ * @upiu_res: query response data
+ */
+struct ufs_query_res {
+ struct utp_upiu_query upiu_res;
+};
+
+/**
* struct ufs_query - holds relevant data structures for query request
* @request: request upiu and function
* @descriptor: buffer for sending/receiving descriptor
@@ -225,7 +247,6 @@ struct ufs_dev_cmd {
struct mutex lock;
struct completion *complete;
struct ufs_query query;
- struct cq_entry *cqe;
};
/**
@@ -352,6 +373,7 @@ struct ufs_hba_variant_ops {
int (*get_outstanding_cqs)(struct ufs_hba *hba,
unsigned long *ocqs);
int (*config_esi)(struct ufs_hba *hba);
+ void (*config_scsi_dev)(struct scsi_device *sdev);
};
/* clock gating state */
@@ -408,6 +430,7 @@ struct ufs_clk_gating {
* @workq: workqueue to schedule devfreq suspend/resume work
* @suspend_work: worker to suspend devfreq
* @resume_work: worker to resume devfreq
+ * @target_freq: frequency requested by devfreq framework
* @min_gear: lowest HS gear to scale down to
* @is_enabled: tracks if scaling is currently enabled or not, controlled by
* clkscale_enable sysfs node
@@ -427,6 +450,7 @@ struct ufs_clk_scaling {
struct workqueue_struct *workq;
struct work_struct suspend_work;
struct work_struct resume_work;
+ unsigned long target_freq;
u32 min_gear;
bool is_enabled;
bool is_allowed;
@@ -578,11 +602,6 @@ enum ufshcd_quirks {
UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING = 1 << 13,
/*
- * Align DMA SG entries on a 4 KiB boundary.
- */
- UFSHCD_QUIRK_4KB_DMA_ALIGNMENT = 1 << 14,
-
- /*
* This quirk needs to be enabled if the host controller does not
* support UIC command
*/
@@ -611,6 +630,19 @@ enum ufshcd_quirks {
* to reinit the device after switching to maximum gear.
*/
UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH = 1 << 19,
+
+ /*
+ * Some hosts raise a per-queue interrupt in addition to the CQES
+ * (traditional) interrupt when ESI is disabled. Enabling this quirk
+ * disables CQES and uses the per-queue interrupt instead.
+ */
+ UFSHCD_QUIRK_MCQ_BROKEN_INTR = 1 << 20,
+
+ /*
+ * Some hosts do not implement the SQ Run Time Command (SQRTC) register,
+ * so this quirk is needed to skip the related flow.
+ */
+ UFSHCD_QUIRK_MCQ_BROKEN_RTC = 1 << 21,
};
enum ufshcd_caps {
@@ -697,31 +729,6 @@ struct ufs_hba_variant_params {
u32 wb_flush_threshold;
};
-#ifdef CONFIG_SCSI_UFS_HPB
-/**
- * struct ufshpb_dev_info - UFSHPB device related info
- * @num_lu: the number of user logical unit to check whether all lu finished
- * initialization
- * @rgn_size: device reported HPB region size
- * @srgn_size: device reported HPB sub-region size
- * @slave_conf_cnt: counter to check all lu finished initialization
- * @hpb_disabled: flag to check if HPB is disabled
- * @max_hpb_single_cmd: device reported bMAX_DATA_SIZE_FOR_SINGLE_CMD value
- * @is_legacy: flag to check HPB 1.0
- * @control_mode: either host or device
- */
-struct ufshpb_dev_info {
- int num_lu;
- int rgn_size;
- int srgn_size;
- atomic_t slave_conf_cnt;
- bool hpb_disabled;
- u8 max_hpb_single_cmd;
- bool is_legacy;
- u8 control_mode;
-};
-#endif
-
struct ufs_hba_monitor {
unsigned long chunk_size;
@@ -858,6 +865,7 @@ enum ufshcd_mcq_opr {
* @auto_bkops_enabled: to track whether bkops is enabled in device
* @vreg_info: UFS device voltage regulator information
* @clk_list_head: UFS host controller clocks list node head
+ * @use_pm_opp: Indicates whether OPP based scaling is used or not
* @req_abort_count: number of times ufshcd_abort() has been called
* @lanes_per_direction: number of lanes per data direction between the UFS
* controller and the UFS device.
@@ -882,7 +890,6 @@ enum ufshcd_mcq_opr {
* @rpm_dev_flush_recheck_work: used to suspend from RPM (runtime power
* management) after the UFS device has finished a WriteBooster buffer
* flush or auto BKOP.
- * @ufshpb_dev: information related to HPB (Host Performance Booster).
* @monitor: statistics about UFS commands
* @crypto_capabilities: Content of crypto capabilities register (0x100)
* @crypto_cap_array: Array of crypto capabilities
@@ -905,6 +912,8 @@ enum ufshcd_mcq_opr {
* @mcq_base: Multi circular queue registers base address
* @uhq: array of supported hardware queues
* @dev_cmd_queue: Queue for issuing device management commands
+ * @mcq_opr: MCQ operation and runtime registers
+ * @ufs_rtc_update_work: delayed work for periodic UFS RTC updates
*/
struct ufs_hba {
void __iomem *mmio_base;
@@ -1009,6 +1018,7 @@ struct ufs_hba {
bool auto_bkops_enabled;
struct ufs_vreg_info vreg_info;
struct list_head clk_list_head;
+ bool use_pm_opp;
/* Number of requests aborts */
int req_abort_count;
@@ -1038,10 +1048,6 @@ struct ufs_hba {
struct request_queue *bsg_queue;
struct delayed_work rpm_dev_flush_recheck_work;
-#ifdef CONFIG_SCSI_UFS_HPB
- struct ufshpb_dev_info ufshpb_dev;
-#endif
-
struct ufs_hba_monitor monitor;
#ifdef CONFIG_SCSI_UFS_CRYPTO
@@ -1055,6 +1061,10 @@ struct ufs_hba {
struct delayed_work debugfs_ee_work;
u32 debugfs_ee_rate_limit_ms;
#endif
+#ifdef CONFIG_SCSI_UFS_FAULT_INJECTION
+ struct fault_attr trigger_eh_attr;
+ struct fault_attr timeout_attr;
+#endif
u32 luns_avail;
unsigned int nr_hw_queues;
unsigned int nr_queues[HCTX_MAX_TYPES];
@@ -1068,6 +1078,8 @@ struct ufs_hba {
struct ufs_hw_queue *uhq;
struct ufs_hw_queue *dev_cmd_queue;
struct ufshcd_mcq_opr_info_t mcq_opr[OPR_MAX];
+
+ struct delayed_work ufs_rtc_update_work;
};
/**
@@ -1087,6 +1099,7 @@ struct ufs_hba {
* @cq_tail_slot: current slot to which CQ tail pointer is pointing
* @cq_head_slot: current slot to which CQ head pointer is pointing
* @cq_lock: Synchronize between multiple polling instances
+ * @sq_mutex: serializes concurrent access to the submission queue
*/
struct ufs_hw_queue {
void __iomem *mcq_sq_head;
@@ -1105,6 +1118,8 @@ struct ufs_hw_queue {
u32 cq_tail_slot;
u32 cq_head_slot;
spinlock_t cq_lock;
+ /* prevent concurrent access to submission queue */
+ struct mutex sq_mutex;
};
static inline bool is_mcq_enabled(struct ufs_hba *hba)
@@ -1225,6 +1240,8 @@ static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
ufshcd_writel(hba, tmp, reg);
}
+void ufshcd_enable_irq(struct ufs_hba *hba);
+void ufshcd_disable_irq(struct ufs_hba *hba);
int ufshcd_alloc_host(struct device *, struct ufs_hba **);
void ufshcd_dealloc_host(struct ufs_hba *);
int ufshcd_hba_enable(struct ufs_hba *hba);
@@ -1239,12 +1256,18 @@ void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val);
void ufshcd_hba_stop(struct ufs_hba *hba);
void ufshcd_schedule_eh_work(struct ufs_hba *hba);
+void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds);
+u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i);
void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i);
-unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
+unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
struct ufs_hw_queue *hwq);
+void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba);
void ufshcd_mcq_enable_esi(struct ufs_hba *hba);
void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg);
+int ufshcd_opp_config_clks(struct device *dev, struct opp_table *opp_table,
+ struct dev_pm_opp *opp, void *data,
+ bool scaling_down);
/**
* ufshcd_set_variant - set variant specific data to the hba
* @hba: per adapter instance
@@ -1277,7 +1300,6 @@ extern int ufshcd_system_freeze(struct device *dev);
extern int ufshcd_system_thaw(struct device *dev);
extern int ufshcd_system_restore(struct device *dev);
#endif
-extern int ufshcd_shutdown(struct ufs_hba *hba);
extern int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
int agreed_gear,
@@ -1349,7 +1371,6 @@ static inline int ufshcd_disable_host_tx_lcc(struct ufs_hba *hba)
return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);
}
-void ufshcd_auto_hibern8_enable(struct ufs_hba *hba);
void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
const struct ufs_dev_quirk *fixups);
@@ -1358,7 +1379,7 @@ void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
u8 **buf, bool ascii);
-int ufshcd_hold(struct ufs_hba *hba, bool async);
+void ufshcd_hold(struct ufs_hba *hba);
void ufshcd_release(struct ufs_hba *hba);
void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value);
@@ -1369,12 +1390,6 @@ int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg);
int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
-int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
- struct utp_upiu_req *req_upiu,
- struct utp_upiu_req *rsp_upiu,
- int msgcode,
- u8 *desc_buff, int *buff_len,
- enum query_opcode desc_op);
int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu,
struct utp_upiu_req *rsp_upiu, struct ufs_ehs *ehs_req,
struct ufs_ehs *ehs_rsp, int sg_cnt,
@@ -1384,6 +1399,7 @@ int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable);
int ufshcd_suspend_prepare(struct device *dev);
int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm);
void ufshcd_resume_complete(struct device *dev);
+bool ufshcd_is_hba_active(struct ufs_hba *hba);
/* Wrapper functions for safely calling variant operations */
static inline int ufshcd_vops_init(struct ufs_hba *hba)
diff --git a/include/ufs/ufshci.h b/include/ufs/ufshci.h
index 11424bb03814..d5accacae6bc 100644
--- a/include/ufs/ufshci.h
+++ b/include/ufs/ufshci.h
@@ -11,7 +11,8 @@
#ifndef _UFSHCI_H
#define _UFSHCI_H
-#include <scsi/scsi_host.h>
+#include <linux/types.h>
+#include <ufs/ufs.h>
enum {
TASK_REQ_UPIU_SIZE_DWORDS = 8,
@@ -99,6 +100,9 @@ enum {
enum {
REG_SQHP = 0x0,
REG_SQTP = 0x4,
+ REG_SQRTC = 0x8,
+ REG_SQCTI = 0xC,
+ REG_SQRTS = 0x10,
};
enum {
@@ -111,12 +115,25 @@ enum {
REG_CQIE = 0x4,
};
+enum {
+ SQ_START = 0x0,
+ SQ_STOP = 0x1,
+ SQ_ICU = 0x2,
+};
+
+enum {
+ SQ_STS = 0x1,
+ SQ_CUS = 0x2,
+};
+
+#define SQ_ICU_ERR_CODE_MASK GENMASK(7, 4)
#define UFS_MASK(mask, offset) ((mask) << (offset))
/* UFS Version 08h */
#define MINOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 0)
#define MAJOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 16)
+#define UFSHCD_NUM_RESERVED 1
/*
* Controller UFSHCI version
* - 2.x and newer use the following scheme:
@@ -421,15 +438,13 @@ enum {
UTP_SCSI_COMMAND = 0x00000000,
UTP_NATIVE_UFS_COMMAND = 0x10000000,
UTP_DEVICE_MANAGEMENT_FUNCTION = 0x20000000,
- UTP_REQ_DESC_INT_CMD = 0x01000000,
- UTP_REQ_DESC_CRYPTO_ENABLE_CMD = 0x00800000,
};
/* UTP Transfer Request Data Direction (DD) */
-enum {
- UTP_NO_DATA_TRANSFER = 0x00000000,
- UTP_HOST_TO_DEVICE = 0x02000000,
- UTP_DEVICE_TO_HOST = 0x04000000,
+enum utp_data_direction {
+ UTP_NO_DATA_TRANSFER = 0,
+ UTP_HOST_TO_DEVICE = 1,
+ UTP_DEVICE_TO_HOST = 2,
};
/* Overall command status values */
@@ -453,7 +468,7 @@ enum {
};
/* The maximum length of the data byte count field in the PRDT is 256KB */
-#define PRDT_DATA_BYTE_COUNT_MAX (256 * 1024)
+#define PRDT_DATA_BYTE_COUNT_MAX SZ_256K
/* The granularity of the data byte count field in the PRDT is 32-bit */
#define PRDT_DATA_BYTE_COUNT_PAD 4
@@ -488,23 +503,43 @@ struct utp_transfer_cmd_desc {
/**
* struct request_desc_header - Descriptor Header common to both UTRD and UTMRD
- * @dword0: Descriptor Header DW0
- * @dword1: Descriptor Header DW1
- * @dword2: Descriptor Header DW2
- * @dword3: Descriptor Header DW3
*/
struct request_desc_header {
- __le32 dword_0;
- __le32 dword_1;
- __le32 dword_2;
- __le32 dword_3;
+ u8 cci;
+ u8 ehs_length;
+#if defined(__BIG_ENDIAN)
+ u8 enable_crypto:1;
+ u8 reserved2:7;
+
+ u8 command_type:4;
+ u8 reserved1:1;
+ u8 data_direction:2;
+ u8 interrupt:1;
+#elif defined(__LITTLE_ENDIAN)
+ u8 reserved2:7;
+ u8 enable_crypto:1;
+
+ u8 interrupt:1;
+ u8 data_direction:2;
+ u8 reserved1:1;
+ u8 command_type:4;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+
+ __le32 dunl;
+ u8 ocs;
+ u8 cds;
+ __le16 ldbc;
+ __le32 dunu;
};
+static_assert(sizeof(struct request_desc_header) == 16);
+
/**
* struct utp_transfer_req_desc - UTP Transfer Request Descriptor (UTRD)
* @header: UTRD header DW-0 to DW-3
- * @command_desc_base_addr_lo: UCD base address low DW-4
- * @command_desc_base_addr_hi: UCD base address high DW-5
+ * @command_desc_base_addr: UCD base address DW 4-5
* @response_upiu_length: response UPIU length DW-6
* @response_upiu_offset: response UPIU offset DW-6
* @prd_table_length: Physical region descriptor length DW-7
@@ -516,8 +551,7 @@ struct utp_transfer_req_desc {
struct request_desc_header header;
/* DW 4-5*/
- __le32 command_desc_base_addr_lo;
- __le32 command_desc_base_addr_hi;
+ __le64 command_desc_base_addr;
/* DW 6 */
__le16 response_upiu_length;
diff --git a/include/ufs/unipro.h b/include/ufs/unipro.h
index dc9dd1d23f0f..360e1245fb40 100644
--- a/include/ufs/unipro.h
+++ b/include/ufs/unipro.h
@@ -193,7 +193,7 @@
#define DME_LocalAFC0ReqTimeOutVal 0xD043
/* PA power modes */
-enum {
+enum ufs_pa_pwr_mode {
FAST_MODE = 1,
SLOW_MODE = 2,
FASTAUTO_MODE = 4,
@@ -205,7 +205,7 @@ enum {
#define PWRMODE_RX_OFFSET 4
/* PA TX/RX Frequency Series */
-enum {
+enum ufs_hs_gear_rate {
PA_HS_MODE_A = 1,
PA_HS_MODE_B = 2,
};
@@ -230,6 +230,12 @@ enum ufs_hs_gear_tag {
UFS_HS_G5 /* HS Gear 5 */
};
+enum ufs_lanes {
+ UFS_LANE_DONT_CHANGE, /* Don't change Lane */
+ UFS_LANE_1, /* Lane 1 (default for reset) */
+ UFS_LANE_2, /* Lane 2 */
+};
+
enum ufs_unipro_ver {
UFS_UNIPRO_VER_RESERVED = 0,
UFS_UNIPRO_VER_1_40 = 1, /* UniPro version 1.40 */
diff --git a/include/vdso/gettime.h b/include/vdso/gettime.h
new file mode 100644
index 000000000000..c50d152e7b3e
--- /dev/null
+++ b/include/vdso/gettime.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _VDSO_GETTIME_H
+#define _VDSO_GETTIME_H
+
+#include <linux/types.h>
+
+struct __kernel_timespec;
+struct timezone;
+
+#if !defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64)
+struct old_timespec32;
+int __vdso_clock_getres(clockid_t clock, struct old_timespec32 *res);
+int __vdso_clock_gettime(clockid_t clock, struct old_timespec32 *ts);
+#else
+int __vdso_clock_getres(clockid_t clock, struct __kernel_timespec *res);
+int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts);
+#endif
+
+__kernel_old_time_t __vdso_time(__kernel_old_time_t *t);
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz);
+int __vdso_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts);
+
+#endif
diff --git a/include/video/kyro.h b/include/video/kyro.h
index b958c2e9c915..418eef6c5523 100644
--- a/include/video/kyro.h
+++ b/include/video/kyro.h
@@ -38,18 +38,6 @@ struct kyrofb_info {
int wc_cookie;
};
-extern int kyro_dev_init(void);
-extern void kyro_dev_reset(void);
-
-extern unsigned char *kyro_dev_physical_fb_ptr(void);
-extern unsigned char *kyro_dev_virtual_fb_ptr(void);
-extern void *kyro_dev_physical_regs_ptr(void);
-extern void *kyro_dev_virtual_regs_ptr(void);
-extern unsigned int kyro_dev_fb_size(void);
-extern unsigned int kyro_dev_regs_size(void);
-
-extern u32 kyro_dev_overlay_offset(void);
-
/*
* benedict.gaster@superh.com
* Added the follow IOCTLS for the creation of overlay services...
diff --git a/include/video/mmp_disp.h b/include/video/mmp_disp.h
index 77252cb46361..a722dcbf5073 100644
--- a/include/video/mmp_disp.h
+++ b/include/video/mmp_disp.h
@@ -231,7 +231,7 @@ struct mmp_path {
/* layers */
int overlay_num;
- struct mmp_overlay overlays[];
+ struct mmp_overlay overlays[] __counted_by(overlay_num);
};
extern struct mmp_path *mmp_get_path(const char *name);
diff --git a/include/video/sticore.h b/include/video/sticore.h
new file mode 100644
index 000000000000..823666af1871
--- /dev/null
+++ b/include/video/sticore.h
@@ -0,0 +1,406 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef STICORE_H
+#define STICORE_H
+
+struct device;
+
+/* generic STI structures & functions */
+
+#define MAX_STI_ROMS 4 /* max no. of ROMs which this driver handles */
+
+#define STI_REGION_MAX 8 /* hardcoded STI constants */
+#define STI_DEV_NAME_LENGTH 32
+#define STI_MONITOR_MAX 256
+
+#define STI_FONT_HPROMAN8 1
+#define STI_FONT_KANA8 2
+
+#define ALT_CODE_TYPE_UNKNOWN 0x00 /* alt code type values */
+#define ALT_CODE_TYPE_PA_RISC_64 0x01
+
+/* The latency of the STI functions cannot really be reduced by setting
+ * this to 0; STI doesn't seem to be designed to allow calling a different
+ * function (or the same function with different arguments) after a
+ * function exited with 1 as return value.
+ *
+ * As all of the functions below could be called from interrupt context,
+ * we have to spin_lock_irqsave around the do { ret = bla(); } while(ret==1)
+ * block. Really bad latency there.
+ *
+ * Probably the best solution to all this is to have the generic code manage
+ * the screen buffer and a kernel thread to call STI occasionally.
+ *
+ * Luckily, the frame buffer guys have the same problem so we can just wait
+ * for them to fix it and steal their solution. prumpf
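+ *
+ * Illustrative sketch (editor's addition, not in the original patch) of
+ * the retry pattern described above; sti_call() and STI_WAIT are
+ * declared later in this header:
+ *
+ *	spin_lock_irqsave(&sti->lock, flags);
+ *	do {
+ *		ret = sti_call(sti, func, f, inptr, outptr, glob_cfg);
+ *	} while (ret == STI_WAIT);
+ *	spin_unlock_irqrestore(&sti->lock, flags);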
+ */
+
+#include <asm/io.h>
+
+#define STI_WAIT 1
+
+#define STI_PTR(p)	(virt_to_phys(p))
+
+#define sti_onscreen_x(sti) (sti->glob_cfg->onscreen_x)
+#define sti_onscreen_y(sti) (sti->glob_cfg->onscreen_y)
+
+/* sti_font_x/y() use the native font ROM! */
+#define sti_font_x(sti) (PTR_STI(sti->font)->width)
+#define sti_font_y(sti) (PTR_STI(sti->font)->height)
+
+#ifdef CONFIG_64BIT
+#define STI_LOWMEM (GFP_KERNEL | GFP_DMA)
+#else
+#define STI_LOWMEM (GFP_KERNEL)
+#endif
+
+
+/* STI function configuration structs */
+
+typedef union region {
+ struct {
+		u32 offset	: 14;	/* offset in units of 4 kB pages */
+ u32 sys_only : 1; /* don't map to user space */
+ u32 cache : 1; /* map to data cache */
+ u32 btlb : 1; /* map to block tlb */
+ u32 last : 1; /* last region in list */
+		u32 length	: 14;	/* length in units of 4 kB pages */
+ } region_desc;
+
+ u32 region; /* complete region value */
+} region_t;
+
+#define REGION_OFFSET_TO_PHYS(rt, hpa) \
+ (((rt).region_desc.offset << 12) + (hpa))
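+/*
+ * Usage sketch (editor's addition): resolving a region descriptor against
+ * the device's hard physical address, e.g.
+ *
+ *	sti->regions_phys[i] = REGION_OFFSET_TO_PHYS(sti->regions[i], hpa);
+ */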
+
+struct sti_glob_cfg_ext {
+ u8 curr_mon; /* current monitor configured */
+ u8 friendly_boot; /* in friendly boot mode */
+ s16 power; /* power calculation (in Watts) */
+ s32 freq_ref; /* frequency reference */
+ u32 *sti_mem_addr; /* pointer to global sti memory (size=sti_mem_request) */
+ u32 *future_ptr; /* pointer to future data */
+};
+
+struct sti_glob_cfg {
+ s32 text_planes; /* number of planes used for text */
+ s16 onscreen_x; /* screen width in pixels */
+ s16 onscreen_y; /* screen height in pixels */
+	s16 offscreen_x;	/* offscreen width in pixels */
+	s16 offscreen_y;	/* offscreen height in pixels */
+ s16 total_x; /* frame buffer width in pixels */
+ s16 total_y; /* frame buffer height in pixels */
+ u32 *region_ptrs[STI_REGION_MAX]; /* region pointers */
+ s32 reent_lvl; /* storage for reentry level value */
+ u32 *save_addr; /* where to save or restore reentrant state */
+ u32 *ext_ptr; /* pointer to extended glob_cfg data structure */
+};
+
+
+/* STI init function structs */
+
+struct sti_init_flags {
+ u32 wait : 1; /* should routine idle wait or not */
+ u32 reset : 1; /* hard reset the device? */
+ u32 text : 1; /* turn on text display planes? */
+ u32 nontext : 1; /* turn on non-text display planes? */
+ u32 clear : 1; /* clear text display planes? */
+ u32 cmap_blk : 1; /* non-text planes cmap black? */
+ u32 enable_be_timer : 1; /* enable bus error timer */
+ u32 enable_be_int : 1; /* enable bus error timer interrupt */
+ u32 no_chg_tx : 1; /* don't change text settings */
+ u32 no_chg_ntx : 1; /* don't change non-text settings */
+ u32 no_chg_bet : 1; /* don't change berr timer settings */
+ u32 no_chg_bei : 1; /* don't change berr int settings */
+ u32 init_cmap_tx : 1; /* initialize cmap for text planes */
+ u32 cmt_chg : 1; /* change current monitor type */
+ u32 retain_ie : 1; /* don't allow reset to clear int enables */
+ u32 caller_bootrom : 1; /* set only by bootrom for each call */
+ u32 caller_kernel : 1; /* set only by kernel for each call */
+ u32 caller_other : 1; /* set only by non-[BR/K] caller */
+ u32 pad : 14; /* pad to word boundary */
+ u32 *future_ptr; /* pointer to future data */
+};
+
+struct sti_init_inptr_ext {
+ u8 config_mon_type; /* configure to monitor type */
+ u8 pad[1]; /* pad to word boundary */
+ u16 inflight_data; /* inflight data possible on PCI */
+ u32 *future_ptr; /* pointer to future data */
+};
+
+struct sti_init_inptr {
+ s32 text_planes; /* number of planes to use for text */
+ u32 *ext_ptr; /* pointer to extended init_graph inptr data structure*/
+};
+
+
+struct sti_init_outptr {
+ s32 errno; /* error number on failure */
+ s32 text_planes; /* number of planes used for text */
+ u32 *future_ptr; /* pointer to future data */
+};
+
+
+
+/* STI configuration function structs */
+
+struct sti_conf_flags {
+ u32 wait : 1; /* should routine idle wait or not */
+ u32 pad : 31; /* pad to word boundary */
+ u32 *future_ptr; /* pointer to future data */
+};
+
+struct sti_conf_inptr {
+ u32 *future_ptr; /* pointer to future data */
+};
+
+struct sti_conf_outptr_ext {
+ u32 crt_config[3]; /* hardware specific X11/OGL information */
+ u32 crt_hdw[3];
+ u32 *future_ptr;
+};
+
+struct sti_conf_outptr {
+ s32 errno; /* error number on failure */
+ s16 onscreen_x; /* screen width in pixels */
+ s16 onscreen_y; /* screen height in pixels */
+ s16 offscreen_x; /* offscreen width in pixels */
+ s16 offscreen_y; /* offscreen height in pixels */
+ s16 total_x; /* frame buffer width in pixels */
+ s16 total_y; /* frame buffer height in pixels */
+ s32 bits_per_pixel; /* bits/pixel device has configured */
+ s32 bits_used; /* bits which can be accessed */
+ s32 planes; /* number of fb planes in system */
+ u8 dev_name[STI_DEV_NAME_LENGTH]; /* null terminated product name */
+ u32 attributes; /* flags denoting attributes */
+ u32 *ext_ptr; /* pointer to future data */
+};
+
+struct sti_rom {
+ u8 type[4];
+ u8 res004;
+ u8 num_mons;
+ u8 revno[2];
+ u32 graphics_id[2];
+
+ u32 font_start;
+ u32 statesize;
+ u32 last_addr;
+ u32 region_list;
+
+ u16 reentsize;
+ u16 maxtime;
+ u32 mon_tbl_addr;
+ u32 user_data_addr;
+ u32 sti_mem_req;
+
+ u32 user_data_size;
+ u16 power;
+ u8 bus_support;
+ u8 ext_bus_support;
+ u8 alt_code_type;
+ u8 ext_dd_struct[3];
+ u32 cfb_addr;
+
+ u32 init_graph;
+ u32 state_mgmt;
+ u32 font_unpmv;
+ u32 block_move;
+ u32 self_test;
+ u32 excep_hdlr;
+ u32 inq_conf;
+ u32 set_cm_entry;
+ u32 dma_ctrl;
+ u8 res040[7 * 4];
+
+ u32 init_graph_addr;
+ u32 state_mgmt_addr;
+ u32 font_unp_addr;
+ u32 block_move_addr;
+ u32 self_test_addr;
+ u32 excep_hdlr_addr;
+ u32 inq_conf_addr;
+ u32 set_cm_entry_addr;
+ u32 image_unpack_addr;
+ u32 pa_risx_addrs[7];
+};
+
+struct sti_rom_font {
+ u16 first_char;
+ u16 last_char;
+ u8 width;
+ u8 height;
+ u8 font_type; /* language type */
+ u8 bytes_per_char;
+ s32 next_font; /* note: signed int */
+ u8 underline_height;
+ u8 underline_pos;
+ u8 res008[2];
+};
+
+/* sticore internal font handling */
+
+struct sti_cooked_font {
+ struct sti_rom_font *raw; /* native ptr for STI functions */
+ void *raw_ptr; /* kmalloc'ed font data */
+ struct sti_cooked_font *next_font;
+ int height, width;
+ int refcount;
+ u32 crc;
+};
+
+struct sti_cooked_rom {
+ struct sti_rom *raw;
+ struct sti_cooked_font *font_start;
+};
+
+/* STI font printing function structs */
+
+struct sti_font_inptr {
+ u32 *font_start_addr; /* address of font start */
+ s16 index; /* index into font table of character */
+ u8 fg_color; /* foreground color of character */
+ u8 bg_color; /* background color of character */
+ s16 dest_x; /* X location of character upper left */
+ s16 dest_y; /* Y location of character upper left */
+ u32 *future_ptr; /* pointer to future data */
+};
+
+struct sti_font_flags {
+ u32 wait : 1; /* should routine idle wait or not */
+ u32 non_text : 1; /* font unpack/move in non_text planes =1, text =0 */
+ u32 pad : 30; /* pad to word boundary */
+ u32 *future_ptr; /* pointer to future data */
+};
+
+struct sti_font_outptr {
+ s32 errno; /* error number on failure */
+ u32 *future_ptr; /* pointer to future data */
+};
+
+/* STI blockmove structs */
+
+struct sti_blkmv_flags {
+ u32 wait : 1; /* should routine idle wait or not */
+ u32 color : 1; /* change color during move? */
+ u32 clear : 1; /* clear during move? */
+ u32 non_text : 1; /* block move in non_text planes =1, text =0 */
+ u32 pad : 28; /* pad to word boundary */
+ u32 *future_ptr; /* pointer to future data */
+};
+
+struct sti_blkmv_inptr {
+ u8 fg_color; /* foreground color after move */
+ u8 bg_color; /* background color after move */
+ s16 src_x; /* source upper left pixel x location */
+ s16 src_y; /* source upper left pixel y location */
+ s16 dest_x; /* dest upper left pixel x location */
+ s16 dest_y; /* dest upper left pixel y location */
+ s16 width; /* block width in pixels */
+ s16 height; /* block height in pixels */
+ u32 *future_ptr; /* pointer to future data */
+};
+
+struct sti_blkmv_outptr {
+ s32 errno; /* error number on failure */
+ u32 *future_ptr; /* pointer to future data */
+};
+
+
+/* sti_all_data is an internal struct which needs to be allocated in
+ * low memory (< 4GB) if a 32-bit STI ROM is used on a 64-bit kernel */
+
+struct sti_all_data {
+ struct sti_glob_cfg glob_cfg;
+ struct sti_glob_cfg_ext glob_cfg_ext;
+
+ struct sti_conf_inptr inq_inptr;
+ struct sti_conf_outptr inq_outptr; /* configuration */
+ struct sti_conf_outptr_ext inq_outptr_ext;
+
+ struct sti_init_inptr_ext init_inptr_ext;
+ struct sti_init_inptr init_inptr;
+ struct sti_init_outptr init_outptr;
+
+ struct sti_blkmv_inptr blkmv_inptr;
+ struct sti_blkmv_outptr blkmv_outptr;
+
+ struct sti_font_inptr font_inptr;
+ struct sti_font_outptr font_outptr;
+
+ /* leave as last entries */
+ unsigned long save_addr[1024 / sizeof(unsigned long)];
+	/* min 256 bytes, which is the STI default; max sti->sti_mem_request */
+ unsigned long sti_mem_addr[256 / sizeof(unsigned long)];
+	/* do not add anything below here! */
+};
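+/*
+ * Allocation sketch (editor's addition): given the low-memory constraint
+ * noted above, this struct is expected to be allocated with the
+ * STI_LOWMEM flags defined earlier, e.g.
+ *
+ *	sti->sti_data = kzalloc(sizeof(struct sti_all_data), STI_LOWMEM);
+ */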
+
+/* internal generic STI struct */
+
+struct sti_struct {
+ spinlock_t lock;
+
+ /* char **mon_strings; */
+ int sti_mem_request;
+ u32 graphics_id[2];
+
+ struct sti_cooked_rom *rom;
+
+ unsigned long font_unpmv;
+ unsigned long block_move;
+ unsigned long init_graph;
+ unsigned long inq_conf;
+ int do_call64; /* call 64-bit code */
+
+ /* all following fields are initialized by the generic routines */
+ int text_planes;
+ region_t regions[STI_REGION_MAX];
+ unsigned long regions_phys[STI_REGION_MAX];
+
+ struct sti_glob_cfg *glob_cfg; /* points into sti_all_data */
+
+ int wordmode;
+ struct sti_cooked_font *font; /* ptr to selected font (cooked) */
+
+ struct pci_dev *pd;
+
+ /* PCI data structures (pg. 17ff from sti.pdf) */
+ u8 rm_entry[16]; /* pci region mapper array == pci config space offset */
+
+ /* pointer to the parent device */
+ struct device *dev;
+
+ /* pointer to all internal data */
+ struct sti_all_data *sti_data;
+
+ /* pa_path of this device */
+ char pa_path[24];
+};
+
+
+/* sticore interface functions */
+
+struct sti_struct *sti_get_rom(unsigned int index); /* 0: default sti */
+void sti_font_convert_bytemode(struct sti_struct *sti, struct sti_cooked_font *f);
+
+
+/* sticore main function to call STI firmware */
+
+int sti_call(const struct sti_struct *sti, unsigned long func,
+ const void *flags, void *inptr, void *outptr,
+ struct sti_glob_cfg *glob_cfg);
+
+
+/* functions to call the STI ROM directly */
+
+void sti_putc(struct sti_struct *sti, int c, int y, int x,
+ struct sti_cooked_font *font);
+void sti_set(struct sti_struct *sti, int src_y, int src_x,
+ int height, int width, u8 color);
+void sti_clear(struct sti_struct *sti, int src_y, int src_x,
+ int height, int width, int c, struct sti_cooked_font *font);
+void sti_bmove(struct sti_struct *sti, int src_y, int src_x,
+ int dst_y, int dst_x, int height, int width,
+ struct sti_cooked_font *font);
+
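+/*
+ * Usage sketch (editor's addition): drawing a character through the
+ * default ROM with its selected font, assuming sti_get_rom(0) succeeds:
+ *
+ *	struct sti_struct *sti = sti_get_rom(0);
+ *
+ *	if (sti)
+ *		sti_putc(sti, '?', 0, 0, sti->font);
+ */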
+#endif /* STICORE_H */
diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
index 8d2a3bfc8dac..47d96e75e8ef 100644
--- a/include/video/uvesafb.h
+++ b/include/video/uvesafb.h
@@ -109,8 +109,6 @@ struct uvesafb_ktask {
u32 ack;
};
-static int uvesafb_exec(struct uvesafb_ktask *tsk);
-
#define UVESAFB_EXACT_RES 1
#define UVESAFB_EXACT_DEPTH 2
diff --git a/include/xen/arm/hypervisor.h b/include/xen/arm/hypervisor.h
index 43ef24dd030e..9995695204f5 100644
--- a/include/xen/arm/hypervisor.h
+++ b/include/xen/arm/hypervisor.h
@@ -7,18 +7,6 @@
extern struct shared_info *HYPERVISOR_shared_info;
extern struct start_info *xen_start_info;
-/* Lazy mode for batching updates / context switch */
-enum paravirt_lazy_mode {
- PARAVIRT_LAZY_NONE,
- PARAVIRT_LAZY_MMU,
- PARAVIRT_LAZY_CPU,
-};
-
-static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
-{
- return PARAVIRT_LAZY_NONE;
-}
-
#ifdef CONFIG_XEN
void __init xen_early_init(void);
#else
diff --git a/include/xen/events.h b/include/xen/events.h
index 44c2855c76d1..3b07409f8032 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -69,13 +69,12 @@ int xen_set_irq_priority(unsigned irq, unsigned priority);
/*
* Allow extra references to event channels exposed to userspace by evtchn
*/
-int evtchn_make_refcounted(evtchn_port_t evtchn);
+int evtchn_make_refcounted(evtchn_port_t evtchn, bool is_static);
int evtchn_get(evtchn_port_t evtchn);
void evtchn_put(evtchn_port_t evtchn);
void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector);
void rebind_evtchn_irq(evtchn_port_t evtchn, int irq);
-int xen_set_affinity_evtchn(struct irq_desc *desc, unsigned int tcpu);
static inline void notify_remote_via_evtchn(evtchn_port_t port)
{
@@ -89,7 +88,6 @@ void xen_irq_resume(void);
/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq);
-void xen_set_irq_pending(int irq);
bool xen_test_irq_pending(int irq);
/* Poll waiting for an irq to become pending. In the usual case, the
@@ -102,12 +100,11 @@ void xen_poll_irq_timeout(int irq, u64 timeout);
/* Determine the IRQ which is bound to an event channel */
unsigned int irq_from_evtchn(evtchn_port_t evtchn);
-int irq_from_virq(unsigned int cpu, unsigned int virq);
-evtchn_port_t evtchn_from_irq(unsigned irq);
+int irq_evtchn_from_virq(unsigned int cpu, unsigned int virq,
+ evtchn_port_t *evtchn);
int xen_set_callback_via(uint64_t via);
-void xen_evtchn_do_upcall(struct pt_regs *regs);
-int xen_hvm_evtchn_do_upcall(void);
+int xen_evtchn_do_upcall(void);
/* Bind a pirq for a physical interrupt to an irq. */
int xen_bind_pirq_gsi_to_irq(unsigned gsi,
@@ -124,9 +121,6 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
/* De-allocates the above mentioned physical interrupt. */
int xen_destroy_irq(int irq);
-/* Return irq from pirq */
-int xen_irq_from_pirq(unsigned pirq);
-
/* Return the pirq allocated to the irq. */
int xen_pirq_from_irq(unsigned irq);
@@ -138,4 +132,16 @@ int xen_test_irq_shared(int irq);
/* initialize Xen IRQ subsystem */
void xen_init_IRQ(void);
+
+irqreturn_t xen_debug_interrupt(int irq, void *dev_id);
+
+static inline void xen_evtchn_close(evtchn_port_t port)
+{
+ struct evtchn_close close;
+
+ close.port = port;
+ if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
+ BUG();
+}
+
#endif /* _XEN_EVENTS_H */
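Editor's note on the lookup consolidation above: irq_evtchn_from_virq() replaces the separate irq_from_virq()/evtchn_from_irq() pair and returns both values in one call. A hedged sketch (VIRQ_TIMER stands in for any bound virq):

	evtchn_port_t evtchn;
	int irq = irq_evtchn_from_virq(smp_processor_id(), VIRQ_TIMER, &evtchn);

	if (irq >= 0)
		notify_remote_via_evtchn(evtchn);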
diff --git a/include/xen/interface/hvm/ioreq.h b/include/xen/interface/hvm/ioreq.h
new file mode 100644
index 000000000000..b02cfeae7eb5
--- /dev/null
+++ b/include/xen/interface/hvm/ioreq.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * ioreq.h: I/O request definitions for device models
+ * Copyright (c) 2004, Intel Corporation.
+ */
+
+#ifndef __XEN_PUBLIC_HVM_IOREQ_H__
+#define __XEN_PUBLIC_HVM_IOREQ_H__
+
+#define IOREQ_READ 1
+#define IOREQ_WRITE 0
+
+#define STATE_IOREQ_NONE 0
+#define STATE_IOREQ_READY 1
+#define STATE_IOREQ_INPROCESS 2
+#define STATE_IORESP_READY 3
+
+#define IOREQ_TYPE_PIO 0 /* pio */
+#define IOREQ_TYPE_COPY 1 /* mmio ops */
+#define IOREQ_TYPE_PCI_CONFIG 2
+#define IOREQ_TYPE_TIMEOFFSET 7
+#define IOREQ_TYPE_INVALIDATE 8 /* mapcache */
+
+/*
+ * The VMExit dispatcher should cooperate with the instruction decoder
+ * to prepare this structure, then notify the service OS and device
+ * model (DM) by sending a virq.
+ *
+ * For I/O type IOREQ_TYPE_PCI_CONFIG, the physical address is formatted
+ * as follows:
+ *
+ * 63....48|47..40|39..35|34..32|31........0
+ * SEGMENT |BUS |DEV |FN |OFFSET
+ */
+struct ioreq {
+ uint64_t addr; /* physical address */
+ uint64_t data; /* data (or paddr of data) */
+ uint32_t count; /* for rep prefixes */
+ uint32_t size; /* size in bytes */
+ uint32_t vp_eport; /* evtchn for notifications to/from device model */
+ uint16_t _pad0;
+ uint8_t state:4;
+ uint8_t data_is_ptr:1; /* if 1, data above is the guest paddr
+ * of the real data to use. */
+ uint8_t dir:1; /* 1=read, 0=write */
+ uint8_t df:1;
+ uint8_t _pad1:1;
+ uint8_t type; /* I/O type */
+};
+
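+/*
+ * Decoding sketch (editor's addition, not part of the Xen ABI): helper
+ * macros pulling the fields out of an IOREQ_TYPE_PCI_CONFIG address per
+ * the layout documented above; the names are hypothetical.
+ */
+#define IOREQ_PCI_SEGMENT(addr)	(((addr) >> 48) & 0xffff)
+#define IOREQ_PCI_BUS(addr)	(((addr) >> 40) & 0xff)
+#define IOREQ_PCI_DEV(addr)	(((addr) >> 35) & 0x1f)
+#define IOREQ_PCI_FN(addr)	(((addr) >> 32) & 0x7)
+#define IOREQ_PCI_OFFSET(addr)	((addr) & 0xffffffff)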
+#endif /* __XEN_PUBLIC_HVM_IOREQ_H__ */
diff --git a/include/xen/interface/io/displif.h b/include/xen/interface/io/displif.h
index 18417b017869..60e42d3b760e 100644
--- a/include/xen/interface/io/displif.h
+++ b/include/xen/interface/io/displif.h
@@ -537,7 +537,7 @@ struct xendispl_dbuf_create_req {
struct xendispl_page_directory {
grant_ref_t gref_dir_next_page;
- grant_ref_t gref[1]; /* Variable length */
+ grant_ref_t gref[];
};
/*
diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h
index ba4c4274b714..4fef1efcdcab 100644
--- a/include/xen/interface/io/ring.h
+++ b/include/xen/interface/io/ring.h
@@ -95,7 +95,7 @@ struct __name##_sring { \
RING_IDX req_prod, req_event; \
RING_IDX rsp_prod, rsp_event; \
uint8_t __pad[48]; \
- union __name##_sring_entry ring[1]; /* variable-length */ \
+ union __name##_sring_entry ring[]; \
}; \
\
/* "Front" end's private variables */ \
diff --git a/include/xen/interface/io/sndif.h b/include/xen/interface/io/sndif.h
index 445657cdb1de..b818517588b5 100644
--- a/include/xen/interface/io/sndif.h
+++ b/include/xen/interface/io/sndif.h
@@ -659,7 +659,7 @@ struct xensnd_open_req {
struct xensnd_page_directory {
grant_ref_t gref_dir_next_page;
- grant_ref_t gref[1]; /* Variable length */
+ grant_ref_t gref[];
};
/*
diff --git a/include/xen/xen.h b/include/xen/xen.h
index 0efeb652f9b8..a1e5b3f18d69 100644
--- a/include/xen/xen.h
+++ b/include/xen/xen.h
@@ -29,8 +29,17 @@ extern bool xen_pvh;
extern uint32_t xen_start_flags;
+#ifdef CONFIG_XEN_PV
+extern bool xen_pv_pci_possible;
+#else
+#define xen_pv_pci_possible 0
+#endif
+
#include <xen/interface/hvm/start_info.h>
extern struct hvm_start_info pvh_start_info;
+void xen_prepare_pvh(void);
+struct pt_regs;
+void xen_pv_evtchn_do_upcall(struct pt_regs *regs);
#ifdef CONFIG_XEN_DOM0
#include <xen/interface/xen.h>